diff --git a/gns3server/compute/base_manager.py b/gns3server/compute/base_manager.py
index 9584fe1e..0eb6426a 100644
--- a/gns3server/compute/base_manager.py
+++ b/gns3server/compute/base_manager.py
@@ -20,6 +20,7 @@ import os
import struct
import stat
import asyncio
+import aiofiles
import aiohttp
import socket
@@ -46,6 +47,8 @@ from .nios.nio_ethernet import NIOEthernet
from ..utils.images import md5sum, remove_checksum, images_directories, default_images_directory, list_images
from .error import NodeError, ImageMissingError
+CHUNK_SIZE = 1024 * 8 # 8KB
+
class BaseManager:
@@ -456,7 +459,7 @@ class BaseManager:
with open(path, "rb") as f:
await response.prepare(request)
while nio.capturing:
- data = f.read(4096)
+ data = f.read(CHUNK_SIZE)
if not data:
await asyncio.sleep(0.1)
continue
@@ -594,18 +597,18 @@ class BaseManager:
path = os.path.abspath(os.path.join(directory, *os.path.split(filename)))
if os.path.commonprefix([directory, path]) != directory:
raise aiohttp.web.HTTPForbidden(text="Could not write image: {}, {} is forbidden".format(filename, path))
- log.info("Writing image file %s", path)
+ log.info("Writing image file to '{}'".format(path))
try:
remove_checksum(path)
# We store the file under its final name only when the upload is finished
tmp_path = path + ".tmp"
os.makedirs(os.path.dirname(path), exist_ok=True)
- with open(tmp_path, 'wb') as f:
+ async with aiofiles.open(tmp_path, 'wb') as f:
while True:
- packet = await stream.read(4096)
- if not packet:
+ chunk = await stream.read(CHUNK_SIZE)
+ if not chunk:
break
- f.write(packet)
+ await f.write(chunk)
os.chmod(tmp_path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
shutil.move(tmp_path, path)
await cancellable_wait_run_in_executor(md5sum, path)
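The upload path above now follows a small async-copy idiom. A minimal standalone sketch of the same pattern, with illustrative names ("save_stream", "stream") that are not part of the patch:

```python
import aiofiles

CHUNK_SIZE = 1024 * 8  # 8KB, matching the constant introduced above

async def save_stream(stream, path):
    """Copy an async byte stream (e.g. request.content) to disk.

    aiofiles delegates the blocking writes to a thread pool, so the
    event loop stays responsive during large uploads.
    """
    async with aiofiles.open(path, "wb") as f:
        while True:
            chunk = await stream.read(CHUNK_SIZE)
            if not chunk:  # end of stream
                break
            await f.write(chunk)
```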
diff --git a/gns3server/compute/docker/__init__.py b/gns3server/compute/docker/__init__.py
index b24be8d2..1a13d1af 100644
--- a/gns3server/compute/docker/__init__.py
+++ b/gns3server/compute/docker/__init__.py
@@ -37,6 +37,7 @@ log = logging.getLogger(__name__)
DOCKER_MINIMUM_API_VERSION = "1.25"
DOCKER_MINIMUM_VERSION = "1.13"
DOCKER_PREFERRED_API_VERSION = "1.30"
+CHUNK_SIZE = 1024 * 8 # 8KB
class Docker(BaseManager):
@@ -206,7 +207,7 @@ class Docker(BaseManager):
content = ""
while True:
try:
- chunk = await response.content.read(1024)
+ chunk = await response.content.read(CHUNK_SIZE)
except aiohttp.ServerDisconnectedError:
log.error("Disconnected from server while pulling Docker image '{}' from docker hub".format(image))
break
diff --git a/gns3server/controller/compute.py b/gns3server/controller/compute.py
index e36c27fe..2244eded 100644
--- a/gns3server/controller/compute.py
+++ b/gns3server/controller/compute.py
@@ -320,28 +320,6 @@ class Compute:
raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image))
return response
- async def stream_file(self, project, path, timeout=None):
- """
- Read file of a project and stream it
-
- :param project: A project object
- :param path: The path of the file in the project
- :param timeout: timeout
- :returns: A file stream
- """
-
- url = self._getUrl("/projects/{}/stream/{}".format(project.id, path))
- response = await self._session().request("GET", url, auth=self._auth, timeout=timeout)
- if response.status == 404:
- raise aiohttp.web.HTTPNotFound(text="file '{}' not found on compute".format(path))
- elif response.status == 403:
- raise aiohttp.web.HTTPForbidden(text="forbidden to open '{}' on compute".format(path))
- elif response.status != 200:
- raise aiohttp.web.HTTPInternalServerError(text="Unexpected error {}: {}: while opening {} on compute".format(response.status,
- response.reason,
- path))
- return response
-
async def http_query(self, method, path, data=None, dont_connect=False, **kwargs):
"""
:param dont_connect: If true do not reconnect if not connected
diff --git a/gns3server/controller/export_project.py b/gns3server/controller/export_project.py
index 9afcd906..fd88f632 100644
--- a/gns3server/controller/export_project.py
+++ b/gns3server/controller/export_project.py
@@ -19,31 +19,33 @@ import os
import sys
import json
import asyncio
+import aiofiles
import aiohttp
import zipfile
import tempfile
-import zipstream
from datetime import datetime
import logging
log = logging.getLogger(__name__)
+CHUNK_SIZE = 1024 * 8 # 8KB
-async def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False, reset_mac_addresses=False):
+
+async def export_project(zstream, project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False, reset_mac_addresses=False):
"""
Export a project to a zip file.
The file will be read chunk by chunk when you iterate over the zip stream.
Some files like snapshots and packet captures are ignored.
+ :param zstream: ZipStream object
+ :param project: Project instance
:param temporary_dir: A temporary dir where to store intermediate data
:param include_images: save OS images to the zip file
:param keep_compute_id: If false, replace all compute IDs with "local" (standard behavior for .gns3project to make it portable)
:param allow_all_nodes: Allow all node types to be included in the zip even if not portable
:param reset_mac_addresses: Reset MAC addresses for every node.
-
- :returns: ZipStream object
"""
# To avoid issues with unsaved data, we disallow the export of a running project
@@ -53,8 +55,6 @@ async def export_project(project, temporary_dir, include_images=False, keep_comp
# Make sure we save the project
project.dump()
- zstream = zipstream.ZipFile(allowZip64=True)
-
if not os.path.exists(project._path):
raise aiohttp.web.HTTPNotFound(text="Project could not be found at '{}'".format(project._path))
@@ -80,33 +80,31 @@ async def export_project(project, temporary_dir, include_images=False, keep_comp
if file.endswith(".gns3"):
continue
_patch_mtime(path)
- zstream.write(path, os.path.relpath(path, project._path), compress_type=zipfile.ZIP_DEFLATED)
+ zstream.write(path, os.path.relpath(path, project._path))
# Export files from remote computes
- downloaded_files = set()
for compute in project.computes:
if compute.id != "local":
compute_files = await compute.list_files(project)
for compute_file in compute_files:
if _is_exportable(compute_file["path"]):
- (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
- f = open(fd, "wb", closefd=True)
+ log.debug("Downloading file '{}' from compute '{}'".format(compute_file["path"], compute.id))
response = await compute.download_file(project, compute_file["path"])
- while True:
- try:
- data = await response.content.read(1024)
- except asyncio.TimeoutError:
- raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading file '{}' from remote compute {}:{}".format(compute_file["path"], compute.host, compute.port))
- if not data:
- break
- f.write(data)
+ #if response.status != 200:
+ # raise aiohttp.web.HTTPConflict(text="Cannot export file from compute '{}'. Compute returned status code {}.".format(compute.id, response.status))
+ (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
+ async with aiofiles.open(fd, 'wb') as f:
+ while True:
+ try:
+ data = await response.content.read(CHUNK_SIZE)
+ except asyncio.TimeoutError:
+ raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading file '{}' from remote compute {}:{}".format(compute_file["path"], compute.host, compute.port))
+ if not data:
+ break
+ await f.write(data)
response.close()
- f.close()
_patch_mtime(temp_path)
- zstream.write(temp_path, arcname=compute_file["path"], compress_type=zipfile.ZIP_DEFLATED)
- downloaded_files.add(compute_file['path'])
-
- return zstream
+ zstream.write(temp_path, arcname=compute_file["path"])
def _patch_mtime(path):
@@ -232,6 +230,7 @@ async def _patch_project_file(project, path, zstream, include_images, keep_compu
zstream.writestr("project.gns3", json.dumps(topology).encode())
return images
+
def _export_local_image(image, zstream):
"""
Exports a local image to the zip file.
@@ -266,30 +265,26 @@ async def _export_remote_images(project, compute_id, image_type, image, project_
Export specific image from remote compute.
"""
- log.info("Downloading image '{}' from compute '{}'".format(image, compute_id))
-
+ log.debug("Downloading image '{}' from compute '{}'".format(image, compute_id))
try:
compute = [compute for compute in project.computes if compute.id == compute_id][0]
except IndexError:
raise aiohttp.web.HTTPConflict(text="Cannot export image from '{}' compute. Compute doesn't exist.".format(compute_id))
- (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
- f = open(fd, "wb", closefd=True)
response = await compute.download_image(image_type, image)
-
if response.status != 200:
- raise aiohttp.web.HTTPConflict(text="Cannot export image from '{}' compute. Compute returned status code {}.".format(compute_id, response.status))
+ raise aiohttp.web.HTTPConflict(text="Cannot export image from compute '{}'. Compute returned status code {}.".format(compute_id, response.status))
- while True:
- try:
- data = await response.content.read(1024)
- except asyncio.TimeoutError:
- raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading image '{}' from remote compute {}:{}".format(image, compute.host, compute.port))
- if not data:
- break
- f.write(data)
+ (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
+ async with aiofiles.open(fd, 'wb') as f:
+ while True:
+ try:
+ data = await response.content.read(CHUNK_SIZE)
+ except asyncio.TimeoutError:
+ raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading image '{}' from remote compute {}:{}".format(image, compute.host, compute.port))
+ if not data:
+ break
+ await f.write(data)
response.close()
- f.close()
arcname = os.path.join("images", image_type, image)
- log.info("Saved {}".format(arcname))
project_zipfile.write(temp_path, arcname=arcname, compress_type=zipfile.ZIP_DEFLATED)
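With the new signature the caller owns the archive object and drains it itself. A minimal sketch of the calling convention, assuming the PR's aiozipstream module; the helper name and arguments are illustrative:

```python
import zipfile
import aiofiles
from gns3server.utils.asyncio import aiozipstream
from gns3server.controller.export_project import export_project

async def export_to_disk(project, tmpdir, dest_path):
    # export_project() only queues entries on the stream; the bytes are
    # produced lazily while the "async for" below iterates the archive.
    with aiozipstream.ZipFile(compression=zipfile.ZIP_DEFLATED) as zstream:
        await export_project(zstream, project, tmpdir)
        async with aiofiles.open(dest_path, "wb") as f:
            async for chunk in zstream:
                await f.write(chunk)
```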
diff --git a/gns3server/controller/import_project.py b/gns3server/controller/import_project.py
index 771d0236..962edddd 100644
--- a/gns3server/controller/import_project.py
+++ b/gns3server/controller/import_project.py
@@ -20,7 +20,6 @@ import sys
import json
import uuid
import shutil
-import asyncio
import zipfile
import aiohttp
import itertools
diff --git a/gns3server/controller/project.py b/gns3server/controller/project.py
index f163a92c..76a2396b 100644
--- a/gns3server/controller/project.py
+++ b/gns3server/controller/project.py
@@ -21,9 +21,12 @@ import json
import uuid
import copy
import shutil
+import time
import asyncio
import aiohttp
+import aiofiles
import tempfile
+import zipfile
from uuid import UUID, uuid4
@@ -37,7 +40,7 @@ from ..config import Config
from ..utils.path import check_path_allowed, get_default_project_directory
from ..utils.asyncio.pool import Pool
from ..utils.asyncio import locking
-from ..utils.asyncio import wait_run_in_executor
+from ..utils.asyncio import aiozipstream
from .export_project import export_project
from .import_project import import_project
@@ -947,15 +950,6 @@ class Project:
while self._loading:
await asyncio.sleep(0.5)
- def _create_duplicate_project_file(self, path, zipstream):
- """
- Creates the project file (to be run in its own thread)
- """
-
- with open(path, "wb") as f:
- for data in zipstream:
- f.write(data)
-
async def duplicate(self, name=None, location=None):
"""
Duplicate a project
@@ -975,12 +969,24 @@ class Project:
self.dump()
assert self._status != "closed"
try:
+ begin = time.time()
with tempfile.TemporaryDirectory() as tmpdir:
- zipstream = await export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True, reset_mac_addresses=True)
- project_path = os.path.join(tmpdir, "project.gns3p")
- await wait_run_in_executor(self._create_duplicate_project_file, project_path, zipstream)
- with open(project_path, "rb") as f:
- project = await import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
+ # Do not compress the exported project when duplicating
+ with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
+ await export_project(zstream, self, tmpdir, keep_compute_id=True, allow_all_nodes=True, reset_mac_addresses=True)
+
+ # export the project to a temporary location
+ project_path = os.path.join(tmpdir, "project.gns3p")
+ log.info("Exporting project to '{}'".format(project_path))
+ async with aiofiles.open(project_path, 'wb') as f:
+ async for chunk in zstream:
+ await f.write(chunk)
+
+ # import the temporary project
+ with open(project_path, "rb") as f:
+ project = await import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
+
+ log.info("Project '{}' duplicated in {:.4f} seconds".format(project.name, time.time() - begin))
except (ValueError, OSError, UnicodeEncodeError) as e:
raise aiohttp.web.HTTPConflict(text="Cannot duplicate project: {}".format(str(e)))
diff --git a/gns3server/controller/snapshot.py b/gns3server/controller/snapshot.py
index ebe22b58..72be81fd 100644
--- a/gns3server/controller/snapshot.py
+++ b/gns3server/controller/snapshot.py
@@ -20,14 +20,20 @@ import os
import uuid
import shutil
import tempfile
-import asyncio
+import aiofiles
+import zipfile
+import time
import aiohttp.web
from datetime import datetime, timezone
from ..utils.asyncio import wait_run_in_executor
+from ..utils.asyncio import aiozipstream
from .export_project import export_project
from .import_project import import_project
+import logging
+log = logging.getLogger(__name__)
+
# The string use to extract the date from the filename
FILENAME_TIME_FORMAT = "%d%m%y_%H%M%S"
@@ -73,15 +79,6 @@ class Snapshot:
def created_at(self):
return int(self._created_at)
- def _create_snapshot_file(self, zipstream):
- """
- Creates the snapshot file (to be run in its own thread)
- """
-
- with open(self.path, "wb") as f:
- for data in zipstream:
- f.write(data)
-
async def create(self):
"""
Create the snapshot
@@ -97,9 +94,15 @@ class Snapshot:
raise aiohttp.web.HTTPInternalServerError(text="Could not create the snapshot directory '{}': {}".format(snapshot_directory, e))
try:
+ begin = time.time()
with tempfile.TemporaryDirectory() as tmpdir:
- zipstream = await export_project(self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True)
- await wait_run_in_executor(self._create_snapshot_file, zipstream)
+ # Do not compress the snapshots
+ with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
+ await export_project(zstream, self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True)
+ async with aiofiles.open(self.path, 'wb') as f:
+ async for chunk in zstream:
+ await f.write(chunk)
+ log.info("Snapshot '{}' created in {:.4f} seconds".format(self.name, time.time() - begin))
except (ValueError, OSError, RuntimeError) as e:
raise aiohttp.web.HTTPConflict(text="Could not create snapshot file '{}': {}".format(self.path, e))
diff --git a/gns3server/handlers/api/compute/dynamips_vm_handler.py b/gns3server/handlers/api/compute/dynamips_vm_handler.py
index 92936924..88bb1a9d 100644
--- a/gns3server/handlers/api/compute/dynamips_vm_handler.py
+++ b/gns3server/handlers/api/compute/dynamips_vm_handler.py
@@ -493,7 +493,7 @@ class DynamipsVMHandler:
if filename[0] == ".":
raise aiohttp.web.HTTPForbidden()
- await response.file(image_path)
+ await response.stream_file(image_path)
@Route.post(
r"/projects/{project_id}/dynamips/nodes/{node_id}/duplicate",
diff --git a/gns3server/handlers/api/compute/iou_handler.py b/gns3server/handlers/api/compute/iou_handler.py
index 68e671cc..43b1c07f 100644
--- a/gns3server/handlers/api/compute/iou_handler.py
+++ b/gns3server/handlers/api/compute/iou_handler.py
@@ -451,4 +451,4 @@ class IOUHandler:
if filename[0] == ".":
raise aiohttp.web.HTTPForbidden()
- await response.file(image_path)
+ await response.stream_file(image_path)
diff --git a/gns3server/handlers/api/compute/project_handler.py b/gns3server/handlers/api/compute/project_handler.py
index c10c1c49..e59d9ab5 100644
--- a/gns3server/handlers/api/compute/project_handler.py
+++ b/gns3server/handlers/api/compute/project_handler.py
@@ -37,6 +37,8 @@ from gns3server.schemas.project import (
import logging
log = logging.getLogger()
+CHUNK_SIZE = 1024 * 8 # 8KB
+
class ProjectHandler:
@@ -248,64 +250,7 @@ class ProjectHandler:
raise aiohttp.web.HTTPForbidden()
path = os.path.join(project.path, path)
- response.content_type = "application/octet-stream"
- response.set_status(200)
- response.enable_chunked_encoding()
-
- try:
- with open(path, "rb") as f:
- await response.prepare(request)
- while True:
- data = f.read(4096)
- if not data:
- break
- await response.write(data)
-
- except FileNotFoundError:
- raise aiohttp.web.HTTPNotFound()
- except PermissionError:
- raise aiohttp.web.HTTPForbidden()
-
- @Route.get(
- r"/projects/{project_id}/stream/{path:.+}",
- description="Stream a file from a project",
- parameters={
- "project_id": "Project UUID",
- },
- status_codes={
- 200: "File returned",
- 403: "Permission denied",
- 404: "The file doesn't exist"
- })
- async def stream_file(request, response):
-
- pm = ProjectManager.instance()
- project = pm.get_project(request.match_info["project_id"])
- path = request.match_info["path"]
- path = os.path.normpath(path)
-
- # Raise an error if user try to escape
- if path[0] == ".":
- raise aiohttp.web.HTTPForbidden()
- path = os.path.join(project.path, path)
-
- response.content_type = "application/octet-stream"
- response.set_status(200)
- response.enable_chunked_encoding()
-
- # FIXME: file streaming is never stopped
- try:
- with open(path, "rb") as f:
- await response.prepare(request)
- while True:
- data = f.read(4096)
- if not data:
- await asyncio.sleep(0.1)
- await response.write(data)
- except FileNotFoundError:
- raise aiohttp.web.HTTPNotFound()
- except PermissionError:
- raise aiohttp.web.HTTPForbidden()
+ await response.stream_file(path)
@Route.post(
r"/projects/{project_id}/files/{path:.+}",
@@ -338,7 +283,7 @@ class ProjectHandler:
with open(path, 'wb+') as f:
while True:
try:
- chunk = await request.content.read(1024)
+ chunk = await request.content.read(CHUNK_SIZE)
except asyncio.TimeoutError:
raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
if not chunk:
@@ -349,64 +294,3 @@ class ProjectHandler:
raise aiohttp.web.HTTPNotFound()
except PermissionError:
raise aiohttp.web.HTTPForbidden()
-
- @Route.get(
- r"/projects/{project_id}/export",
- description="Export a project as a portable archive",
- parameters={
- "project_id": "Project UUID",
- },
- raw=True,
- status_codes={
- 200: "File returned",
- 404: "The project doesn't exist"
- })
- async def export_project(request, response):
-
- pm = ProjectManager.instance()
- project = pm.get_project(request.match_info["project_id"])
- response.content_type = 'application/gns3project'
- response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
- response.enable_chunked_encoding()
- await response.prepare(request)
-
- include_images = bool(int(request.json.get("include_images", "0")))
- for data in project.export(include_images=include_images):
- await response.write(data)
-
- #await response.write_eof() #FIXME: shound't be needed anymore
-
- @Route.post(
- r"/projects/{project_id}/import",
- description="Import a project from a portable archive",
- parameters={
- "project_id": "Project UUID",
- },
- raw=True,
- output=PROJECT_OBJECT_SCHEMA,
- status_codes={
- 200: "Project imported",
- 403: "Forbidden to import project"
- })
- async def import_project(request, response):
-
- pm = ProjectManager.instance()
- project_id = request.match_info["project_id"]
- project = pm.create_project(project_id=project_id)
-
- # We write the content to a temporary location and after we extract it all.
- # It could be more optimal to stream this but it is not implemented in Python.
- # Spooled means the file is temporary kept in memory until max_size is reached
- try:
- with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
- while True:
- chunk = await request.content.read(1024)
- if not chunk:
- break
- temp.write(chunk)
- project.import_zip(temp, gns3vm=bool(int(request.GET.get("gns3vm", "1"))))
- except OSError as e:
- raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
-
- response.json(project)
- response.set_status(201)
diff --git a/gns3server/handlers/api/compute/qemu_handler.py b/gns3server/handlers/api/compute/qemu_handler.py
index d505275c..30453f6e 100644
--- a/gns3server/handlers/api/compute/qemu_handler.py
+++ b/gns3server/handlers/api/compute/qemu_handler.py
@@ -576,4 +576,4 @@ class QEMUHandler:
if filename[0] == ".":
raise aiohttp.web.HTTPForbidden()
- await response.file(image_path)
+ await response.stream_file(image_path)
diff --git a/gns3server/handlers/api/controller/appliance_handler.py b/gns3server/handlers/api/controller/appliance_handler.py
index 20eaeb36..178279e6 100644
--- a/gns3server/handlers/api/controller/appliance_handler.py
+++ b/gns3server/handlers/api/controller/appliance_handler.py
@@ -36,7 +36,7 @@ class ApplianceHandler:
async def list_appliances(request, response):
controller = Controller.instance()
- if request.query.get("update", "no") == "yes":
+ if request.query.get("update", "no").lower() == "yes":
await controller.appliance_manager.download_appliances()
controller.appliance_manager.load_appliances()
response.json([c for c in controller.appliance_manager.appliances.values()])
diff --git a/gns3server/handlers/api/controller/project_handler.py b/gns3server/handlers/api/controller/project_handler.py
index 8a1ad4d9..8ba36697 100644
--- a/gns3server/handlers/api/controller/project_handler.py
+++ b/gns3server/handlers/api/controller/project_handler.py
@@ -16,15 +16,18 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import sys
import aiohttp
import asyncio
import tempfile
+import zipfile
+import aiofiles
+import time
from gns3server.web.route import Route
from gns3server.controller import Controller
from gns3server.controller.import_project import import_project
from gns3server.controller.export_project import export_project
+from gns3server.utils.asyncio import aiozipstream
from gns3server.config import Config
@@ -48,6 +51,8 @@ async def process_websocket(ws):
except aiohttp.WSServerHandshakeError:
pass
+CHUNK_SIZE = 1024 * 8 # 8KB
+
class ProjectHandler:
@@ -300,21 +305,38 @@ class ProjectHandler:
controller = Controller.instance()
project = await controller.get_loaded_project(request.match_info["project_id"])
+ if request.query.get("include_images", "no").lower() == "yes":
+ include_images = True
+ else:
+ include_images = False
+ compression_query = request.query.get("compression", "zip").lower()
+ if compression_query == "zip":
+ compression = zipfile.ZIP_DEFLATED
+ elif compression_query == "none":
+ compression = zipfile.ZIP_STORED
+ elif compression_query == "bzip2":
+ compression = zipfile.ZIP_BZIP2
+        elif compression_query == "lzma":
+            compression = zipfile.ZIP_LZMA
+        else:
+            raise aiohttp.web.HTTPBadRequest(text="Unsupported compression '{}'".format(compression_query))
try:
+ begin = time.time()
with tempfile.TemporaryDirectory() as tmp_dir:
- stream = await export_project(project, tmp_dir, include_images=bool(int(request.query.get("include_images", "0"))))
- # We need to do that now because export could failed and raise an HTTP error
- # that why response start need to be the later possible
- response.content_type = 'application/gns3project'
- response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
- response.enable_chunked_encoding()
- await response.prepare(request)
+ with aiozipstream.ZipFile(compression=compression) as zstream:
+ await export_project(zstream, project, tmp_dir, include_images=include_images)
- for data in stream:
- await response.write(data)
+            # We need to do this now because the export could fail and raise an HTTP error,
+            # which is why the response must be started as late as possible
+ response.content_type = 'application/gns3project'
+ response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
+ response.enable_chunked_encoding()
+ await response.prepare(request)
+
+ async for chunk in zstream:
+ await response.write(chunk)
+
+ log.info("Project '{}' exported in {:.4f} seconds".format(project.name, time.time() - begin))
- #await response.write_eof() #FIXME: shound't be needed anymore
# Will be raise if you have no space left or permission issue on your temporary directory
# RuntimeError: something was wrong during the zip process
except (ValueError, OSError, RuntimeError) as e:
@@ -346,29 +368,23 @@ class ProjectHandler:
# We write the content to a temporary location and then extract it all.
# It would be more efficient to stream it, but that is not implemented in Python.
- # Spooled means the file is temporary kept in memory until max_size is reached
- # Cannot use tempfile.SpooledTemporaryFile(max_size=10000) in Python 3.7 due
- # to a bug https://bugs.python.org/issue26175
try:
- if sys.version_info >= (3, 7) and sys.version_info < (3, 8):
- with tempfile.TemporaryFile() as temp:
+ begin = time.time()
+ with tempfile.TemporaryDirectory() as tmpdir:
+ temp_project_path = os.path.join(tmpdir, "project.zip")
+ async with aiofiles.open(temp_project_path, 'wb') as f:
while True:
- chunk = await request.content.read(1024)
+ chunk = await request.content.read(CHUNK_SIZE)
if not chunk:
break
- temp.write(chunk)
- project = await import_project(controller, request.match_info["project_id"], temp, location=path, name=name)
- else:
- with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
- while True:
- chunk = await request.content.read(1024)
- if not chunk:
- break
- temp.write(chunk)
- project = await import_project(controller, request.match_info["project_id"], temp, location=path, name=name)
+ await f.write(chunk)
+
+ with open(temp_project_path, "rb") as f:
+ project = await import_project(controller, request.match_info["project_id"], f, location=path, name=name)
+
+ log.info("Project '{}' imported in {:.4f} seconds".format(project.name, time.time() - begin))
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
-
response.json(project)
response.set_status(201)
@@ -427,23 +443,7 @@ class ProjectHandler:
raise aiohttp.web.HTTPForbidden()
path = os.path.join(project.path, path)
- response.content_type = "application/octet-stream"
- response.set_status(200)
- response.enable_chunked_encoding()
-
- try:
- with open(path, "rb") as f:
- await response.prepare(request)
- while True:
- data = f.read(4096)
- if not data:
- break
- await response.write(data)
-
- except FileNotFoundError:
- raise aiohttp.web.HTTPNotFound()
- except PermissionError:
- raise aiohttp.web.HTTPForbidden()
+ await response.stream_file(path)
@Route.post(
r"/projects/{project_id}/files/{path:.+}",
@@ -472,15 +472,15 @@ class ProjectHandler:
response.set_status(200)
try:
- with open(path, 'wb+') as f:
+ async with aiofiles.open(path, 'wb+') as f:
while True:
try:
- chunk = await request.content.read(1024)
+ chunk = await request.content.read(CHUNK_SIZE)
except asyncio.TimeoutError:
raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
if not chunk:
break
- f.write(chunk)
+ await f.write(chunk)
except FileNotFoundError:
raise aiohttp.web.HTTPNotFound()
except PermissionError:
diff --git a/gns3server/handlers/api/controller/symbol_handler.py b/gns3server/handlers/api/controller/symbol_handler.py
index 5b73ea63..3b754379 100644
--- a/gns3server/handlers/api/controller/symbol_handler.py
+++ b/gns3server/handlers/api/controller/symbol_handler.py
@@ -53,7 +53,7 @@ class SymbolHandler:
controller = Controller.instance()
try:
- await response.file(controller.symbols.get_path(request.match_info["symbol_id"]))
+ await response.stream_file(controller.symbols.get_path(request.match_info["symbol_id"]))
except (KeyError, OSError) as e:
log.warning("Could not get symbol file: {}".format(e))
response.set_status(404)
diff --git a/gns3server/handlers/index_handler.py b/gns3server/handlers/index_handler.py
index 676d40fe..88bd4e76 100644
--- a/gns3server/handlers/index_handler.py
+++ b/gns3server/handlers/index_handler.py
@@ -92,7 +92,7 @@ class IndexHandler:
if not os.path.exists(static):
static = get_static_path(os.path.join('web-ui', 'index.html'))
- await response.file(static)
+ await response.stream_file(static)
@Route.get(
r"/v1/version",
diff --git a/gns3server/utils/asyncio/aiozipstream.py b/gns3server/utils/asyncio/aiozipstream.py
new file mode 100644
index 00000000..f6062b5e
--- /dev/null
+++ b/gns3server/utils/asyncio/aiozipstream.py
@@ -0,0 +1,430 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 GNS3 Technologies Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Iterable ZIP archive generator.
+
+Derived directly from zipfile.py and the zipstream project
+https://github.com/allanlei/python-zipstream
+"""
+
+import os
+import sys
+import stat
+import struct
+import time
+import zipfile
+import asyncio
+import aiofiles
+from concurrent import futures
+from async_generator import async_generator, yield_
+
+from zipfile import (structCentralDir, structEndArchive64, structEndArchive, structEndArchive64Locator,
+ stringCentralDir, stringEndArchive64, stringEndArchive, stringEndArchive64Locator)
+
+stringDataDescriptor = b'PK\x07\x08' # magic number for data descriptor
+
+
+def _get_compressor(compress_type):
+ """
+ Return the compressor.
+ """
+
+ if compress_type == zipfile.ZIP_DEFLATED:
+ from zipfile import zlib
+ return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
+ elif compress_type == zipfile.ZIP_BZIP2:
+ from zipfile import bz2
+ return bz2.BZ2Compressor()
+ elif compress_type == zipfile.ZIP_LZMA:
+ from zipfile import LZMACompressor
+ return LZMACompressor()
+ else:
+ return None
+
+
+class PointerIO(object):
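+    """
+    Write-only file-like object that stores no data: write() only advances
+    a byte counter and returns the bytes, so the zip stream can yield them
+    to whoever is consuming the archive.
+    """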
+
+ def __init__(self, mode='wb'):
+ if mode not in ('wb', ):
+ raise RuntimeError('zipstream.ZipFile() requires mode "wb"')
+ self.data_pointer = 0
+ self.__mode = mode
+ self.__closed = False
+
+ @property
+ def mode(self):
+ return self.__mode
+
+ @property
+ def closed(self):
+ return self.__closed
+
+ def close(self):
+ self.__closed = True
+
+ def flush(self):
+ pass
+
+ def next(self):
+ raise NotImplementedError()
+
+ def tell(self):
+ return self.data_pointer
+
+    def truncate(self, size=None):
+ raise NotImplementedError()
+
+ def write(self, data):
+ if self.closed:
+ raise ValueError('I/O operation on closed file')
+
+ if isinstance(data, str):
+ data = data.encode('utf-8')
+ if not isinstance(data, bytes):
+ raise TypeError('expected bytes')
+ self.data_pointer += len(data)
+ return data
+
+
+class ZipInfo(zipfile.ZipInfo):
+
+ def __init__(self, *args, **kwargs):
+ zipfile.ZipInfo.__init__(self, *args, **kwargs)
+
+ def DataDescriptor(self):
+ """
+ crc-32 4 bytes
+ compressed size 4 bytes
+ uncompressed size 4 bytes
+ """
+
+ if self.compress_size > zipfile.ZIP64_LIMIT or self.file_size > zipfile.ZIP64_LIMIT:
+ fmt = b'<4sLQQ'
+ else:
+ fmt = b'<4sLLL'
+ return struct.pack(fmt, stringDataDescriptor, self.CRC, self.compress_size, self.file_size)
+
+
+class ZipFile(zipfile.ZipFile):
+
+ def __init__(self, fileobj=None, mode='w', compression=zipfile.ZIP_STORED, allowZip64=True, chunksize=32768):
+ """Open the ZIP file with mode write "w"."""
+
+ if mode not in ('w', ):
+ raise RuntimeError('aiozipstream.ZipFile() requires mode "w"')
+ if fileobj is None:
+ fileobj = PointerIO()
+
+ self._comment = b''
+ zipfile.ZipFile.__init__(self, fileobj, mode=mode, compression=compression, allowZip64=allowZip64)
+ self._chunksize = chunksize
+ self.paths_to_write = []
+
+ def __aiter__(self):
+ return self._stream()
+
+ @property
+ def comment(self):
+ """
+ The comment text associated with the ZIP file.
+ """
+
+ return self._comment
+
+ @comment.setter
+ def comment(self, comment):
+ """
+ Add a comment text associated with the ZIP file.
+ """
+
+ if not isinstance(comment, bytes):
+ raise TypeError("comment: expected bytes, got %s" % type(comment))
+ # check for valid comment length
+ if len(comment) >= zipfile.ZIP_MAX_COMMENT:
+ if self.debug:
+ print('Archive comment is too long; truncating to %d bytes' % zipfile.ZIP_MAX_COMMENT)
+ comment = comment[:zipfile.ZIP_MAX_COMMENT]
+ self._comment = comment
+ self._didModify = True
+
+ @async_generator
+ async def data_generator(self, path):
+
+ async with aiofiles.open(path, "rb") as f:
+ while True:
+ part = await f.read(self._chunksize)
+ if not part:
+ break
+ await yield_(part)
+ return
+
+ async def _run_in_executor(self, task, *args, **kwargs):
+ """
+        Run a synchronous task in a separate thread and await its result.
+ """
+
+ loop = asyncio.get_event_loop()
+ return await loop.run_in_executor(futures.ThreadPoolExecutor(max_workers=1), task, *args, **kwargs)
+
+ @async_generator
+ async def _stream(self):
+
+ for kwargs in self.paths_to_write:
+ async for chunk in self._write(**kwargs):
+ await yield_(chunk)
+ for chunk in self._close():
+ await yield_(chunk)
+
+ def write(self, filename, arcname=None, compress_type=None):
+ """
+ Write a file to the archive under the name `arcname`.
+ """
+
+ kwargs = {'filename': filename, 'arcname': arcname, 'compress_type': compress_type}
+ self.paths_to_write.append(kwargs)
+
+ def write_iter(self, arcname, iterable, compress_type=None):
+ """
+ Write the bytes iterable `iterable` to the archive under the name `arcname`.
+ """
+
+ kwargs = {'arcname': arcname, 'iterable': iterable, 'compress_type': compress_type}
+ self.paths_to_write.append(kwargs)
+
+ def writestr(self, arcname, data, compress_type=None):
+ """
+ Writes a str into ZipFile by wrapping data as a generator
+ """
+
+ def _iterable():
+ yield data
+ return self.write_iter(arcname, _iterable(), compress_type=compress_type)
+
+ @async_generator
+ async def _write(self, filename=None, iterable=None, arcname=None, compress_type=None):
+ """
+ Put the bytes from filename into the archive under the name `arcname`.
+ """
+
+ if not self.fp:
+ raise RuntimeError(
+ "Attempt to write to ZIP archive that was already closed")
+ if (filename is None and iterable is None) or (filename is not None and iterable is not None):
+            raise ValueError("exactly one of filename or iterable must be provided")
+
+ if filename:
+ st = os.stat(filename)
+ isdir = stat.S_ISDIR(st.st_mode)
+ mtime = time.localtime(st.st_mtime)
+ date_time = mtime[0:6]
+ else:
+ st, isdir, date_time = None, False, time.localtime()[0:6]
+ # Create ZipInfo instance to store file information
+ if arcname is None:
+ arcname = filename
+ arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
+ while arcname[0] in (os.sep, os.altsep):
+ arcname = arcname[1:]
+ if isdir:
+ arcname += '/'
+ zinfo = ZipInfo(arcname, date_time)
+ if st:
+ zinfo.external_attr = (st[0] & 0xFFFF) << 16 # Unix attributes
+ else:
+ zinfo.external_attr = 0o600 << 16 # ?rw-------
+ if compress_type is None:
+ zinfo.compress_type = self.compression
+ else:
+ zinfo.compress_type = compress_type
+
+ if st:
+ zinfo.file_size = st[6]
+ else:
+ zinfo.file_size = 0
+ zinfo.flag_bits = 0x00
+ zinfo.flag_bits |= 0x08 # ZIP flag bits, bit 3 indicates presence of data descriptor
+ zinfo.header_offset = self.fp.tell() # Start of header bytes
+ if zinfo.compress_type == zipfile.ZIP_LZMA:
+ # Compressed data includes an end-of-stream (EOS) marker
+ zinfo.flag_bits |= 0x02
+
+ self._writecheck(zinfo)
+ self._didModify = True
+
+ if isdir:
+ zinfo.file_size = 0
+ zinfo.compress_size = 0
+ zinfo.CRC = 0
+ self.filelist.append(zinfo)
+ self.NameToInfo[zinfo.filename] = zinfo
+ await yield_(self.fp.write(zinfo.FileHeader(False)))
+ return
+
+ cmpr = _get_compressor(zinfo.compress_type)
+
+ # Must overwrite CRC and sizes with correct data later
+ zinfo.CRC = CRC = 0
+ zinfo.compress_size = compress_size = 0
+ # Compressed size can be larger than uncompressed size
+ zip64 = self._allowZip64 and zinfo.file_size * 1.05 > zipfile.ZIP64_LIMIT
+ await yield_(self.fp.write(zinfo.FileHeader(zip64)))
+
+ file_size = 0
+ if filename:
+ async for buf in self.data_generator(filename):
+ file_size = file_size + len(buf)
+ CRC = zipfile.crc32(buf, CRC) & 0xffffffff
+ if cmpr:
+ buf = await self._run_in_executor(cmpr.compress, buf)
+ compress_size = compress_size + len(buf)
+ await yield_(self.fp.write(buf))
+ else: # we have an iterable
+ for buf in iterable:
+ file_size = file_size + len(buf)
+ CRC = zipfile.crc32(buf, CRC) & 0xffffffff
+ if cmpr:
+ buf = await self._run_in_executor(cmpr.compress, buf)
+ compress_size = compress_size + len(buf)
+ await yield_(self.fp.write(buf))
+
+ if cmpr:
+ buf = cmpr.flush()
+ compress_size = compress_size + len(buf)
+ await yield_(self.fp.write(buf))
+ zinfo.compress_size = compress_size
+ else:
+ zinfo.compress_size = file_size
+ zinfo.CRC = CRC
+ zinfo.file_size = file_size
+ if not zip64 and self._allowZip64:
+ if file_size > zipfile.ZIP64_LIMIT:
+ raise RuntimeError('File size has increased during compressing')
+ if compress_size > zipfile.ZIP64_LIMIT:
+ raise RuntimeError('Compressed size larger than uncompressed size')
+
+ await yield_(self.fp.write(zinfo.DataDescriptor()))
+ self.filelist.append(zinfo)
+ self.NameToInfo[zinfo.filename] = zinfo
+
+ def _close(self):
+ """
+ Close the file, and for mode "w" write the ending records.
+ """
+
+ if self.fp is None:
+ return
+
+ try:
+ if self.mode in ('w', 'a') and self._didModify: # write ending records
+ count = 0
+ pos1 = self.fp.tell()
+ for zinfo in self.filelist: # write central directory
+ count = count + 1
+ dt = zinfo.date_time
+ dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
+ dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
+ extra = []
+ if zinfo.file_size > zipfile.ZIP64_LIMIT or zinfo.compress_size > zipfile.ZIP64_LIMIT:
+ extra.append(zinfo.file_size)
+ extra.append(zinfo.compress_size)
+ file_size = 0xffffffff
+ compress_size = 0xffffffff
+ else:
+ file_size = zinfo.file_size
+ compress_size = zinfo.compress_size
+
+ if zinfo.header_offset > zipfile.ZIP64_LIMIT:
+ extra.append(zinfo.header_offset)
+ header_offset = 0xffffffff
+ else:
+ header_offset = zinfo.header_offset
+
+ extra_data = zinfo.extra
+ min_version = 0
+ if extra:
+ # Append a ZIP64 field to the extra's
+                        extra_data = struct.pack(
+                            b'<HH' + b'Q' * len(extra),
+                            1, 8 * len(extra), *extra) + extra_data
+
+                        min_version = zipfile.ZIP64_VERSION
+
+                    if zinfo.compress_type == zipfile.ZIP_BZIP2:
+                        min_version = max(zipfile.BZIP2_VERSION, min_version)
+                    elif zinfo.compress_type == zipfile.ZIP_LZMA:
+                        min_version = max(zipfile.LZMA_VERSION, min_version)
+
+                    extract_version = max(min_version, zinfo.extract_version)
+                    create_version = max(min_version, zinfo.create_version)
+                    filename, flag_bits = zinfo._encodeFilenameFlags()
+                    centdir = struct.pack(structCentralDir,
+                                          stringCentralDir, create_version,
+                                          zinfo.create_system, extract_version, zinfo.reserved,
+                                          flag_bits, zinfo.compress_type, dostime, dosdate,
+                                          zinfo.CRC, compress_size, file_size,
+                                          len(filename), len(extra_data), len(zinfo.comment),
+                                          0, zinfo.internal_attr, zinfo.external_attr,
+                                          header_offset)
+                    yield self.fp.write(centdir)
+                    yield self.fp.write(filename)
+                    yield self.fp.write(extra_data)
+                    yield self.fp.write(zinfo.comment)
+
+                pos2 = self.fp.tell()
+                # Write end-of-zip-archive record
+                centDirCount = count
+                centDirSize = pos2 - pos1
+                centDirOffset = pos1
+                if (centDirCount >= zipfile.ZIP_FILECOUNT_LIMIT or
+ centDirOffset > zipfile.ZIP64_LIMIT or
+ centDirSize > zipfile.ZIP64_LIMIT):
+ # Need to write the ZIP64 end-of-archive records
+ zip64endrec = struct.pack(
+ structEndArchive64, stringEndArchive64,
+ 44, 45, 45, 0, 0, centDirCount, centDirCount,
+ centDirSize, centDirOffset)
+ yield self.fp.write(zip64endrec)
+
+ zip64locrec = struct.pack(
+ structEndArchive64Locator,
+ stringEndArchive64Locator, 0, pos2, 1)
+ yield self.fp.write(zip64locrec)
+ centDirCount = min(centDirCount, 0xFFFF)
+ centDirSize = min(centDirSize, 0xFFFFFFFF)
+ centDirOffset = min(centDirOffset, 0xFFFFFFFF)
+
+ endrec = struct.pack(structEndArchive, stringEndArchive,
+ 0, 0, centDirCount, centDirCount,
+ centDirSize, centDirOffset, len(self._comment))
+ yield self.fp.write(endrec)
+ yield self.fp.write(self._comment)
+ self.fp.flush()
+ finally:
+ fp = self.fp
+ self.fp = None
+ if not self._filePassed:
+ fp.close()
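An end-to-end sketch of the new module: entries are queued first, then the archive is produced chunk by chunk as it is iterated. The file paths and names here are illustrative:

```python
import asyncio
from gns3server.utils.asyncio import aiozipstream

async def build_archive(dest_path):
    with aiozipstream.ZipFile() as zstream:  # default compression is ZIP_STORED
        zstream.writestr("readme.txt", b"streamed zip demo")  # in-memory data
        zstream.write("/etc/hostname", arcname="hostname")    # any local file
        with open(dest_path, "wb") as f:
            # Reading, CRC computation and compression all happen lazily here.
            async for chunk in zstream:
                f.write(chunk)

asyncio.get_event_loop().run_until_complete(build_archive("demo.zip"))
```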
diff --git a/gns3server/web/response.py b/gns3server/web/response.py
index 733ce466..6405dd4e 100644
--- a/gns3server/web/response.py
+++ b/gns3server/web/response.py
@@ -20,7 +20,7 @@ import jsonschema
import aiohttp
import aiohttp.web
import mimetypes
-import asyncio
+import aiofiles
import logging
import jinja2
import sys
@@ -32,6 +32,8 @@ from ..version import __version__
log = logging.getLogger(__name__)
renderer = jinja2.Environment(loader=jinja2.FileSystemLoader(get_resource('templates')))
+CHUNK_SIZE = 1024 * 8 # 8KB
+
class Response(aiohttp.web.Response):
@@ -112,16 +114,21 @@ class Response(aiohttp.web.Response):
raise aiohttp.web.HTTPBadRequest(text="{}".format(e))
self.body = json.dumps(answer, indent=4, sort_keys=True).encode('utf-8')
- async def file(self, path, status=200, set_content_length=True):
+ async def stream_file(self, path, status=200, set_content_type=None, set_content_length=True):
"""
- Return a file as a response
+ Stream a file as a response
"""
+
if not os.path.exists(path):
raise aiohttp.web.HTTPNotFound()
- ct, encoding = mimetypes.guess_type(path)
- if not ct:
- ct = 'application/octet-stream'
+ if not set_content_type:
+ ct, encoding = mimetypes.guess_type(path)
+ if not ct:
+ ct = 'application/octet-stream'
+        else:
+            ct = set_content_type
+            encoding = None
+
if encoding:
self.headers[aiohttp.hdrs.CONTENT_ENCODING] = encoding
self.content_type = ct
@@ -136,16 +143,13 @@ class Response(aiohttp.web.Response):
self.set_status(status)
try:
- with open(path, 'rb') as fobj:
+ async with aiofiles.open(path, 'rb') as f:
await self.prepare(self._request)
-
while True:
- data = fobj.read(4096)
+ data = await f.read(CHUNK_SIZE)
if not data:
break
await self.write(data)
- # await self.drain()
-
except FileNotFoundError:
raise aiohttp.web.HTTPNotFound()
except PermissionError:
diff --git a/requirements.txt b/requirements.txt
index 959f9adc..207656e1 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,11 @@
jsonschema==2.6.0 # pyup: ignore
aiohttp==3.5.4
aiohttp-cors==0.7.0
+aiofiles==0.4.0
+async_generator>=1.10
Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
-zipstream>=1.1.4
prompt-toolkit==1.0.15
async-timeout==3.0.1
distro>=1.3.0
diff --git a/tests/controller/test_compute.py b/tests/controller/test_compute.py
index e0410db0..f03b680b 100644
--- a/tests/controller/test_compute.py
+++ b/tests/controller/test_compute.py
@@ -293,15 +293,6 @@ def test_json(compute):
}
-def test_streamFile(project, async_run, compute):
- response = MagicMock()
- response.status = 200
- with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
- async_run(compute.stream_file(project, "test/titi", timeout=120))
- mock.assert_called_with("GET", "https://example.com:84/v2/compute/projects/{}/stream/test/titi".format(project.id), auth=None, timeout=120)
- async_run(compute.close())
-
-
def test_downloadFile(project, async_run, compute):
response = MagicMock()
response.status = 200
@@ -310,6 +301,7 @@ def test_downloadFile(project, async_run, compute):
mock.assert_called_with("GET", "https://example.com:84/v2/compute/projects/{}/files/test/titi".format(project.id), auth=None)
async_run(compute.close())
+
def test_close(compute, async_run):
assert compute.connected is True
async_run(compute.close())
diff --git a/tests/controller/test_export_project.py b/tests/controller/test_export_project.py
index c3532d9e..b562a8bc 100644
--- a/tests/controller/test_export_project.py
+++ b/tests/controller/test_export_project.py
@@ -29,6 +29,7 @@ from tests.utils import AsyncioMagicMock, AsyncioBytesIO
from gns3server.controller.project import Project
from gns3server.controller.export_project import export_project, _is_exportable
+from gns3server.utils.asyncio import aiozipstream
@pytest.fixture
@@ -51,6 +52,13 @@ def node(controller, project, async_run):
return node
+async def write_file(path, z):
+
+ with open(path, 'wb') as f:
+ async for chunk in z:
+ f.write(chunk)
+
+
def test_exportable_files():
assert _is_exportable("hello/world")
assert not _is_exportable("project-files/tmp")
@@ -103,12 +111,10 @@ def test_export(tmpdir, project, async_run):
with open(os.path.join(path, "project-files", "snapshots", "test"), 'w+') as f:
f.write("WORLD")
- with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
- z = async_run(export_project(project, str(tmpdir), include_images=False))
-
- with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
- for data in z:
- f.write(data)
+ with aiozipstream.ZipFile() as z:
+ with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
+ async_run(export_project(z, project, str(tmpdir), include_images=False))
+ async_run(write_file(str(tmpdir / 'zipfile.zip'), z))
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
with myzip.open("vm-1/dynamips/test") as myfile:
@@ -128,7 +134,7 @@ def test_export(tmpdir, project, async_run):
assert topo["computes"] == []
-def test_export_vm(tmpdir, project, async_run, controller):
+def test_export_vm(tmpdir, project, async_run):
"""
If data is on a remote server export it locally before
sending it in the archive.
@@ -154,12 +160,10 @@ def test_export_vm(tmpdir, project, async_run, controller):
with open(os.path.join(path, "test.gns3"), 'w+') as f:
f.write("{}")
- z = async_run(export_project(project, str(tmpdir)))
- assert compute.list_files.called
-
- with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
- for data in z:
- f.write(data)
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir)))
+ assert compute.list_files.called
+ async_run(write_file(str(tmpdir / 'zipfile.zip'), z))
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
with myzip.open("vm-1/dynamips/test") as myfile:
@@ -169,7 +173,7 @@ def test_export_vm(tmpdir, project, async_run, controller):
def test_export_disallow_running(tmpdir, project, node, async_run):
"""
- Dissallow export when a node is running
+ Disallow export when a node is running
"""
path = project.path
@@ -189,12 +193,13 @@ def test_export_disallow_running(tmpdir, project, node, async_run):
node._status = "started"
with pytest.raises(aiohttp.web.HTTPConflict):
- async_run(export_project(project, str(tmpdir)))
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir)))
def test_export_disallow_some_type(tmpdir, project, async_run):
"""
- Dissalow export for some node type
+ Disallow export for some node type
"""
path = project.path
@@ -213,8 +218,10 @@ def test_export_disallow_some_type(tmpdir, project, async_run):
json.dump(topology, f)
with pytest.raises(aiohttp.web.HTTPConflict):
- z = async_run(export_project(project, str(tmpdir)))
- z = async_run(export_project(project, str(tmpdir), allow_all_nodes=True))
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir)))
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir), allow_all_nodes=True))
# VirtualBox is always disallowed
topology = {
@@ -232,7 +239,8 @@ def test_export_disallow_some_type(tmpdir, project, async_run):
with open(os.path.join(path, "test.gns3"), 'w+') as f:
json.dump(topology, f)
with pytest.raises(aiohttp.web.HTTPConflict):
- z = async_run(export_project(project, str(tmpdir), allow_all_nodes=True))
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir), allow_all_nodes=True))
def test_export_fix_path(tmpdir, project, async_run):
@@ -264,10 +272,9 @@ def test_export_fix_path(tmpdir, project, async_run):
with open(os.path.join(path, "test.gns3"), 'w+') as f:
json.dump(topology, f)
- z = async_run(export_project(project, str(tmpdir)))
- with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
- for data in z:
- f.write(data)
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir)))
+ async_run(write_file(str(tmpdir / 'zipfile.zip'), z))
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
with myzip.open("project.gns3") as myfile:
@@ -303,11 +310,10 @@ def test_export_with_images(tmpdir, project, async_run):
with open(os.path.join(path, "test.gns3"), 'w+') as f:
json.dump(topology, f)
- with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
- z = async_run(export_project(project, str(tmpdir), include_images=True))
- with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
- for data in z:
- f.write(data)
+ with aiozipstream.ZipFile() as z:
+ with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
+ async_run(export_project(z, project, str(tmpdir), include_images=True))
+ async_run(write_file(str(tmpdir / 'zipfile.zip'), z))
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
myzip.getinfo("images/IOS/test.image")
@@ -341,11 +347,9 @@ def test_export_keep_compute_id(tmpdir, project, async_run):
}
json.dump(data, f)
- z = async_run(export_project(project, str(tmpdir), keep_compute_id=True))
-
- with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
- for data in z:
- f.write(data)
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir), keep_compute_id=True))
+ async_run(write_file(str(tmpdir / 'zipfile.zip'), z))
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
with myzip.open("project.gns3") as myfile:
@@ -353,7 +357,8 @@ def test_export_keep_compute_id(tmpdir, project, async_run):
assert topo["nodes"][0]["compute_id"] == "6b7149c8-7d6e-4ca0-ab6b-daa8ab567be0"
assert len(topo["computes"]) == 1
-def test_export_images_from_vm(tmpdir, project, async_run, controller):
+
+def test_export_images_from_vm(tmpdir, project, async_run):
"""
If data is on a remote server export it locally before
sending it in the archive.
@@ -405,12 +410,10 @@ def test_export_images_from_vm(tmpdir, project, async_run, controller):
with open(os.path.join(path, "test.gns3"), 'w+') as f:
f.write(json.dumps(topology))
- z = async_run(export_project(project, str(tmpdir), include_images=True))
- assert compute.list_files.called
-
- with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
- for data in z:
- f.write(data)
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir), include_images=True))
+ assert compute.list_files.called
+ async_run(write_file(str(tmpdir / 'zipfile.zip'), z))
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
with myzip.open("vm-1/dynamips/test") as myfile:
@@ -450,12 +453,9 @@ def test_export_with_ignoring_snapshots(tmpdir, project, async_run):
os.makedirs(snapshots_dir)
Path(os.path.join(snapshots_dir, 'snap.gns3project')).touch()
- z = async_run(export_project(project, str(tmpdir), keep_compute_id=True))
-
- with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
- for data in z:
- f.write(data)
+ with aiozipstream.ZipFile() as z:
+ async_run(export_project(z, project, str(tmpdir), keep_compute_id=True))
+ async_run(write_file(str(tmpdir / 'zipfile.zip'), z))
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
assert not os.path.join('snapshots', 'snap.gns3project') in [f.filename for f in myzip.filelist]
-
diff --git a/tests/controller/test_project.py b/tests/controller/test_project.py
index 9fdd2936..ac4f7256 100644
--- a/tests/controller/test_project.py
+++ b/tests/controller/test_project.py
@@ -21,7 +21,6 @@ import sys
import uuid
import pytest
import aiohttp
-import zipstream
from unittest.mock import MagicMock
from tests.utils import AsyncioMagicMock, asyncio_patch
from unittest.mock import patch
@@ -532,15 +531,6 @@ def test_duplicate(project, async_run, controller):
assert list(new_project.nodes.values())[1].compute.id == "remote"
-def test_duplicate_with_zipfile_encoding_issues(project, async_run, controller):
- zf = zipstream.ZipFile()
- zf.writestr('test\udcc3', "data")
-
- with asyncio_patch('gns3server.controller.project.export_project', return_value=zf):
- with pytest.raises(aiohttp.web.HTTPConflict):
- async_run(project.duplicate(name="Hello"))
-
-
def test_snapshots(project):
"""
List the snapshots
diff --git a/tests/handlers/api/compute/test_cloud.py b/tests/handlers/api/compute/test_cloud.py
index 0d96a80b..38f8b426 100644
--- a/tests/handlers/api/compute/test_cloud.py
+++ b/tests/handlers/api/compute/test_cloud.py
@@ -64,7 +64,7 @@ def test_cloud_nio_create_udp(http_compute, vm):
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -82,7 +82,7 @@ def test_cloud_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode()
- assert response.route == "/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -93,7 +93,7 @@ def test_cloud_delete_nio(http_compute, vm):
"rhost": "127.0.0.1"})
response = http_compute.delete("/projects/{project_id}/cloud/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert response.status == 204
- assert response.route == "/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_cloud_delete(http_compute, vm):
diff --git a/tests/handlers/api/compute/test_docker.py b/tests/handlers/api/compute/test_docker.py
index 3f9c3618..b584109e 100644
--- a/tests/handlers/api/compute/test_docker.py
+++ b/tests/handlers/api/compute/test_docker.py
@@ -120,7 +120,7 @@ def test_docker_nio_create_udp(http_compute, vm):
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -141,14 +141,14 @@ def test_docker_update_nio(http_compute, vm):
},
example=True)
assert response.status == 201, response.body.decode()
- assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_docker_delete_nio(http_compute, vm):
with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.adapter_remove_nio_binding") as mock:
response = http_compute.delete("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert response.status == 204
- assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_docker_update(http_compute, vm, tmpdir, free_console_port):
diff --git a/tests/handlers/api/compute/test_iou.py b/tests/handlers/api/compute/test_iou.py
index 5365f921..9058a9d7 100644
--- a/tests/handlers/api/compute/test_iou.py
+++ b/tests/handlers/api/compute/test_iou.py
@@ -201,7 +201,7 @@ def test_iou_nio_create_udp(http_compute, vm):
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -219,7 +219,7 @@ def test_iou_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode()
- assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -229,7 +229,7 @@ def test_iou_nio_create_ethernet(http_compute, vm, ethernet_device):
},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_ethernet"
assert response.json["ethernet_device"] == ethernet_device
@@ -240,7 +240,7 @@ def test_iou_nio_create_ethernet_different_port(http_compute, vm, ethernet_devic
},
example=False)
assert response.status == 201
- assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_ethernet"
assert response.json["ethernet_device"] == ethernet_device
@@ -250,7 +250,7 @@ def test_iou_nio_create_tap(http_compute, vm, ethernet_device):
response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_tap",
"tap_device": ethernet_device})
assert response.status == 201
- assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_tap"
@@ -261,7 +261,7 @@ def test_iou_delete_nio(http_compute, vm):
"rhost": "127.0.0.1"})
response = http_compute.delete("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert response.status == 204
- assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_iou_start_capture(http_compute, vm, tmpdir, project):
diff --git a/tests/handlers/api/compute/test_nat.py b/tests/handlers/api/compute/test_nat.py
index f0ff7776..7e0da71c 100644
--- a/tests/handlers/api/compute/test_nat.py
+++ b/tests/handlers/api/compute/test_nat.py
@@ -65,7 +65,7 @@ def test_nat_nio_create_udp(http_compute, vm):
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -83,7 +83,7 @@ def test_nat_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode()
- assert response.route == "/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -97,7 +97,7 @@ def test_nat_delete_nio(http_compute, vm):
response = http_compute.delete("/projects/{project_id}/nat/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert mock_remove_nio.called
assert response.status == 204
- assert response.route == "/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_nat_delete(http_compute, vm):
diff --git a/tests/handlers/api/compute/test_qemu.py b/tests/handlers/api/compute/test_qemu.py
index 352db57a..c5c1f033 100644
--- a/tests/handlers/api/compute/test_qemu.py
+++ b/tests/handlers/api/compute/test_qemu.py
@@ -189,7 +189,7 @@ def test_qemu_nio_create_udp(http_compute, vm):
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -208,7 +208,7 @@ def test_qemu_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode()
- assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -221,7 +221,7 @@ def test_qemu_delete_nio(http_compute, vm):
"rhost": "127.0.0.1"})
response = http_compute.delete("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert response.status == 204
- assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_qemu_list_binaries(http_compute, vm):
diff --git a/tests/handlers/api/compute/test_traceng.py b/tests/handlers/api/compute/test_traceng.py
index f9ece5e5..ce93e2fb 100644
--- a/tests/handlers/api/compute/test_traceng.py
+++ b/tests/handlers/api/compute/test_traceng.py
@@ -55,7 +55,7 @@ def test_traceng_nio_create_udp(http_compute, vm):
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -76,7 +76,7 @@ def test_traceng_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode("utf-8")
- assert response.route == "/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -88,7 +88,7 @@ def test_traceng_delete_nio(http_compute, vm):
"rhost": "127.0.0.1"})
response = http_compute.delete("/projects/{project_id}/traceng/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert response.status == 204, response.body.decode()
- assert response.route == "/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_traceng_start(http_compute, vm):
diff --git a/tests/handlers/api/compute/test_virtualbox.py b/tests/handlers/api/compute/test_virtualbox.py
index 743141c7..91ec22f1 100644
--- a/tests/handlers/api/compute/test_virtualbox.py
+++ b/tests/handlers/api/compute/test_virtualbox.py
@@ -108,7 +108,7 @@ def test_vbox_nio_create_udp(http_compute, vm):
assert args[0] == 0
assert response.status == 201
- assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -127,7 +127,7 @@ def test_virtualbox_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode()
- assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -141,7 +141,7 @@ def test_vbox_delete_nio(http_compute, vm):
assert args[0] == 0
assert response.status == 204
- assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_vbox_update(http_compute, vm, free_console_port):
diff --git a/tests/handlers/api/compute/test_vmware.py b/tests/handlers/api/compute/test_vmware.py
index 3cca7cd1..1dc9d070 100644
--- a/tests/handlers/api/compute/test_vmware.py
+++ b/tests/handlers/api/compute/test_vmware.py
@@ -116,7 +116,7 @@ def test_vmware_nio_create_udp(http_compute, vm):
assert args[0] == 0
assert response.status == 201
- assert response.route == "/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -135,7 +135,7 @@ def test_vmware_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode()
- assert response.route == "/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -149,7 +149,7 @@ def test_vmware_delete_nio(http_compute, vm):
assert args[0] == 0
assert response.status == 204
- assert response.route == "/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_vmware_update(http_compute, vm, free_console_port):
diff --git a/tests/handlers/api/compute/test_vpcs.py b/tests/handlers/api/compute/test_vpcs.py
index 9cd4ce13..a42cd3fe 100644
--- a/tests/handlers/api/compute/test_vpcs.py
+++ b/tests/handlers/api/compute/test_vpcs.py
@@ -70,7 +70,7 @@ def test_vpcs_nio_create_udp(http_compute, vm):
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
- assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -91,7 +91,7 @@ def test_vpcs_nio_update_udp(http_compute, vm):
"filters": {}},
example=True)
assert response.status == 201, response.body.decode("utf-8")
- assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
@@ -103,7 +103,7 @@ def test_vpcs_delete_nio(http_compute, vm):
"rhost": "127.0.0.1"})
response = http_compute.delete("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
assert response.status == 204, response.body.decode()
- assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+ assert response.route == r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_vpcs_start(http_compute, vm):
diff --git a/tests/handlers/api/controller/test_project.py b/tests/handlers/api/controller/test_project.py
index f7b011dc..6245558c 100644
--- a/tests/handlers/api/controller/test_project.py
+++ b/tests/handlers/api/controller/test_project.py
@@ -235,7 +235,7 @@ def test_export_with_images(http_controller, tmpdir, loop, project):
json.dump(topology, f)
with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
- response = http_controller.get("/projects/{project_id}/export?include_images=1".format(project_id=project.id), raw=True)
+ response = http_controller.get("/projects/{project_id}/export?include_images=yes".format(project_id=project.id), raw=True)
assert response.status == 200
assert response.headers['CONTENT-TYPE'] == 'application/gns3project'
assert response.headers['CONTENT-DISPOSITION'] == 'attachment; filename="{}.gns3project"'.format(project.name)
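The export test now sends include_images=yes rather than include_images=1, presumably tracking a controller-side change in how this query flag is parsed. Query-string values always arrive as strings, so accepting a small set of truthy spellings is the common pattern; a hypothetical helper sketching that idea (the name and accepted values are illustrative, not taken from this patch):

    def parse_bool_flag(value, default=False):
        """Interpret a query-string flag such as include_images=yes."""
        if value is None:
            return default
        return value.lower() in ("1", "true", "yes", "on")

    # parse_bool_flag("yes") -> True; parse_bool_flag("0") -> False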
diff --git a/tests/web/test_response.py b/tests/web/test_response.py
index ad3c2361..82963104 100644
--- a/tests/web/test_response.py
+++ b/tests/web/test_response.py
@@ -34,11 +34,11 @@ def test_response_file(async_run, tmpdir, response):
with open(filename, 'w+') as f:
f.write('world')
- async_run(response.file(filename))
+ async_run(response.stream_file(filename))
assert response.status == 200
def test_response_file_not_found(async_run, tmpdir, response):
filename = str(tmpdir / 'hello-not-found')
- pytest.raises(HTTPNotFound, lambda: async_run(response.file(filename)))
+ pytest.raises(HTTPNotFound, lambda: async_run(response.stream_file(filename)))
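The final hunks track the rename of Response.file() to Response.stream_file() in the web layer, consistent with the chunked, non-blocking file handling introduced elsewhere in this patch. A minimal sketch of what such a method plausibly does under the aiohttp API, assuming an aiofiles read loop and an 8 KB chunk size (the function below is illustrative, not the repository's implementation):

    import os

    import aiofiles
    from aiohttp import web

    CHUNK_SIZE = 1024 * 8  # 8KB

    async def stream_file(request, path):
        # Fail fast with 404 before preparing the response, matching the
        # HTTPNotFound expectation in test_response_file_not_found.
        if not os.path.exists(path):
            raise web.HTTPNotFound(text="File '{}' not found".format(path))
        response = web.StreamResponse(status=200)
        response.content_type = "application/octet-stream"
        await response.prepare(request)
        # Push the file out in fixed-size chunks so large files are never
        # held in memory all at once.
        async with aiofiles.open(path, "rb") as f:
            while True:
                chunk = await f.read(CHUNK_SIZE)
                if not chunk:
                    break
                await response.write(chunk)
        await response.write_eof()
        return response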