Mirror of https://github.com/GNS3/gns3-server (synced 2024-11-28 11:18:11 +00:00)

Merge pull request #1537 from GNS3/import-export-refactoring

Project import/export refactoring. Fixes #1349

Commit 372017f467
@@ -20,6 +20,7 @@ import os
 import struct
 import stat
 import asyncio
+import aiofiles
 import aiohttp
 import socket
@@ -46,6 +47,8 @@ from .nios.nio_ethernet import NIOEthernet
 from ..utils.images import md5sum, remove_checksum, images_directories, default_images_directory, list_images
 from .error import NodeError, ImageMissingError
 
+CHUNK_SIZE = 1024 * 8  # 8KB
+
 
 class BaseManager:
 
@@ -456,7 +459,7 @@ class BaseManager:
 with open(path, "rb") as f:
     await response.prepare(request)
     while nio.capturing:
-        data = f.read(4096)
+        data = f.read(CHUNK_SIZE)
         if not data:
             await asyncio.sleep(0.1)
             continue
@@ -594,18 +597,18 @@ class BaseManager:
 path = os.path.abspath(os.path.join(directory, *os.path.split(filename)))
 if os.path.commonprefix([directory, path]) != directory:
     raise aiohttp.web.HTTPForbidden(text="Could not write image: {}, {} is forbidden".format(filename, path))
-log.info("Writing image file %s", path)
+log.info("Writing image file to '{}'".format(path))
 try:
     remove_checksum(path)
     # We store the file under his final name only when the upload is finished
     tmp_path = path + ".tmp"
     os.makedirs(os.path.dirname(path), exist_ok=True)
-    with open(tmp_path, 'wb') as f:
+    async with aiofiles.open(tmp_path, 'wb') as f:
         while True:
-            packet = await stream.read(4096)
+            chunk = await stream.read(CHUNK_SIZE)
-            if not packet:
+            if not chunk:
                 break
-            f.write(packet)
+            await f.write(chunk)
     os.chmod(tmp_path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
     shutil.move(tmp_path, path)
     await cancellable_wait_run_in_executor(md5sum, path)
@@ -37,6 +37,7 @@ log = logging.getLogger(__name__)
 DOCKER_MINIMUM_API_VERSION = "1.25"
 DOCKER_MINIMUM_VERSION = "1.13"
 DOCKER_PREFERRED_API_VERSION = "1.30"
+CHUNK_SIZE = 1024 * 8  # 8KB
 
 
 class Docker(BaseManager):
@@ -206,7 +207,7 @@ class Docker(BaseManager):
 content = ""
 while True:
     try:
-        chunk = await response.content.read(1024)
+        chunk = await response.content.read(CHUNK_SIZE)
     except aiohttp.ServerDisconnectedError:
         log.error("Disconnected from server while pulling Docker image '{}' from docker hub".format(image))
         break
@@ -320,28 +320,6 @@ class Compute:
     raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image))
 return response
 
-async def stream_file(self, project, path, timeout=None):
-    """
-    Read file of a project and stream it
-
-    :param project: A project object
-    :param path: The path of the file in the project
-    :param timeout: timeout
-    :returns: A file stream
-    """
-
-    url = self._getUrl("/projects/{}/stream/{}".format(project.id, path))
-    response = await self._session().request("GET", url, auth=self._auth, timeout=timeout)
-    if response.status == 404:
-        raise aiohttp.web.HTTPNotFound(text="file '{}' not found on compute".format(path))
-    elif response.status == 403:
-        raise aiohttp.web.HTTPForbidden(text="forbidden to open '{}' on compute".format(path))
-    elif response.status != 200:
-        raise aiohttp.web.HTTPInternalServerError(text="Unexpected error {}: {}: while opening {} on compute".format(response.status,
-                                                                                                                     response.reason,
-                                                                                                                     path))
-    return response
-
 async def http_query(self, method, path, data=None, dont_connect=False, **kwargs):
     """
     :param dont_connect: If true do not reconnect if not connected
@@ -19,31 +19,33 @@ import os
 import sys
 import json
 import asyncio
+import aiofiles
 import aiohttp
 import zipfile
 import tempfile
-import zipstream
 
 from datetime import datetime
 
 import logging
 log = logging.getLogger(__name__)
 
+CHUNK_SIZE = 1024 * 8  # 8KB
 
-async def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False, reset_mac_addresses=False):
+async def export_project(zstream, project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False, reset_mac_addresses=False):
     """
     Export a project to a zip file.
 
     The file will be read chunk by chunk when you iterate over the zip stream.
     Some files like snapshots and packet captures are ignored.
 
+    :param zstream: ZipStream object
+    :param project: Project instance
     :param temporary_dir: A temporary dir where to store intermediate data
     :param include images: save OS images to the zip file
     :param keep_compute_id: If false replace all compute id by local (standard behavior for .gns3project to make it portable)
     :param allow_all_nodes: Allow all nodes type to be include in the zip even if not portable
     :param reset_mac_addresses: Reset MAC addresses for every nodes.
 
-    :returns: ZipStream object
     """
 
     # To avoid issue with data not saved we disallow the export of a running project
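Note: after this change the caller owns the zip stream and is responsible for draining it, as the controller code later in this diff does. A minimal sketch of the new calling pattern (the helper name export_to_file and the compression choice are illustrative, not part of the PR):

import tempfile
import zipfile

import aiofiles

from gns3server.utils.asyncio import aiozipstream
from gns3server.controller.export_project import export_project


async def export_to_file(project, output_path):
    # Sketch only: mirrors the pattern used by Project.duplicate() and
    # Snapshot.create() below; "project" is assumed to be a loaded
    # controller Project instance and "output_path" a writable path.
    with tempfile.TemporaryDirectory() as tmpdir:
        with aiozipstream.ZipFile(compression=zipfile.ZIP_DEFLATED) as zstream:
            await export_project(zstream, project, tmpdir)
            async with aiofiles.open(output_path, "wb") as f:
                async for chunk in zstream:
                    await f.write(chunk)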
@@ -53,8 +55,6 @@ async def export_project(project, temporary_dir, include_images=False, keep_comp
     # Make sure we save the project
     project.dump()
 
-    zstream = zipstream.ZipFile(allowZip64=True)
-
     if not os.path.exists(project._path):
         raise aiohttp.web.HTTPNotFound(text="Project could not be found at '{}'".format(project._path))
 
@@ -80,33 +80,31 @@ async def export_project(project, temporary_dir, include_images=False, keep_comp
             if file.endswith(".gns3"):
                 continue
             _patch_mtime(path)
-            zstream.write(path, os.path.relpath(path, project._path), compress_type=zipfile.ZIP_DEFLATED)
+            zstream.write(path, os.path.relpath(path, project._path))
 
     # Export files from remote computes
-    downloaded_files = set()
     for compute in project.computes:
         if compute.id != "local":
             compute_files = await compute.list_files(project)
             for compute_file in compute_files:
                 if _is_exportable(compute_file["path"]):
-                    (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
-                    f = open(fd, "wb", closefd=True)
+                    log.debug("Downloading file '{}' from compute '{}'".format(compute_file["path"], compute.id))
                     response = await compute.download_file(project, compute_file["path"])
-                    while True:
-                        try:
-                            data = await response.content.read(1024)
-                        except asyncio.TimeoutError:
-                            raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading file '{}' from remote compute {}:{}".format(compute_file["path"], compute.host, compute.port))
-                        if not data:
-                            break
-                        f.write(data)
+                    #if response.status != 200:
+                    #    raise aiohttp.web.HTTPConflict(text="Cannot export file from compute '{}'. Compute returned status code {}.".format(compute.id, response.status))
+                    (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
+                    async with aiofiles.open(fd, 'wb') as f:
+                        while True:
+                            try:
+                                data = await response.content.read(CHUNK_SIZE)
+                            except asyncio.TimeoutError:
+                                raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading file '{}' from remote compute {}:{}".format(compute_file["path"], compute.host, compute.port))
+                            if not data:
+                                break
+                            await f.write(data)
                     response.close()
-                    f.close()
                     _patch_mtime(temp_path)
-                    zstream.write(temp_path, arcname=compute_file["path"], compress_type=zipfile.ZIP_DEFLATED)
-                    downloaded_files.add(compute_file['path'])
-
-    return zstream
 
 
 def _patch_mtime(path):
@@ -232,6 +230,7 @@ async def _patch_project_file(project, path, zstream, include_images, keep_compu
     zstream.writestr("project.gns3", json.dumps(topology).encode())
     return images
 
 
 def _export_local_image(image, zstream):
     """
     Exports a local image to the zip file.
@@ -266,30 +265,26 @@ async def _export_remote_images(project, compute_id, image_type, image, project_
     Export specific image from remote compute.
     """
 
-    log.info("Downloading image '{}' from compute '{}'".format(image, compute_id))
+    log.debug("Downloading image '{}' from compute '{}'".format(image, compute_id))
 
     try:
         compute = [compute for compute in project.computes if compute.id == compute_id][0]
     except IndexError:
         raise aiohttp.web.HTTPConflict(text="Cannot export image from '{}' compute. Compute doesn't exist.".format(compute_id))
 
-    (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
-    f = open(fd, "wb", closefd=True)
     response = await compute.download_image(image_type, image)
 
     if response.status != 200:
-        raise aiohttp.web.HTTPConflict(text="Cannot export image from '{}' compute. Compute returned status code {}.".format(compute_id, response.status))
+        raise aiohttp.web.HTTPConflict(text="Cannot export image from compute '{}'. Compute returned status code {}.".format(compute_id, response.status))
 
-    while True:
-        try:
-            data = await response.content.read(1024)
-        except asyncio.TimeoutError:
-            raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading image '{}' from remote compute {}:{}".format(image, compute.host, compute.port))
-        if not data:
-            break
-        f.write(data)
+    (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
+    async with aiofiles.open(fd, 'wb') as f:
+        while True:
+            try:
+                data = await response.content.read(CHUNK_SIZE)
+            except asyncio.TimeoutError:
+                raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading image '{}' from remote compute {}:{}".format(image, compute.host, compute.port))
+            if not data:
+                break
+            await f.write(data)
     response.close()
-    f.close()
     arcname = os.path.join("images", image_type, image)
-    log.info("Saved {}".format(arcname))
     project_zipfile.write(temp_path, arcname=arcname, compress_type=zipfile.ZIP_DEFLATED)
@@ -20,7 +20,6 @@ import sys
 import json
 import uuid
 import shutil
-import asyncio
 import zipfile
 import aiohttp
 import itertools
@@ -21,9 +21,12 @@ import json
 import uuid
 import copy
 import shutil
+import time
 import asyncio
 import aiohttp
+import aiofiles
 import tempfile
+import zipfile
 
 from uuid import UUID, uuid4
 
@@ -37,7 +40,7 @@ from ..config import Config
 from ..utils.path import check_path_allowed, get_default_project_directory
 from ..utils.asyncio.pool import Pool
 from ..utils.asyncio import locking
-from ..utils.asyncio import wait_run_in_executor
+from ..utils.asyncio import aiozipstream
 from .export_project import export_project
 from .import_project import import_project
 
@@ -947,15 +950,6 @@ class Project:
         while self._loading:
             await asyncio.sleep(0.5)
 
-    def _create_duplicate_project_file(self, path, zipstream):
-        """
-        Creates the project file (to be run in its own thread)
-        """
-
-        with open(path, "wb") as f:
-            for data in zipstream:
-                f.write(data)
-
     async def duplicate(self, name=None, location=None):
         """
         Duplicate a project
@@ -975,12 +969,24 @@ class Project:
         self.dump()
         assert self._status != "closed"
         try:
+            begin = time.time()
             with tempfile.TemporaryDirectory() as tmpdir:
-                zipstream = await export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True, reset_mac_addresses=True)
-                project_path = os.path.join(tmpdir, "project.gns3p")
-                await wait_run_in_executor(self._create_duplicate_project_file, project_path, zipstream)
-                with open(project_path, "rb") as f:
-                    project = await import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
+                # Do not compress the exported project when duplicating
+                with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
+                    await export_project(zstream, self, tmpdir, keep_compute_id=True, allow_all_nodes=True, reset_mac_addresses=True)
+
+                    # export the project to a temporary location
+                    project_path = os.path.join(tmpdir, "project.gns3p")
+                    log.info("Exporting project to '{}'".format(project_path))
+                    async with aiofiles.open(project_path, 'wb') as f:
+                        async for chunk in zstream:
+                            await f.write(chunk)
+
+                    # import the temporary project
+                    with open(project_path, "rb") as f:
+                        project = await import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
+
+                    log.info("Project '{}' duplicated in {:.4f} seconds".format(project.name, time.time() - begin))
         except (ValueError, OSError, UnicodeEncodeError) as e:
             raise aiohttp.web.HTTPConflict(text="Cannot duplicate project: {}".format(str(e)))
 
@@ -20,14 +20,20 @@ import os
 import uuid
 import shutil
 import tempfile
-import asyncio
+import aiofiles
+import zipfile
+import time
 import aiohttp.web
 from datetime import datetime, timezone
 
 from ..utils.asyncio import wait_run_in_executor
+from ..utils.asyncio import aiozipstream
 from .export_project import export_project
 from .import_project import import_project
 
+import logging
+log = logging.getLogger(__name__)
+
 
 # The string use to extract the date from the filename
 FILENAME_TIME_FORMAT = "%d%m%y_%H%M%S"
@@ -73,15 +79,6 @@ class Snapshot:
     def created_at(self):
         return int(self._created_at)
 
-    def _create_snapshot_file(self, zipstream):
-        """
-        Creates the snapshot file (to be run in its own thread)
-        """
-
-        with open(self.path, "wb") as f:
-            for data in zipstream:
-                f.write(data)
-
     async def create(self):
         """
         Create the snapshot
@@ -97,9 +94,15 @@ class Snapshot:
             raise aiohttp.web.HTTPInternalServerError(text="Could not create the snapshot directory '{}': {}".format(snapshot_directory, e))
 
         try:
+            begin = time.time()
             with tempfile.TemporaryDirectory() as tmpdir:
-                zipstream = await export_project(self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True)
-                await wait_run_in_executor(self._create_snapshot_file, zipstream)
+                # Do not compress the snapshots
+                with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
+                    await export_project(zstream, self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True)
+                    async with aiofiles.open(self.path, 'wb') as f:
+                        async for chunk in zstream:
+                            await f.write(chunk)
+            log.info("Snapshot '{}' created in {:.4f} seconds".format(self.name, time.time() - begin))
         except (ValueError, OSError, RuntimeError) as e:
             raise aiohttp.web.HTTPConflict(text="Could not create snapshot file '{}': {}".format(self.path, e))
 
@@ -493,7 +493,7 @@ class DynamipsVMHandler:
         if filename[0] == ".":
             raise aiohttp.web.HTTPForbidden()
 
-        await response.file(image_path)
+        await response.stream_file(image_path)
 
     @Route.post(
         r"/projects/{project_id}/dynamips/nodes/{node_id}/duplicate",
@@ -451,4 +451,4 @@ class IOUHandler:
         if filename[0] == ".":
             raise aiohttp.web.HTTPForbidden()
 
-        await response.file(image_path)
+        await response.stream_file(image_path)
@@ -37,6 +37,8 @@ from gns3server.schemas.project import (
 import logging
 log = logging.getLogger()
 
+CHUNK_SIZE = 1024 * 8  # 8KB
+
 
 class ProjectHandler:
 
@@ -248,64 +250,7 @@ class ProjectHandler:
             raise aiohttp.web.HTTPForbidden()
         path = os.path.join(project.path, path)
 
-        response.content_type = "application/octet-stream"
-        response.set_status(200)
-        response.enable_chunked_encoding()
-
-        try:
-            with open(path, "rb") as f:
-                await response.prepare(request)
-                while True:
-                    data = f.read(4096)
-                    if not data:
-                        break
-                    await response.write(data)
-
-        except FileNotFoundError:
-            raise aiohttp.web.HTTPNotFound()
-        except PermissionError:
-            raise aiohttp.web.HTTPForbidden()
-
-    @Route.get(
-        r"/projects/{project_id}/stream/{path:.+}",
-        description="Stream a file from a project",
-        parameters={
-            "project_id": "Project UUID",
-        },
-        status_codes={
-            200: "File returned",
-            403: "Permission denied",
-            404: "The file doesn't exist"
-        })
-    async def stream_file(request, response):
-
-        pm = ProjectManager.instance()
-        project = pm.get_project(request.match_info["project_id"])
-        path = request.match_info["path"]
-        path = os.path.normpath(path)
-
-        # Raise an error if user try to escape
-        if path[0] == ".":
-            raise aiohttp.web.HTTPForbidden()
-        path = os.path.join(project.path, path)
-
-        response.content_type = "application/octet-stream"
-        response.set_status(200)
-        response.enable_chunked_encoding()
-
-        # FIXME: file streaming is never stopped
-        try:
-            with open(path, "rb") as f:
-                await response.prepare(request)
-                while True:
-                    data = f.read(4096)
-                    if not data:
-                        await asyncio.sleep(0.1)
-                    await response.write(data)
-        except FileNotFoundError:
-            raise aiohttp.web.HTTPNotFound()
-        except PermissionError:
-            raise aiohttp.web.HTTPForbidden()
+        await response.stream_file(path)
 
     @Route.post(
         r"/projects/{project_id}/files/{path:.+}",
@@ -338,7 +283,7 @@ class ProjectHandler:
         with open(path, 'wb+') as f:
             while True:
                 try:
-                    chunk = await request.content.read(1024)
+                    chunk = await request.content.read(CHUNK_SIZE)
                 except asyncio.TimeoutError:
                     raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
                 if not chunk:
@@ -349,64 +294,3 @@ class ProjectHandler:
             raise aiohttp.web.HTTPNotFound()
         except PermissionError:
             raise aiohttp.web.HTTPForbidden()
-
-    @Route.get(
-        r"/projects/{project_id}/export",
-        description="Export a project as a portable archive",
-        parameters={
-            "project_id": "Project UUID",
-        },
-        raw=True,
-        status_codes={
-            200: "File returned",
-            404: "The project doesn't exist"
-        })
-    async def export_project(request, response):
-
-        pm = ProjectManager.instance()
-        project = pm.get_project(request.match_info["project_id"])
-        response.content_type = 'application/gns3project'
-        response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
-        response.enable_chunked_encoding()
-        await response.prepare(request)
-
-        include_images = bool(int(request.json.get("include_images", "0")))
-        for data in project.export(include_images=include_images):
-            await response.write(data)
-
-        #await response.write_eof() #FIXME: shound't be needed anymore
-
-    @Route.post(
-        r"/projects/{project_id}/import",
-        description="Import a project from a portable archive",
-        parameters={
-            "project_id": "Project UUID",
-        },
-        raw=True,
-        output=PROJECT_OBJECT_SCHEMA,
-        status_codes={
-            200: "Project imported",
-            403: "Forbidden to import project"
-        })
-    async def import_project(request, response):
-
-        pm = ProjectManager.instance()
-        project_id = request.match_info["project_id"]
-        project = pm.create_project(project_id=project_id)
-
-        # We write the content to a temporary location and after we extract it all.
-        # It could be more optimal to stream this but it is not implemented in Python.
-        # Spooled means the file is temporary kept in memory until max_size is reached
-        try:
-            with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
-                while True:
-                    chunk = await request.content.read(1024)
-                    if not chunk:
-                        break
-                    temp.write(chunk)
-            project.import_zip(temp, gns3vm=bool(int(request.GET.get("gns3vm", "1"))))
-        except OSError as e:
-            raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
-
-        response.json(project)
-        response.set_status(201)
@@ -576,4 +576,4 @@ class QEMUHandler:
         if filename[0] == ".":
             raise aiohttp.web.HTTPForbidden()
 
-        await response.file(image_path)
+        await response.stream_file(image_path)
@@ -36,7 +36,7 @@ class ApplianceHandler:
     async def list_appliances(request, response):
 
         controller = Controller.instance()
-        if request.query.get("update", "no") == "yes":
+        if request.query.get("update", "no").lower() == "yes":
             await controller.appliance_manager.download_appliances()
         controller.appliance_manager.load_appliances()
         response.json([c for c in controller.appliance_manager.appliances.values()])
@@ -16,15 +16,18 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import os
-import sys
 import aiohttp
 import asyncio
 import tempfile
+import zipfile
+import aiofiles
+import time
 
 from gns3server.web.route import Route
 from gns3server.controller import Controller
 from gns3server.controller.import_project import import_project
 from gns3server.controller.export_project import export_project
+from gns3server.utils.asyncio import aiozipstream
 from gns3server.config import Config
 
@@ -48,6 +51,8 @@ async def process_websocket(ws):
     except aiohttp.WSServerHandshakeError:
         pass
 
+CHUNK_SIZE = 1024 * 8  # 8KB
+
 
 class ProjectHandler:
 
@@ -300,21 +305,38 @@ class ProjectHandler:
 
         controller = Controller.instance()
         project = await controller.get_loaded_project(request.match_info["project_id"])
+        if request.query.get("include_images", "no").lower() == "yes":
+            include_images = True
+        else:
+            include_images = False
+        compression_query = request.query.get("compression", "zip").lower()
+        if compression_query == "zip":
+            compression = zipfile.ZIP_DEFLATED
+        elif compression_query == "none":
+            compression = zipfile.ZIP_STORED
+        elif compression_query == "bzip2":
+            compression = zipfile.ZIP_BZIP2
+        elif compression_query == "lzma":
+            compression = zipfile.ZIP_LZMA
+
         try:
+            begin = time.time()
             with tempfile.TemporaryDirectory() as tmp_dir:
-                stream = await export_project(project, tmp_dir, include_images=bool(int(request.query.get("include_images", "0"))))
-                # We need to do that now because export could failed and raise an HTTP error
-                # that why response start need to be the later possible
-                response.content_type = 'application/gns3project'
-                response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
-                response.enable_chunked_encoding()
-                await response.prepare(request)
-
-                for data in stream:
-                    await response.write(data)
-
-            #await response.write_eof() #FIXME: shound't be needed anymore
+                with aiozipstream.ZipFile(compression=compression) as zstream:
+                    await export_project(zstream, project, tmp_dir, include_images=include_images)
+
+                    # We need to do that now because export could failed and raise an HTTP error
+                    # that why response start need to be the later possible
+                    response.content_type = 'application/gns3project'
+                    response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3project"'.format(project.name)
+                    response.enable_chunked_encoding()
+                    await response.prepare(request)
+
+                    async for chunk in zstream:
+                        await response.write(chunk)
+
+                    log.info("Project '{}' exported in {:.4f} seconds".format(project.name, time.time() - begin))
 
         # Will be raise if you have no space left or permission issue on your temporary directory
         # RuntimeError: something was wrong during the zip process
         except (ValueError, OSError, RuntimeError) as e:
|
|||||||
|
|
||||||
# We write the content to a temporary location and after we extract it all.
|
# We write the content to a temporary location and after we extract it all.
|
||||||
# It could be more optimal to stream this but it is not implemented in Python.
|
# It could be more optimal to stream this but it is not implemented in Python.
|
||||||
# Spooled means the file is temporary kept in memory until max_size is reached
|
|
||||||
# Cannot use tempfile.SpooledTemporaryFile(max_size=10000) in Python 3.7 due
|
|
||||||
# to a bug https://bugs.python.org/issue26175
|
|
||||||
try:
|
try:
|
||||||
if sys.version_info >= (3, 7) and sys.version_info < (3, 8):
|
begin = time.time()
|
||||||
with tempfile.TemporaryFile() as temp:
|
with tempfile.TemporaryDirectory() as tmpdir:
|
||||||
|
temp_project_path = os.path.join(tmpdir, "project.zip")
|
||||||
|
async with aiofiles.open(temp_project_path, 'wb') as f:
|
||||||
while True:
|
while True:
|
||||||
chunk = await request.content.read(1024)
|
chunk = await request.content.read(CHUNK_SIZE)
|
||||||
if not chunk:
|
if not chunk:
|
||||||
break
|
break
|
||||||
temp.write(chunk)
|
await f.write(chunk)
|
||||||
project = await import_project(controller, request.match_info["project_id"], temp, location=path, name=name)
|
|
||||||
else:
|
with open(temp_project_path, "rb") as f:
|
||||||
with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
|
project = await import_project(controller, request.match_info["project_id"], f, location=path, name=name)
|
||||||
while True:
|
|
||||||
chunk = await request.content.read(1024)
|
log.info("Project '{}' imported in {:.4f} seconds".format(project.name, time.time() - begin))
|
||||||
if not chunk:
|
|
||||||
break
|
|
||||||
temp.write(chunk)
|
|
||||||
project = await import_project(controller, request.match_info["project_id"], temp, location=path, name=name)
|
|
||||||
except OSError as e:
|
except OSError as e:
|
||||||
raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
|
raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
|
||||||
|
|
||||||
response.json(project)
|
response.json(project)
|
||||||
response.set_status(201)
|
response.set_status(201)
|
||||||
|
|
||||||
@ -427,23 +443,7 @@ class ProjectHandler:
|
|||||||
raise aiohttp.web.HTTPForbidden()
|
raise aiohttp.web.HTTPForbidden()
|
||||||
path = os.path.join(project.path, path)
|
path = os.path.join(project.path, path)
|
||||||
|
|
||||||
response.content_type = "application/octet-stream"
|
await response.stream_file(path)
|
||||||
response.set_status(200)
|
|
||||||
response.enable_chunked_encoding()
|
|
||||||
|
|
||||||
try:
|
|
||||||
with open(path, "rb") as f:
|
|
||||||
await response.prepare(request)
|
|
||||||
while True:
|
|
||||||
data = f.read(4096)
|
|
||||||
if not data:
|
|
||||||
break
|
|
||||||
await response.write(data)
|
|
||||||
|
|
||||||
except FileNotFoundError:
|
|
||||||
raise aiohttp.web.HTTPNotFound()
|
|
||||||
except PermissionError:
|
|
||||||
raise aiohttp.web.HTTPForbidden()
|
|
||||||
|
|
||||||
@Route.post(
|
@Route.post(
|
||||||
r"/projects/{project_id}/files/{path:.+}",
|
r"/projects/{project_id}/files/{path:.+}",
|
||||||
@ -472,15 +472,15 @@ class ProjectHandler:
|
|||||||
response.set_status(200)
|
response.set_status(200)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
with open(path, 'wb+') as f:
|
async with aiofiles.open(path, 'wb+') as f:
|
||||||
while True:
|
while True:
|
||||||
try:
|
try:
|
||||||
chunk = await request.content.read(1024)
|
chunk = await request.content.read(CHUNK_SIZE)
|
||||||
except asyncio.TimeoutError:
|
except asyncio.TimeoutError:
|
||||||
raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
|
raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
|
||||||
if not chunk:
|
if not chunk:
|
||||||
break
|
break
|
||||||
f.write(chunk)
|
await f.write(chunk)
|
||||||
except FileNotFoundError:
|
except FileNotFoundError:
|
||||||
raise aiohttp.web.HTTPNotFound()
|
raise aiohttp.web.HTTPNotFound()
|
||||||
except PermissionError:
|
except PermissionError:
|
||||||
|
@ -53,7 +53,7 @@ class SymbolHandler:
|
|||||||
|
|
||||||
controller = Controller.instance()
|
controller = Controller.instance()
|
||||||
try:
|
try:
|
||||||
await response.file(controller.symbols.get_path(request.match_info["symbol_id"]))
|
await response.stream_file(controller.symbols.get_path(request.match_info["symbol_id"]))
|
||||||
except (KeyError, OSError) as e:
|
except (KeyError, OSError) as e:
|
||||||
log.warning("Could not get symbol file: {}".format(e))
|
log.warning("Could not get symbol file: {}".format(e))
|
||||||
response.set_status(404)
|
response.set_status(404)
|
||||||
|
@ -92,7 +92,7 @@ class IndexHandler:
|
|||||||
if not os.path.exists(static):
|
if not os.path.exists(static):
|
||||||
static = get_static_path(os.path.join('web-ui', 'index.html'))
|
static = get_static_path(os.path.join('web-ui', 'index.html'))
|
||||||
|
|
||||||
await response.file(static)
|
await response.stream_file(static)
|
||||||
|
|
||||||
@Route.get(
|
@Route.get(
|
||||||
r"/v1/version",
|
r"/v1/version",
|
||||||
|
gns3server/utils/asyncio/aiozipstream.py (new file, 430 lines)
@@ -0,0 +1,430 @@
#!/usr/bin/env python
#
# Copyright (C) 2019 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
Iterable ZIP archive generator.

Derived directly from zipfile.py and the zipstream project
https://github.com/allanlei/python-zipstream
"""

import os
import sys
import stat
import struct
import time
import zipfile
import asyncio
import aiofiles
from concurrent import futures
from async_generator import async_generator, yield_

from zipfile import (structCentralDir, structEndArchive64, structEndArchive, structEndArchive64Locator,
                     stringCentralDir, stringEndArchive64, stringEndArchive, stringEndArchive64Locator)

stringDataDescriptor = b'PK\x07\x08'  # magic number for data descriptor


def _get_compressor(compress_type):
    """
    Return the compressor.
    """

    if compress_type == zipfile.ZIP_DEFLATED:
        from zipfile import zlib
        return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
    elif compress_type == zipfile.ZIP_BZIP2:
        from zipfile import bz2
        return bz2.BZ2Compressor()
    elif compress_type == zipfile.ZIP_LZMA:
        from zipfile import LZMACompressor
        return LZMACompressor()
    else:
        return None


class PointerIO(object):

    def __init__(self, mode='wb'):
        if mode not in ('wb', ):
            raise RuntimeError('zipstream.ZipFile() requires mode "wb"')
        self.data_pointer = 0
        self.__mode = mode
        self.__closed = False

    @property
    def mode(self):
        return self.__mode

    @property
    def closed(self):
        return self.__closed

    def close(self):
        self.__closed = True

    def flush(self):
        pass

    def next(self):
        raise NotImplementedError()

    def tell(self):
        return self.data_pointer

    def truncate(size=None):
        raise NotImplementedError()

    def write(self, data):
        if self.closed:
            raise ValueError('I/O operation on closed file')

        if isinstance(data, str):
            data = data.encode('utf-8')
        if not isinstance(data, bytes):
            raise TypeError('expected bytes')
        self.data_pointer += len(data)
        return data


class ZipInfo(zipfile.ZipInfo):

    def __init__(self, *args, **kwargs):
        zipfile.ZipInfo.__init__(self, *args, **kwargs)

    def DataDescriptor(self):
        """
        crc-32                          4 bytes
        compressed size                 4 bytes
        uncompressed size               4 bytes
        """

        if self.compress_size > zipfile.ZIP64_LIMIT or self.file_size > zipfile.ZIP64_LIMIT:
            fmt = b'<4sLQQ'
        else:
            fmt = b'<4sLLL'
        return struct.pack(fmt, stringDataDescriptor, self.CRC, self.compress_size, self.file_size)


class ZipFile(zipfile.ZipFile):

    def __init__(self, fileobj=None, mode='w', compression=zipfile.ZIP_STORED, allowZip64=True, chunksize=32768):
        """Open the ZIP file with mode write "w"."""

        if mode not in ('w', ):
            raise RuntimeError('aiozipstream.ZipFile() requires mode "w"')
        if fileobj is None:
            fileobj = PointerIO()

        self._comment = b''
        zipfile.ZipFile.__init__(self, fileobj, mode=mode, compression=compression, allowZip64=allowZip64)
        self._chunksize = chunksize
        self.paths_to_write = []

    def __aiter__(self):
        return self._stream()

    @property
    def comment(self):
        """
        The comment text associated with the ZIP file.
        """

        return self._comment

    @comment.setter
    def comment(self, comment):
        """
        Add a comment text associated with the ZIP file.
        """

        if not isinstance(comment, bytes):
            raise TypeError("comment: expected bytes, got %s" % type(comment))
        # check for valid comment length
        if len(comment) >= zipfile.ZIP_MAX_COMMENT:
            if self.debug:
                print('Archive comment is too long; truncating to %d bytes' % zipfile.ZIP_MAX_COMMENT)
            comment = comment[:zipfile.ZIP_MAX_COMMENT]
        self._comment = comment
        self._didModify = True

    @async_generator
    async def data_generator(self, path):

        async with aiofiles.open(path, "rb") as f:
            while True:
                part = await f.read(self._chunksize)
                if not part:
                    break
                await yield_(part)
        return

    async def _run_in_executor(self, task, *args, **kwargs):
        """
        Run synchronous task in separate thread and await for result.
        """

        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(futures.ThreadPoolExecutor(max_workers=1), task, *args, **kwargs)

    @async_generator
    async def _stream(self):

        for kwargs in self.paths_to_write:
            async for chunk in self._write(**kwargs):
                await yield_(chunk)
        for chunk in self._close():
            await yield_(chunk)

    def write(self, filename, arcname=None, compress_type=None):
        """
        Write a file to the archive under the name `arcname`.
        """

        kwargs = {'filename': filename, 'arcname': arcname, 'compress_type': compress_type}
        self.paths_to_write.append(kwargs)

    def write_iter(self, arcname, iterable, compress_type=None):
        """
        Write the bytes iterable `iterable` to the archive under the name `arcname`.
        """

        kwargs = {'arcname': arcname, 'iterable': iterable, 'compress_type': compress_type}
        self.paths_to_write.append(kwargs)

    def writestr(self, arcname, data, compress_type=None):
        """
        Writes a str into ZipFile by wrapping data as a generator
        """

        def _iterable():
            yield data
        return self.write_iter(arcname, _iterable(), compress_type=compress_type)

    @async_generator
    async def _write(self, filename=None, iterable=None, arcname=None, compress_type=None):
        """
        Put the bytes from filename into the archive under the name `arcname`.
        """

        if not self.fp:
            raise RuntimeError(
                "Attempt to write to ZIP archive that was already closed")
        if (filename is None and iterable is None) or (filename is not None and iterable is not None):
            raise ValueError("either (exclusively) filename or iterable shall be not None")

        if filename:
            st = os.stat(filename)
            isdir = stat.S_ISDIR(st.st_mode)
            mtime = time.localtime(st.st_mtime)
            date_time = mtime[0:6]
        else:
            st, isdir, date_time = None, False, time.localtime()[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        if st:
            zinfo.external_attr = (st[0] & 0xFFFF) << 16      # Unix attributes
        else:
            zinfo.external_attr = 0o600 << 16     # ?rw-------
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type

        if st:
            zinfo.file_size = st[6]
        else:
            zinfo.file_size = 0
        zinfo.flag_bits = 0x00
        zinfo.flag_bits |= 0x08                 # ZIP flag bits, bit 3 indicates presence of data descriptor
        zinfo.header_offset = self.fp.tell()    # Start of header bytes
        if zinfo.compress_type == zipfile.ZIP_LZMA:
            # Compressed data includes an end-of-stream (EOS) marker
            zinfo.flag_bits |= 0x02

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            await yield_(self.fp.write(zinfo.FileHeader(False)))
            return

        cmpr = _get_compressor(zinfo.compress_type)

        # Must overwrite CRC and sizes with correct data later
        zinfo.CRC = CRC = 0
        zinfo.compress_size = compress_size = 0
        # Compressed size can be larger than uncompressed size
        zip64 = self._allowZip64 and zinfo.file_size * 1.05 > zipfile.ZIP64_LIMIT
        await yield_(self.fp.write(zinfo.FileHeader(zip64)))

        file_size = 0
        if filename:
            async for buf in self.data_generator(filename):
                file_size = file_size + len(buf)
                CRC = zipfile.crc32(buf, CRC) & 0xffffffff
                if cmpr:
                    buf = await self._run_in_executor(cmpr.compress, buf)
                    compress_size = compress_size + len(buf)
                await yield_(self.fp.write(buf))
        else:  # we have an iterable
            for buf in iterable:
                file_size = file_size + len(buf)
                CRC = zipfile.crc32(buf, CRC) & 0xffffffff
                if cmpr:
                    buf = await self._run_in_executor(cmpr.compress, buf)
                    compress_size = compress_size + len(buf)
                await yield_(self.fp.write(buf))

        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            await yield_(self.fp.write(buf))
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        zinfo.CRC = CRC
        zinfo.file_size = file_size
        if not zip64 and self._allowZip64:
            if file_size > zipfile.ZIP64_LIMIT:
                raise RuntimeError('File size has increased during compressing')
            if compress_size > zipfile.ZIP64_LIMIT:
                raise RuntimeError('Compressed size larger than uncompressed size')

        await yield_(self.fp.write(zinfo.DataDescriptor()))
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo

    def _close(self):
        """
        Close the file, and for mode "w" write the ending records.
        """

        if self.fp is None:
            return

        try:
            if self.mode in ('w', 'a') and self._didModify:  # write ending records
                count = 0
                pos1 = self.fp.tell()
                for zinfo in self.filelist:         # write central directory
                    count = count + 1
                    dt = zinfo.date_time
                    dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
                    dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
                    extra = []
                    if zinfo.file_size > zipfile.ZIP64_LIMIT or zinfo.compress_size > zipfile.ZIP64_LIMIT:
                        extra.append(zinfo.file_size)
                        extra.append(zinfo.compress_size)
                        file_size = 0xffffffff
                        compress_size = 0xffffffff
                    else:
                        file_size = zinfo.file_size
                        compress_size = zinfo.compress_size

                    if zinfo.header_offset > zipfile.ZIP64_LIMIT:
                        extra.append(zinfo.header_offset)
                        header_offset = 0xffffffff
                    else:
                        header_offset = zinfo.header_offset

                    extra_data = zinfo.extra
                    min_version = 0
                    if extra:
                        # Append a ZIP64 field to the extra's
                        extra_data = struct.pack(
                            b'<HH' + b'Q'*len(extra),
                            1, 8*len(extra), *extra) + extra_data
                        min_version = zipfile.ZIP64_VERSION

                    if zinfo.compress_type == zipfile.ZIP_BZIP2:
                        min_version = max(zipfile.BZIP2_VERSION, min_version)
                    elif zinfo.compress_type == zipfile.ZIP_LZMA:
                        min_version = max(zipfile.LZMA_VERSION, min_version)

                    extract_version = max(min_version, zinfo.extract_version)
                    create_version = max(min_version, zinfo.create_version)
                    try:
                        filename, flag_bits = zinfo._encodeFilenameFlags()
                        centdir = struct.pack(structCentralDir,
                                              stringCentralDir, create_version,
                                              zinfo.create_system, extract_version, zinfo.reserved,
                                              flag_bits, zinfo.compress_type, dostime, dosdate,
                                              zinfo.CRC, compress_size, file_size,
                                              len(filename), len(extra_data), len(zinfo.comment),
                                              0, zinfo.internal_attr, zinfo.external_attr,
                                              header_offset)
                    except DeprecationWarning:
                        print((structCentralDir, stringCentralDir, create_version,
                               zinfo.create_system, extract_version, zinfo.reserved,
                               zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                               zinfo.CRC, compress_size, file_size,
                               len(zinfo.filename), len(extra_data), len(zinfo.comment),
                               0, zinfo.internal_attr, zinfo.external_attr,
                               header_offset), file=sys.stderr)
                        raise
                    yield self.fp.write(centdir)
                    yield self.fp.write(filename)
                    yield self.fp.write(extra_data)
                    yield self.fp.write(zinfo.comment)

                pos2 = self.fp.tell()
                # Write end-of-zip-archive record
                centDirCount = count
                centDirSize = pos2 - pos1
                centDirOffset = pos1
                if (centDirCount >= zipfile.ZIP_FILECOUNT_LIMIT or
                        centDirOffset > zipfile.ZIP64_LIMIT or
                        centDirSize > zipfile.ZIP64_LIMIT):
                    # Need to write the ZIP64 end-of-archive records
                    zip64endrec = struct.pack(
                        structEndArchive64, stringEndArchive64,
                        44, 45, 45, 0, 0, centDirCount, centDirCount,
                        centDirSize, centDirOffset)
                    yield self.fp.write(zip64endrec)

                    zip64locrec = struct.pack(
                        structEndArchive64Locator,
                        stringEndArchive64Locator, 0, pos2, 1)
                    yield self.fp.write(zip64locrec)
                    centDirCount = min(centDirCount, 0xFFFF)
                    centDirSize = min(centDirSize, 0xFFFFFFFF)
                    centDirOffset = min(centDirOffset, 0xFFFFFFFF)

                endrec = struct.pack(structEndArchive, stringEndArchive,
                                     0, 0, centDirCount, centDirCount,
                                     centDirSize, centDirOffset, len(self._comment))
                yield self.fp.write(endrec)
                yield self.fp.write(self._comment)
                self.fp.flush()
        finally:
            fp = self.fp
            self.fp = None
            if not self._filePassed:
                fp.close()
@@ -20,7 +20,7 @@ import jsonschema
import aiohttp
import aiohttp.web
import mimetypes
-import asyncio
+import aiofiles
import logging
import jinja2
import sys
@@ -32,6 +32,8 @@ from ..version import __version__
log = logging.getLogger(__name__)
renderer = jinja2.Environment(loader=jinja2.FileSystemLoader(get_resource('templates')))

+CHUNK_SIZE = 1024 * 8 # 8KB
+

class Response(aiohttp.web.Response):

@@ -112,16 +114,21 @@ class Response(aiohttp.web.Response):
            raise aiohttp.web.HTTPBadRequest(text="{}".format(e))
        self.body = json.dumps(answer, indent=4, sort_keys=True).encode('utf-8')

-    async def file(self, path, status=200, set_content_length=True):
+    async def stream_file(self, path, status=200, set_content_type=None, set_content_length=True):
        """
-        Return a file as a response
+        Stream a file as a response
        """

        if not os.path.exists(path):
            raise aiohttp.web.HTTPNotFound()

-        ct, encoding = mimetypes.guess_type(path)
-        if not ct:
-            ct = 'application/octet-stream'
+        if not set_content_type:
+            ct, encoding = mimetypes.guess_type(path)
+            if not ct:
+                ct = 'application/octet-stream'
+        else:
+            ct = set_content_type

        if encoding:
            self.headers[aiohttp.hdrs.CONTENT_ENCODING] = encoding
        self.content_type = ct
@@ -136,16 +143,13 @@ class Response(aiohttp.web.Response):
        self.set_status(status)

        try:
-            with open(path, 'rb') as fobj:
+            async with aiofiles.open(path, 'rb') as f:
                await self.prepare(self._request)

                while True:
-                    data = fobj.read(4096)
+                    data = await f.read(CHUNK_SIZE)
                    if not data:
                        break
                    await self.write(data)
-                    # await self.drain()

        except FileNotFoundError:
            raise aiohttp.web.HTTPNotFound()
        except PermissionError:
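A minimal usage sketch of the renamed method; the handler name and file path are made up, while stream_file(), its set_content_type parameter and the application/gns3project type come from this change and the tests below:

    # Hypothetical handler: stream a file back to the client in CHUNK_SIZE
    # pieces instead of loading it with a single read().
    async def export_handler(request, response):
        await response.stream_file("/tmp/project.gns3project",
                                   set_content_type="application/gns3project")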
@@ -1,10 +1,11 @@
jsonschema==2.6.0 # pyup: ignore
aiohttp==3.5.4
aiohttp-cors==0.7.0
+aiofiles==0.4.0
+async_generator>=1.10
Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
-zipstream>=1.1.4
prompt-toolkit==1.0.15
async-timeout==3.0.1
distro>=1.3.0
@@ -293,15 +293,6 @@ def test_json(compute):
    }


-def test_streamFile(project, async_run, compute):
-    response = MagicMock()
-    response.status = 200
-    with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
-        async_run(compute.stream_file(project, "test/titi", timeout=120))
-    mock.assert_called_with("GET", "https://example.com:84/v2/compute/projects/{}/stream/test/titi".format(project.id), auth=None, timeout=120)
-    async_run(compute.close())
-
-
def test_downloadFile(project, async_run, compute):
    response = MagicMock()
    response.status = 200
@@ -310,6 +301,7 @@ def test_downloadFile(project, async_run, compute):
    mock.assert_called_with("GET", "https://example.com:84/v2/compute/projects/{}/files/test/titi".format(project.id), auth=None)
    async_run(compute.close())

+
def test_close(compute, async_run):
    assert compute.connected is True
    async_run(compute.close())
@@ -29,6 +29,7 @@ from tests.utils import AsyncioMagicMock, AsyncioBytesIO

from gns3server.controller.project import Project
from gns3server.controller.export_project import export_project, _is_exportable
+from gns3server.utils.asyncio import aiozipstream


@pytest.fixture
@@ -51,6 +52,13 @@ def node(controller, project, async_run):
    return node


+async def write_file(path, z):
+
+    with open(path, 'wb') as f:
+        async for chunk in z:
+            f.write(chunk)
+
+
def test_exportable_files():
    assert _is_exportable("hello/world")
    assert not _is_exportable("project-files/tmp")
@@ -103,12 +111,10 @@ def test_export(tmpdir, project, async_run):
    with open(os.path.join(path, "project-files", "snapshots", "test"), 'w+') as f:
        f.write("WORLD")

-    with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
-        z = async_run(export_project(project, str(tmpdir), include_images=False))
-    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
-        for data in z:
-            f.write(data)
+    with aiozipstream.ZipFile() as z:
+        with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
+            async_run(export_project(z, project, str(tmpdir), include_images=False))
+        async_run(write_file(str(tmpdir / 'zipfile.zip'), z))

    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
        with myzip.open("vm-1/dynamips/test") as myfile:
@@ -128,7 +134,7 @@ def test_export(tmpdir, project, async_run):
    assert topo["computes"] == []


-def test_export_vm(tmpdir, project, async_run, controller):
+def test_export_vm(tmpdir, project, async_run):
    """
    If data is on a remote server export it locally before
    sending it in the archive.
@@ -154,12 +160,10 @@ def test_export_vm(tmpdir, project, async_run, controller):
    with open(os.path.join(path, "test.gns3"), 'w+') as f:
        f.write("{}")

-    z = async_run(export_project(project, str(tmpdir)))
-    assert compute.list_files.called
-    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
-        for data in z:
-            f.write(data)
+    with aiozipstream.ZipFile() as z:
+        async_run(export_project(z, project, str(tmpdir)))
+        assert compute.list_files.called
+        async_run(write_file(str(tmpdir / 'zipfile.zip'), z))

    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
        with myzip.open("vm-1/dynamips/test") as myfile:
@@ -169,7 +173,7 @@

def test_export_disallow_running(tmpdir, project, node, async_run):
    """
-    Dissallow export when a node is running
+    Disallow export when a node is running
    """

    path = project.path
@@ -189,12 +193,13 @@ def test_export_disallow_running(tmpdir, project, node, async_run):

    node._status = "started"
    with pytest.raises(aiohttp.web.HTTPConflict):
-        async_run(export_project(project, str(tmpdir)))
+        with aiozipstream.ZipFile() as z:
+            async_run(export_project(z, project, str(tmpdir)))


def test_export_disallow_some_type(tmpdir, project, async_run):
    """
-    Dissalow export for some node type
+    Disallow export for some node type
    """

    path = project.path
@@ -213,8 +218,10 @@ def test_export_disallow_some_type(tmpdir, project, async_run):
        json.dump(topology, f)

    with pytest.raises(aiohttp.web.HTTPConflict):
-        z = async_run(export_project(project, str(tmpdir)))
-    z = async_run(export_project(project, str(tmpdir), allow_all_nodes=True))
+        with aiozipstream.ZipFile() as z:
+            async_run(export_project(z, project, str(tmpdir)))
+    with aiozipstream.ZipFile() as z:
+        async_run(export_project(z, project, str(tmpdir), allow_all_nodes=True))

    # VirtualBox is always disallowed
    topology = {
@@ -232,7 +239,8 @@ def test_export_disallow_some_type(tmpdir, project, async_run):
    with open(os.path.join(path, "test.gns3"), 'w+') as f:
        json.dump(topology, f)
    with pytest.raises(aiohttp.web.HTTPConflict):
-        z = async_run(export_project(project, str(tmpdir), allow_all_nodes=True))
+        with aiozipstream.ZipFile() as z:
+            async_run(export_project(z, project, str(tmpdir), allow_all_nodes=True))


def test_export_fix_path(tmpdir, project, async_run):
@@ -264,10 +272,9 @@ def test_export_fix_path(tmpdir, project, async_run):
    with open(os.path.join(path, "test.gns3"), 'w+') as f:
        json.dump(topology, f)

-    z = async_run(export_project(project, str(tmpdir)))
-    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
-        for data in z:
-            f.write(data)
+    with aiozipstream.ZipFile() as z:
+        async_run(export_project(z, project, str(tmpdir)))
+        async_run(write_file(str(tmpdir / 'zipfile.zip'), z))

    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
        with myzip.open("project.gns3") as myfile:
@@ -303,11 +310,10 @@ def test_export_with_images(tmpdir, project, async_run):
    with open(os.path.join(path, "test.gns3"), 'w+') as f:
        json.dump(topology, f)

-    with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
-        z = async_run(export_project(project, str(tmpdir), include_images=True))
-    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
-        for data in z:
-            f.write(data)
+    with aiozipstream.ZipFile() as z:
+        with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
+            async_run(export_project(z, project, str(tmpdir), include_images=True))
+        async_run(write_file(str(tmpdir / 'zipfile.zip'), z))

    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
        myzip.getinfo("images/IOS/test.image")
@@ -341,11 +347,9 @@ def test_export_keep_compute_id(tmpdir, project, async_run):
        }
        json.dump(data, f)

-    z = async_run(export_project(project, str(tmpdir), keep_compute_id=True))
-    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
-        for data in z:
-            f.write(data)
+    with aiozipstream.ZipFile() as z:
+        async_run(export_project(z, project, str(tmpdir), keep_compute_id=True))
+        async_run(write_file(str(tmpdir / 'zipfile.zip'), z))

    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
        with myzip.open("project.gns3") as myfile:
@@ -353,7 +357,8 @@ def test_export_keep_compute_id(tmpdir, project, async_run):
            assert topo["nodes"][0]["compute_id"] == "6b7149c8-7d6e-4ca0-ab6b-daa8ab567be0"
            assert len(topo["computes"]) == 1

-def test_export_images_from_vm(tmpdir, project, async_run, controller):
+
+def test_export_images_from_vm(tmpdir, project, async_run):
    """
    If data is on a remote server export it locally before
    sending it in the archive.
@@ -405,12 +410,10 @@ def test_export_images_from_vm(tmpdir, project, async_run, controller):
    with open(os.path.join(path, "test.gns3"), 'w+') as f:
        f.write(json.dumps(topology))

-    z = async_run(export_project(project, str(tmpdir), include_images=True))
-    assert compute.list_files.called
-    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
-        for data in z:
-            f.write(data)
+    with aiozipstream.ZipFile() as z:
+        async_run(export_project(z, project, str(tmpdir), include_images=True))
+        assert compute.list_files.called
+        async_run(write_file(str(tmpdir / 'zipfile.zip'), z))

    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
        with myzip.open("vm-1/dynamips/test") as myfile:
@@ -450,12 +453,9 @@ def test_export_with_ignoring_snapshots(tmpdir, project, async_run):
    os.makedirs(snapshots_dir)
    Path(os.path.join(snapshots_dir, 'snap.gns3project')).touch()

-    z = async_run(export_project(project, str(tmpdir), keep_compute_id=True))
-    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
-        for data in z:
-            f.write(data)
+    with aiozipstream.ZipFile() as z:
+        async_run(export_project(z, project, str(tmpdir), keep_compute_id=True))
+        async_run(write_file(str(tmpdir / 'zipfile.zip'), z))

    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
        assert not os.path.join('snapshots', 'snap.gns3project') in [f.filename for f in myzip.filelist]
|
@ -21,7 +21,6 @@ import sys
|
|||||||
import uuid
|
import uuid
|
||||||
import pytest
|
import pytest
|
||||||
import aiohttp
|
import aiohttp
|
||||||
import zipstream
|
|
||||||
from unittest.mock import MagicMock
|
from unittest.mock import MagicMock
|
||||||
from tests.utils import AsyncioMagicMock, asyncio_patch
|
from tests.utils import AsyncioMagicMock, asyncio_patch
|
||||||
from unittest.mock import patch
|
from unittest.mock import patch
|
||||||
@ -532,15 +531,6 @@ def test_duplicate(project, async_run, controller):
|
|||||||
assert list(new_project.nodes.values())[1].compute.id == "remote"
|
assert list(new_project.nodes.values())[1].compute.id == "remote"
|
||||||
|
|
||||||
|
|
||||||
def test_duplicate_with_zipfile_encoding_issues(project, async_run, controller):
|
|
||||||
zf = zipstream.ZipFile()
|
|
||||||
zf.writestr('test\udcc3', "data")
|
|
||||||
|
|
||||||
with asyncio_patch('gns3server.controller.project.export_project', return_value=zf):
|
|
||||||
with pytest.raises(aiohttp.web.HTTPConflict):
|
|
||||||
async_run(project.duplicate(name="Hello"))
|
|
||||||
|
|
||||||
|
|
||||||
def test_snapshots(project):
|
def test_snapshots(project):
|
||||||
"""
|
"""
|
||||||
List the snapshots
|
List the snapshots
|
||||||
|
@ -64,7 +64,7 @@ def test_cloud_nio_create_udp(http_compute, vm):
|
|||||||
"rhost": "127.0.0.1"},
|
"rhost": "127.0.0.1"},
|
||||||
example=True)
|
example=True)
|
||||||
assert response.status == 201
|
assert response.status == 201
|
||||||
assert response.route == "/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
assert response.route == r"/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
||||||
assert response.json["type"] == "nio_udp"
|
assert response.json["type"] == "nio_udp"
|
||||||
|
|
||||||
|
|
||||||
@ -82,7 +82,7 @@ def test_cloud_nio_update_udp(http_compute, vm):
|
|||||||
"filters": {}},
|
"filters": {}},
|
||||||
example=True)
|
example=True)
|
||||||
assert response.status == 201, response.body.decode()
|
assert response.status == 201, response.body.decode()
|
||||||
assert response.route == "/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
assert response.route == r"/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
||||||
assert response.json["type"] == "nio_udp"
|
assert response.json["type"] == "nio_udp"
|
||||||
|
|
||||||
|
|
||||||
@ -93,7 +93,7 @@ def test_cloud_delete_nio(http_compute, vm):
|
|||||||
"rhost": "127.0.0.1"})
|
"rhost": "127.0.0.1"})
|
||||||
response = http_compute.delete("/projects/{project_id}/cloud/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
|
response = http_compute.delete("/projects/{project_id}/cloud/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
|
||||||
assert response.status == 204
|
assert response.status == 204
|
||||||
assert response.route == "/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
assert response.route == r"/projects/{project_id}/cloud/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
|
||||||
|
|
||||||
|
|
||||||
def test_cloud_delete(http_compute, vm):
|
def test_cloud_delete(http_compute, vm):
|
||||||
|
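The only change in these test hunks is the r prefix on the expected route: \d is not a recognized escape in a normal string literal, so recent Python versions warn about it, while a raw string keeps the backslash literally and denotes exactly the same text:

    # Both literals are the same string; the raw form just avoids the
    # "invalid escape sequence '\d'" warning.
    assert "/ports/{port_number:\\d+}/nio" == r"/ports/{port_number:\d+}/nio"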
@@ -120,7 +120,7 @@ def test_docker_nio_create_udp(http_compute, vm):
            "rhost": "127.0.0.1"},
        example=True)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -141,14 +141,14 @@ def test_docker_update_nio(http_compute, vm):
            },
        example=True)
    assert response.status == 201, response.body.decode()
-    assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_docker_delete_nio(http_compute, vm):
    with asyncio_patch("gns3server.compute.docker.docker_vm.DockerVM.adapter_remove_nio_binding") as mock:
        response = http_compute.delete("/projects/{project_id}/docker/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204
-    assert response.route == "/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/docker/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_docker_update(http_compute, vm, tmpdir, free_console_port):
@@ -201,7 +201,7 @@ def test_iou_nio_create_udp(http_compute, vm):
            "rhost": "127.0.0.1"},
        example=True)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -219,7 +219,7 @@ def test_iou_nio_update_udp(http_compute, vm):
            "filters": {}},
        example=True)
    assert response.status == 201, response.body.decode()
-    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -229,7 +229,7 @@ def test_iou_nio_create_ethernet(http_compute, vm, ethernet_device):
            },
        example=True)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_ethernet"
    assert response.json["ethernet_device"] == ethernet_device

@@ -240,7 +240,7 @@ def test_iou_nio_create_ethernet_different_port(http_compute, vm, ethernet_devic
            },
        example=False)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_ethernet"
    assert response.json["ethernet_device"] == ethernet_device

@@ -250,7 +250,7 @@ def test_iou_nio_create_tap(http_compute, vm, ethernet_device):
    response = http_compute.post("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_tap",
            "tap_device": ethernet_device})
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_tap"


@@ -261,7 +261,7 @@ def test_iou_delete_nio(http_compute, vm):
            "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/iou/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204
-    assert response.route == "/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/iou/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_iou_start_capture(http_compute, vm, tmpdir, project):
@@ -65,7 +65,7 @@ def test_nat_nio_create_udp(http_compute, vm):
            "rhost": "127.0.0.1"},
        example=True)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -83,7 +83,7 @@ def test_nat_nio_update_udp(http_compute, vm):
            "filters": {}},
        example=True)
    assert response.status == 201, response.body.decode()
-    assert response.route == "/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -97,7 +97,7 @@ def test_nat_delete_nio(http_compute, vm):
        response = http_compute.delete("/projects/{project_id}/nat/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert mock_remove_nio.called
    assert response.status == 204
-    assert response.route == "/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/nat/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_nat_delete(http_compute, vm):
@@ -189,7 +189,7 @@ def test_qemu_nio_create_udp(http_compute, vm):
            "rhost": "127.0.0.1"},
        example=True)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -208,7 +208,7 @@ def test_qemu_nio_update_udp(http_compute, vm):
            "filters": {}},
        example=True)
    assert response.status == 201, response.body.decode()
-    assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -221,7 +221,7 @@ def test_qemu_delete_nio(http_compute, vm):
            "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204
-    assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_qemu_list_binaries(http_compute, vm):
@@ -55,7 +55,7 @@ def test_traceng_nio_create_udp(http_compute, vm):
            "rhost": "127.0.0.1"},
        example=True)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -76,7 +76,7 @@ def test_traceng_nio_update_udp(http_compute, vm):
            "filters": {}},
        example=True)
    assert response.status == 201, response.body.decode("utf-8")
-    assert response.route == "/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -88,7 +88,7 @@ def test_traceng_delete_nio(http_compute, vm):
            "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/traceng/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204, response.body.decode()
-    assert response.route == "/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/traceng/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_traceng_start(http_compute, vm):
@@ -108,7 +108,7 @@ def test_vbox_nio_create_udp(http_compute, vm):
        assert args[0] == 0

    assert response.status == 201
-    assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -127,7 +127,7 @@ def test_virtualbox_nio_update_udp(http_compute, vm):
            "filters": {}},
        example=True)
    assert response.status == 201, response.body.decode()
-    assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -141,7 +141,7 @@ def test_vbox_delete_nio(http_compute, vm):
        assert args[0] == 0

    assert response.status == 204
-    assert response.route == "/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/virtualbox/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_vbox_update(http_compute, vm, free_console_port):
@@ -116,7 +116,7 @@ def test_vmware_nio_create_udp(http_compute, vm):
        assert args[0] == 0

    assert response.status == 201
-    assert response.route == "/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -135,7 +135,7 @@ def test_vmware_nio_update_udp(http_compute, vm):
            "filters": {}},
        example=True)
    assert response.status == 201, response.body.decode()
-    assert response.route == "/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -149,7 +149,7 @@ def test_vmware_delete_nio(http_compute, vm):
        assert args[0] == 0

    assert response.status == 204
-    assert response.route == "/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/vmware/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_vmware_update(http_compute, vm, free_console_port):
@@ -70,7 +70,7 @@ def test_vpcs_nio_create_udp(http_compute, vm):
            "rhost": "127.0.0.1"},
        example=True)
    assert response.status == 201
-    assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -91,7 +91,7 @@ def test_vpcs_nio_update_udp(http_compute, vm):
            "filters": {}},
        example=True)
    assert response.status == 201, response.body.decode("utf-8")
-    assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
    assert response.json["type"] == "nio_udp"


@@ -103,7 +103,7 @@ def test_vpcs_delete_nio(http_compute, vm):
            "rhost": "127.0.0.1"})
    response = http_compute.delete("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
    assert response.status == 204, response.body.decode()
-    assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
+    assert response.route == r"/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"


def test_vpcs_start(http_compute, vm):
@@ -235,7 +235,7 @@ def test_export_with_images(http_controller, tmpdir, loop, project):
        json.dump(topology, f)

    with patch("gns3server.compute.Dynamips.get_images_directory", return_value=str(tmpdir / "IOS"),):
-        response = http_controller.get("/projects/{project_id}/export?include_images=1".format(project_id=project.id), raw=True)
+        response = http_controller.get("/projects/{project_id}/export?include_images=yes".format(project_id=project.id), raw=True)
    assert response.status == 200
    assert response.headers['CONTENT-TYPE'] == 'application/gns3project'
    assert response.headers['CONTENT-DISPOSITION'] == 'attachment; filename="{}.gns3project"'.format(project.name)
@@ -34,11 +34,11 @@ def test_response_file(async_run, tmpdir, response):
    with open(filename, 'w+') as f:
        f.write('world')

-    async_run(response.file(filename))
+    async_run(response.stream_file(filename))
    assert response.status == 200


def test_response_file_not_found(async_run, tmpdir, response):
    filename = str(tmpdir / 'hello-not-found')

-    pytest.raises(HTTPNotFound, lambda: async_run(response.file(filename)))
+    pytest.raises(HTTPNotFound, lambda: async_run(response.stream_file(filename)))