Mirror of https://github.com/GNS3/gns3-server
Merge branch '2.1' into 2.2

# Conflicts:
#   gns3server/controller/__init__.py
#   gns3server/controller/export_project.py
#   gns3server/controller/import_project.py
#   gns3server/controller/project.py
#   gns3server/version.py

Commit 21a1800edc

CHANGELOG | 16
@@ -1,5 +1,21 @@
 # Change Log
 
+## 2.1.5 18/04/2018
+
+* Set the first byte to 0C when generating a random MAC address for a Qemu VM. Ref #1267.
+* Update appliance files.
+* Do not use VMnet0 when allocating VMnet adapters.
+* Use SO_REUSEADDR before calling bind() where missing. Fixes #1289.
+* Do not fail a Dynamips project conversion if a file is being used.
+* Catch exceptions when using AsyncioTelnetServer. Fixes #1321.
+* Grid size support for projects.
+* Remove 'include INSTALL' from MANIFEST.
+* Fix issue with start all.
+* Check for valid IP address and prevent running on non-Windows platforms.
+* Enable UDP tunnel option and use ICMP probing by default.
+* Use the configured IP address to trace.
+* Have TraceNG start without needing cmd.exe.
+
 ## 2.1.4 12/03/2018
 
 * Add Juniper JunOS space appliance.
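The SO_REUSEADDR entry above refers to the standard socket idiom of setting the option before bind(), so a restarted server can rebind a port whose previous socket is still in TIME_WAIT. A minimal sketch of that pattern (illustrative only, not the server's actual code; 3080 is the default GNS3 server port):

    import socket

    # Set SO_REUSEADDR before bind(): without it, a quick restart can fail
    # with "Address already in use" while the old socket sits in TIME_WAIT.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sock.bind(("0.0.0.0", 3080))
    sock.listen(5)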
@@ -203,7 +203,7 @@ class Docker(BaseManager):
             pass
 
         if progress_callback:
-            progress_callback("Pull {} from docker hub".format(image))
+            progress_callback("Pulling '{}' from docker hub".format(image))
         response = yield from self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
         # The pull api will stream status via an HTTP JSON stream
         content = ""
@@ -211,6 +211,10 @@ class Docker(BaseManager):
             try:
                 chunk = yield from response.content.read(1024)
             except aiohttp.ServerDisconnectedError:
+                log.error("Disconnected from server while pulling Docker image '{}' from docker hub".format(image))
+                break
+            except asyncio.TimeoutError:
+                log.error("Timeout while pulling Docker image '{}' from docker hub".format(image))
                 break
             if not chunk:
                 break
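The two hunks above make the Docker image pull resilient to a dropped or stalled HTTP JSON stream. The same read loop in isolation, as a hedged sketch (assumes an aiohttp 2.x-era streaming response and the yield from style used throughout this codebase; report_error is a placeholder callback):

    import asyncio
    import aiohttp

    @asyncio.coroutine
    def drain_stream(response, report_error):
        # Read the body chunk by chunk; a disconnect or timeout ends the
        # loop with a reported error instead of an unhandled exception.
        content = ""
        while True:
            try:
                chunk = yield from response.content.read(1024)
            except aiohttp.ServerDisconnectedError:
                report_error("disconnected while reading stream")
                break
            except asyncio.TimeoutError:
                report_error("timeout while reading stream")
                break
            if not chunk:
                break
            content += chunk.decode("utf-8", errors="ignore")
        return content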
@@ -314,7 +314,10 @@ class VPCSVM(BaseNode):
 
             log.info("Stopping VPCS instance {} PID={}".format(self.name, self._process.pid))
             if sys.platform.startswith("win32"):
-                self._process.send_signal(signal.CTRL_BREAK_EVENT)
+                try:
+                    self._process.send_signal(signal.CTRL_BREAK_EVENT)
+                except OSError:
+                    pass
             else:
                 try:
                     self._process.terminate()
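The VPCS hunk guards the Windows branch the same way the POSIX branch already was: if the child exited on its own, send_signal() raises OSError, which is now swallowed. A standalone sketch of the pattern (hypothetical stop_process helper; CTRL_BREAK_EVENT only reaches a child started with CREATE_NEW_PROCESS_GROUP):

    import signal
    import sys

    def stop_process(process):
        # Signalling a process that has already exited must not crash the stop path.
        if sys.platform.startswith("win32"):
            try:
                process.send_signal(signal.CTRL_BREAK_EVENT)
            except OSError:
                pass  # process already gone
        else:
            try:
                process.terminate()
            except ProcessLookupError:
                pass  # process already gone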
@@ -193,13 +193,13 @@ class Controller:
                                              user=server_config.get("user", ""),
                                              password=server_config.get("password", ""),
                                              force=True)
-        except aiohttp.web_exceptions.HTTPConflict as e:
+        except aiohttp.web.HTTPConflict as e:
             log.fatal("Cannot access to the local server, make sure something else is not running on the TCP port {}".format(port))
             sys.exit(1)
         for c in computes:
             try:
                 yield from self.add_compute(**c)
-            except (aiohttp.web_exceptions.HTTPConflict, KeyError):
+            except (aiohttp.web.HTTPConflict, KeyError):
                 pass  # Skip not available servers at loading
         yield from self.load_projects()
         try:
@@ -228,7 +228,7 @@ class Controller:
             try:
                 yield from compute.close()
             # We don't care if a compute is down at this step
-            except (ComputeError, aiohttp.web_exceptions.HTTPError, OSError):
+            except (ComputeError, aiohttp.web.HTTPError, OSError):
                 pass
         yield from self.gns3vm.exit_vm()
         self._computes = {}
@@ -311,7 +311,7 @@ class Controller:
                 if file.endswith(".gns3"):
                     try:
                         yield from self.load_project(os.path.join(project_dir, file), load=False)
-                    except (aiohttp.web_exceptions.HTTPConflict, NotImplementedError):
+                    except (aiohttp.web.HTTPConflict, NotImplementedError):
                         pass  # Skip not compatible projects
         except OSError as e:
             log.error(str(e))
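The recurring rename from aiohttp.web_exceptions to aiohttp.web in these hunks is cosmetic: aiohttp.web re-exports the exception classes, so the same objects are caught either way. A quick check (assuming aiohttp is installed):

    import aiohttp.web
    import aiohttp.web_exceptions

    # Same class object under both names, so except clauses behave identically.
    assert aiohttp.web.HTTPConflict is aiohttp.web_exceptions.HTTPConflict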
@@ -29,44 +29,44 @@ log = logging.getLogger(__name__)
 
 
 @asyncio.coroutine
-def export_project(project, temporary_dir, include_images=False, keep_compute_id=False,
-                   allow_all_nodes=False, ignore_prefixes=None):
+def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False):
     """
-    Export the project as zip. It's a ZipStream object.
-    The file will be read chunk by chunk when you iterate on
-    the zip.
+    Export a project to a zip file.
 
-    It will ignore some files like snapshots and
+    The file will be read chunk by chunk when you iterate over the zip stream.
+    Some files like snapshots and packet captures are ignored.
 
     :param temporary_dir: A temporary dir where to store intermediate data
-    :param keep_compute_id: If false replace all compute id by local it's the standard behavior for .gns3project to make them portable
-    :param allow_all_nodes: Allow all nodes type to be include in the zip even if not portable default False
+    :param include_images: save OS images to the zip file
+    :param keep_compute_id: If false replace all compute id by local (standard behavior for .gns3project to make it portable)
+    :param allow_all_nodes: Allow all nodes type to be include in the zip even if not portable
 
     :returns: ZipStream object
     """
 
-    # To avoid issue with data not saved we disallow the export of a running topologie
+    # To avoid issue with data not saved we disallow the export of a running project
     if project.is_running():
-        raise aiohttp.web.HTTPConflict(text="Running topology could not be exported")
+        raise aiohttp.web.HTTPConflict(text="Project must be stopped in order to export it")
 
     # Make sure we save the project
     project.dump()
 
-    z = zipstream.ZipFile(allowZip64=True)
+    zstream = zipstream.ZipFile(allowZip64=True)
 
     if not os.path.exists(project._path):
-        raise aiohttp.web.HTTPNotFound(text="The project doesn't exist at location {}".format(project._path))
+        raise aiohttp.web.HTTPNotFound(text="Project could not be found at '{}'".format(project._path))
 
     # First we process the .gns3 in order to be sure we don't have an error
     for file in os.listdir(project._path):
         if file.endswith(".gns3"):
-            images = yield from _export_project_file(project, os.path.join(project._path, file),
-                                                     z, include_images, keep_compute_id, allow_all_nodes, temporary_dir)
+            yield from _patch_project_file(project, os.path.join(project._path, file), zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir)
 
+    # Export the local files
     for root, dirs, files in os.walk(project._path, topdown=True):
-        files = [f for f in files if not _filter_files(os.path.join(root, f))]
+        files = [f for f in files if _is_exportable(os.path.join(root, f))]
         for file in files:
             path = os.path.join(root, file)
-            # Try open the file
+            # check if we can export the file
             try:
                 open(path).close()
             except OSError as e:
@@ -74,77 +74,83 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id
                 log.warning(msg)
                 project.controller.notification.emit("log.warning", {"message": msg})
                 continue
+            # ignore the .gns3 file
             if file.endswith(".gns3"):
-                pass
-            else:
-                z.write(path, os.path.relpath(path, project._path), compress_type=zipfile.ZIP_DEFLATED)
+                continue
+            zstream.write(path, os.path.relpath(path, project._path), compress_type=zipfile.ZIP_DEFLATED)
 
+    # Export files from remote computes
     downloaded_files = set()
 
     for compute in project.computes:
         if compute.id != "local":
             compute_files = yield from compute.list_files(project)
             for compute_file in compute_files:
-                if not _filter_files(compute_file["path"]):
+                if _is_exportable(compute_file["path"]):
                     (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
                     f = open(fd, "wb", closefd=True)
                     response = yield from compute.download_file(project, compute_file["path"])
                     while True:
-                        data = yield from response.content.read(512)
+                        try:
+                            data = yield from response.content.read(1024)
+                        except asyncio.TimeoutError:
+                            raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading file '{}' from remote compute server {}:{}".format(compute_file["path"], compute.host, compute.port))
                         if not data:
                             break
                        f.write(data)
                     response.close()
                     f.close()
-                    z.write(temp_path, arcname=compute_file["path"], compress_type=zipfile.ZIP_DEFLATED)
+                    zstream.write(temp_path, arcname=compute_file["path"], compress_type=zipfile.ZIP_DEFLATED)
                     downloaded_files.add(compute_file['path'])
 
-    return z
+    return zstream
 
 
-def _filter_files(path):
+def _is_exportable(path):
     """
     :returns: True if file should not be included in the final archive
     """
-    s = os.path.normpath(path).split(os.path.sep)
 
+    # do not export snapshots
     if path.endswith("snapshots"):
-        return True
+        return False
 
-    # filter directory of snapshots
+    # do not export directories of snapshots
     if "{sep}snapshots{sep}".format(sep=os.path.sep) in path:
-        return True
+        return False
 
     try:
+        # do not export captures and other temporary directory
+        s = os.path.normpath(path).split(os.path.sep)
         i = s.index("project-files")
         if s[i + 1] in ("tmp", "captures", "snapshots"):
-            return True
+            return False
     except (ValueError, IndexError):
         pass
 
-    file_name = os.path.basename(path)
-    # Ignore log files and OS noises
-    if file_name.endswith('_log.txt') or file_name.endswith('.log') or file_name == '.DS_Store':
-        return True
+    # do not export log files and OS noise
+    filename = os.path.basename(path)
+    if filename.endswith('_log.txt') or filename.endswith('.log') or filename == '.DS_Store':
+        return False
 
-    return False
+    return True
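The rename from _filter_files() to _is_exportable() flips the predicate from "should be skipped" to "should be kept", which reads more naturally at the call sites. A sketch of how such a positive predicate is consumed (hypothetical collect_exportable helper, not part of the module):

    import os

    def collect_exportable(project_path, is_exportable):
        # Walk the project tree and yield only the files the predicate keeps.
        for root, dirs, files in os.walk(project_path, topdown=True):
            for name in files:
                path = os.path.join(root, name)
                if is_exportable(path):
                    yield path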
 
 
 @asyncio.coroutine
-def _export_project_file(project, path, z, include_images, keep_compute_id, allow_all_nodes, temporary_dir):
+def _patch_project_file(project, path, zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir):
     """
-    Take a project file (.gns3) and patch it for the export
+    Patch a project file (.gns3) to export a project.
+    The .gns3 file is renamed to project.gns3
 
-    We rename the .gns3 project.gns3 to avoid the task to the client to guess the file name
-
-    :param path: Path of the .gns3
+    :param path: path of the .gns3 file
     """
 
-    # Image file that we need to include in the exported archive
+    # image files that we need to include in the exported archive
     images = []
 
-    with open(path) as f:
-        topology = json.load(f)
+    try:
+        with open(path) as f:
+            topology = json.load(f)
+    except (OSError, ValueError) as e:
+        raise aiohttp.web.HTTPConflict(text="Project file '{}' cannot be read: {}".format(path, e))
 
     if "topology" in topology:
         if "nodes" in topology["topology"]:
@@ -152,9 +158,9 @@ def _export_project_file(project, path, z, include_images, keep_compute_id, allo
                 compute_id = node.get('compute_id', 'local')
 
                 if node["node_type"] == "virtualbox" and node.get("properties", {}).get("linked_clone"):
-                    raise aiohttp.web.HTTPConflict(text="Topology with a linked {} clone could not be exported. Use qemu instead.".format(node["node_type"]))
+                    raise aiohttp.web.HTTPConflict(text="Projects with a linked {} clone node cannot be exported. Please use Qemu instead.".format(node["node_type"]))
                 if not allow_all_nodes and node["node_type"] in ["virtualbox", "vmware", "cloud"]:
-                    raise aiohttp.web.HTTPConflict(text="Topology with a {} could not be exported".format(node["node_type"]))
+                    raise aiohttp.web.HTTPConflict(text="Projects with a {} node cannot be exported".format(node["node_type"]))
 
                 if not keep_compute_id:
                     node["compute_id"] = "local"  # To make project portable all node by default run on local
@@ -186,78 +192,71 @@ def _export_project_file(project, path, z, include_images, keep_compute_id, allo
     local_images = set([i['image'] for i in images if i['compute_id'] == 'local'])
 
     for image in local_images:
-        _export_local_images(project, image, z)
+        _export_local_image(image, zstream)
 
     remote_images = set([
         (i['compute_id'], i['image_type'], i['image'])
         for i in images if i['compute_id'] != 'local'])
 
     for compute_id, image_type, image in remote_images:
-        yield from _export_remote_images(project, compute_id, image_type, image, z, temporary_dir)
+        yield from _export_remote_images(project, compute_id, image_type, image, zstream, temporary_dir)
 
-    z.writestr("project.gns3", json.dumps(topology).encode())
-
+    zstream.writestr("project.gns3", json.dumps(topology).encode())
     return images
 
 
-def _export_local_images(project, image, z):
+def _export_local_image(image, zstream):
     """
-    Take a project file (.gns3) and export images to the zip
+    Exports a local image to the zip file.
 
-    :param image: Image path
-    :param z: Zipfile instance for the export
+    :param image: image path
+    :param zstream: Zipfile instance for the export
     """
 
     from ..compute import MODULES
 
     for module in MODULES:
         try:
-            img_directory = module.instance().get_images_directory()
+            images_directory = module.instance().get_images_directory()
         except NotImplementedError:
             # Some modules don't have images
             continue
 
-        directory = os.path.split(img_directory)[-1:][0]
+        directory = os.path.split(images_directory)[-1:][0]
         if os.path.exists(image):
             path = image
         else:
-            path = os.path.join(img_directory, image)
+            path = os.path.join(images_directory, image)
 
         if os.path.exists(path):
             arcname = os.path.join("images", directory, os.path.basename(image))
-            z.write(path, arcname)
+            zstream.write(path, arcname)
             return
 
 
 @asyncio.coroutine
 def _export_remote_images(project, compute_id, image_type, image, project_zipfile, temporary_dir):
     """
-    Export specific image from remote compute
-    :param project:
-    :param compute_id:
-    :param image_type:
-    :param image:
-    :param project_zipfile:
-    :return:
+    Export specific image from remote compute.
     """
 
-    log.info("Obtaining image `{}` from `{}`".format(image, compute_id))
+    log.info("Downloading image '{}' from compute server '{}'".format(image, compute_id))
 
     try:
         compute = [compute for compute in project.computes if compute.id == compute_id][0]
     except IndexError:
-        raise aiohttp.web.HTTPConflict(
-            text="Cannot export image from `{}` compute. Compute doesn't exist.".format(compute_id))
+        raise aiohttp.web.HTTPConflict(text="Cannot export image from '{}' compute. Compute doesn't exist.".format(compute_id))
 
     (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
     f = open(fd, "wb", closefd=True)
     response = yield from compute.download_image(image_type, image)
 
     if response.status != 200:
-        raise aiohttp.web.HTTPConflict(
-            text="Cannot export image from `{}` compute. Compute sent `{}` status.".format(
-                compute_id, response.status))
+        raise aiohttp.web.HTTPConflict(text="Cannot export image from '{}' compute. Compute returned status code {}.".format(compute_id, response.status))
 
     while True:
-        data = yield from response.content.read(512)
+        try:
+            data = yield from response.content.read(1024)
+        except asyncio.TimeoutError:
+            raise aiohttp.web.HTTPRequestTimeout(text="Timeout when downloading image '{}' from remote compute server {}:{}".format(image, compute.host, compute.port))
        if not data:
            break
        f.write(data)
@@ -26,7 +26,7 @@ import aiohttp
 import itertools
 
 from .topology import load_topology
+from ..utils.asyncio import wait_run_in_executor
 
 """
 Handle the import of project from a .gns3project
@@ -46,6 +46,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee
     :param location: Directory for the project if None put in the default directory
     :param name: Wanted project name, generate one from the .gns3 if None
     :param keep_compute_id: If true do not touch the compute id
 
     :returns: Project
     """
 
@@ -53,115 +54,118 @@
         raise aiohttp.web.HTTPConflict(text="The destination path should not contain .gns3")
 
     try:
-        with zipfile.ZipFile(stream) as myzip:
-            try:
-                topology = json.loads(myzip.read("project.gns3").decode())
-
-                # We import the project on top of an existing project (snapshots)
-                if topology["project_id"] == project_id:
-                    project_name = topology["name"]
-                else:
-                    # If the project name is already used we generate a new one
-                    if name:
-                        project_name = controller.get_free_project_name(name)
-                    else:
-                        project_name = controller.get_free_project_name(topology["name"])
-            except KeyError:
-                raise aiohttp.web.HTTPConflict(text="Cannot import topology the .gns3 is corrupted or missing")
-
-            if location:
-                path = location
-            else:
-                projects_path = controller.projects_directory()
-                path = os.path.join(projects_path, project_id)
-            try:
-                os.makedirs(path, exist_ok=True)
-            except UnicodeEncodeError as e:
-                raise aiohttp.web.HTTPConflict(text="The project name contain non supported or invalid characters")
-            myzip.extractall(path)
-
-            topology = load_topology(os.path.join(path, "project.gns3"))
-            topology["name"] = project_name
-            # To avoid unexpected behavior (project start without manual operations just after import)
-            topology["auto_start"] = False
-            topology["auto_open"] = False
-            topology["auto_close"] = True
-
-            # Generate a new node id
-            node_old_to_new = {}
-            for node in topology["topology"]["nodes"]:
-                if "node_id" in node:
-                    node_old_to_new[node["node_id"]] = str(uuid.uuid4())
-                    _move_node_file(path, node["node_id"], node_old_to_new[node["node_id"]])
-                    node["node_id"] = node_old_to_new[node["node_id"]]
-                else:
-                    node["node_id"] = str(uuid.uuid4())
-
-            # Update link to use new id
-            for link in topology["topology"]["links"]:
-                link["link_id"] = str(uuid.uuid4())
-                for node in link["nodes"]:
-                    node["node_id"] = node_old_to_new[node["node_id"]]
-
-            # Generate new drawings id
-            for drawing in topology["topology"]["drawings"]:
-                drawing["drawing_id"] = str(uuid.uuid4())
-
-            # Modify the compute id of the node depending of compute capacity
-            if not keep_compute_id:
-                # For some VM type we move them to the GNS3 VM if possible
-                # unless it's a linux host without GNS3 VM
-                if not sys.platform.startswith("linux") or controller.has_compute("vm"):
-                    for node in topology["topology"]["nodes"]:
-                        if node["node_type"] in ("docker", "qemu", "iou", "nat"):
-                            node["compute_id"] = "vm"
-                else:
-                    # Round-robin through available compute resources.
-                    compute_nodes = itertools.cycle(controller.computes)
-                    for node in topology["topology"]["nodes"]:
-                        node["compute_id"] = next(compute_nodes)
-
-            compute_created = set()
-            for node in topology["topology"]["nodes"]:
-                if node["compute_id"] != "local":
-                    # Project created on the remote GNS3 VM?
-                    if node["compute_id"] not in compute_created:
-                        compute = controller.get_compute(node["compute_id"])
-                        yield from compute.post("/projects", data={
-                            "name": project_name,
-                            "project_id": project_id,
-                        })
-                        compute_created.add(node["compute_id"])
-
-                    yield from _move_files_to_compute(compute, project_id, path, os.path.join("project-files", node["node_type"], node["node_id"]))
-
-            # And we dump the updated.gns3
-            dot_gns3_path = os.path.join(path, project_name + ".gns3")
-            # We change the project_id to avoid erasing the project
-            topology["project_id"] = project_id
-            with open(dot_gns3_path, "w+") as f:
-                json.dump(topology, f, indent=4)
-            os.remove(os.path.join(path, "project.gns3"))
-
-            if os.path.exists(os.path.join(path, "images")):
-                _import_images(controller, path)
-
-            project = yield from controller.load_project(dot_gns3_path, load=False)
-            return project
+        with zipfile.ZipFile(stream) as zip_file:
+            project_file = zip_file.read("project.gns3").decode()
     except zipfile.BadZipFile:
-        raise aiohttp.web.HTTPConflict(text="Can't import topology the file is corrupted or not a GNS3 project (invalid zip)")
+        raise aiohttp.web.HTTPConflict(text="Cannot import project, not a GNS3 project (invalid zip)")
+    except KeyError:
+        raise aiohttp.web.HTTPConflict(text="Cannot import project, project.gns3 file could not be found")
+
+    try:
+        topology = json.loads(project_file)
+        # We import the project on top of an existing project (snapshots)
+        if topology["project_id"] == project_id:
+            project_name = topology["name"]
+        else:
+            # If the project name is already used we generate a new one
+            if name:
+                project_name = controller.get_free_project_name(name)
+            else:
+                project_name = controller.get_free_project_name(topology["name"])
+    except (ValueError, KeyError):
+        raise aiohttp.web.HTTPConflict(text="Cannot import project, the project.gns3 file is corrupted")
+
+    if location:
+        path = location
+    else:
+        projects_path = controller.projects_directory()
+        path = os.path.join(projects_path, project_id)
+    try:
+        os.makedirs(path, exist_ok=True)
+    except UnicodeEncodeError:
+        raise aiohttp.web.HTTPConflict(text="The project name contain non supported or invalid characters")
+
+    try:
+        with zipfile.ZipFile(stream) as zip_file:
+            yield from wait_run_in_executor(zip_file.extractall, path)
+    except zipfile.BadZipFile:
+        raise aiohttp.web.HTTPConflict(text="Cannot extract files from GNS3 project (invalid zip)")
+
+    topology = load_topology(os.path.join(path, "project.gns3"))
+    topology["name"] = project_name
+    # To avoid unexpected behavior (project start without manual operations just after import)
+    topology["auto_start"] = False
+    topology["auto_open"] = False
+    topology["auto_close"] = True
+
+    # Generate a new node id
+    node_old_to_new = {}
+    for node in topology["topology"]["nodes"]:
+        if "node_id" in node:
+            node_old_to_new[node["node_id"]] = str(uuid.uuid4())
+            _move_node_file(path, node["node_id"], node_old_to_new[node["node_id"]])
+            node["node_id"] = node_old_to_new[node["node_id"]]
+        else:
+            node["node_id"] = str(uuid.uuid4())
+
+    # Update link to use new id
+    for link in topology["topology"]["links"]:
+        link["link_id"] = str(uuid.uuid4())
+        for node in link["nodes"]:
+            node["node_id"] = node_old_to_new[node["node_id"]]
+
+    # Generate new drawings id
+    for drawing in topology["topology"]["drawings"]:
+        drawing["drawing_id"] = str(uuid.uuid4())
+
+    # Modify the compute id of the node depending of compute capacity
+    if not keep_compute_id:
+        # For some VM type we move them to the GNS3 VM if possible
+        # unless it's a linux host without GNS3 VM
+        if not sys.platform.startswith("linux") or controller.has_compute("vm"):
+            for node in topology["topology"]["nodes"]:
+                if node["node_type"] in ("docker", "qemu", "iou", "nat"):
+                    node["compute_id"] = "vm"
+        else:
+            # Round-robin through available compute resources.
+            compute_nodes = itertools.cycle(controller.computes)
+            for node in topology["topology"]["nodes"]:
+                node["compute_id"] = next(compute_nodes)
+
+    compute_created = set()
+    for node in topology["topology"]["nodes"]:
+        if node["compute_id"] != "local":
+            # Project created on the remote GNS3 VM?
+            if node["compute_id"] not in compute_created:
+                compute = controller.get_compute(node["compute_id"])
+                yield from compute.post("/projects", data={"name": project_name, "project_id": project_id})
+                compute_created.add(node["compute_id"])
+            yield from _move_files_to_compute(compute, project_id, path, os.path.join("project-files", node["node_type"], node["node_id"]))
+
+    # And we dump the updated.gns3
+    dot_gns3_path = os.path.join(path, project_name + ".gns3")
+    # We change the project_id to avoid erasing the project
+    topology["project_id"] = project_id
+    with open(dot_gns3_path, "w+") as f:
+        json.dump(topology, f, indent=4)
+    os.remove(os.path.join(path, "project.gns3"))
+
+    if os.path.exists(os.path.join(path, "images")):
+        _import_images(controller, path)
+
+    project = yield from controller.load_project(dot_gns3_path, load=False)
+    return project
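The new import path funnels blocking filesystem work (zip extraction, rmtree) through wait_run_in_executor so the controller's event loop stays responsive. Assuming that helper is a thin wrapper over the loop's default thread pool, it boils down to something like this sketch:

    import asyncio
    import shutil

    @asyncio.coroutine
    def rmtree_async(path):
        # Run a blocking call on the default executor; the coroutine resumes
        # with its result while the loop keeps serving other requests.
        loop = asyncio.get_event_loop()
        yield from loop.run_in_executor(None, shutil.rmtree, path)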
 def _move_node_file(path, old_id, new_id):
     """
-    Move the files from a node when changing his id
+    Move a file from a node when changing its id
 
     :param path: Path of the project
     :param old_id: ID before change
     :param new_id: New node UUID
     """
 
     root = os.path.join(path, "project-files")
     if os.path.exists(root):
         for dirname in os.listdir(root):
|
|||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
def _move_files_to_compute(compute, project_id, directory, files_path):
|
def _move_files_to_compute(compute, project_id, directory, files_path):
|
||||||
"""
|
"""
|
||||||
Move the files to a remote compute
|
Move files to a remote compute
|
||||||
"""
|
"""
|
||||||
|
|
||||||
location = os.path.join(directory, files_path)
|
location = os.path.join(directory, files_path)
|
||||||
if os.path.exists(location):
|
if os.path.exists(location):
|
||||||
for (dirpath, dirnames, filenames) in os.walk(location):
|
for (dirpath, dirnames, filenames) in os.walk(location):
|
||||||
@ -184,7 +189,7 @@ def _move_files_to_compute(compute, project_id, directory, files_path):
|
|||||||
path = os.path.join(dirpath, filename)
|
path = os.path.join(dirpath, filename)
|
||||||
dst = os.path.relpath(path, directory)
|
dst = os.path.relpath(path, directory)
|
||||||
yield from _upload_file(compute, project_id, path, dst)
|
yield from _upload_file(compute, project_id, path, dst)
|
||||||
shutil.rmtree(os.path.join(directory, files_path))
|
yield from wait_run_in_executor(shutil.rmtree, os.path.join(directory, files_path))
|
||||||
|
|
||||||
|
|
||||||
@asyncio.coroutine
|
@asyncio.coroutine
|
||||||
@@ -195,6 +200,7 @@ def _upload_file(compute, project_id, file_path, path):
     :param file_path: File path on the controller file system
     :param path: File path on the remote system relative to project directory
     """
 
     path = "/projects/{}/files/{}".format(project_id, path.replace("\\", "/"))
     with open(file_path, "rb") as f:
         yield from compute.http_query("POST", path, f, timeout=None)
@ -202,11 +208,10 @@ def _upload_file(compute, project_id, file_path, path):
|
|||||||
|
|
||||||
def _import_images(controller, path):
|
def _import_images(controller, path):
|
||||||
"""
|
"""
|
||||||
Copy images to the images directory or delete them if they
|
Copy images to the images directory or delete them if they already exists.
|
||||||
already exists.
|
|
||||||
"""
|
"""
|
||||||
image_dir = controller.images_path()
|
|
||||||
|
|
||||||
|
image_dir = controller.images_path()
|
||||||
root = os.path.join(path, "images")
|
root = os.path.join(path, "images")
|
||||||
for (dirpath, dirnames, filenames) in os.walk(root):
|
for (dirpath, dirnames, filenames) in os.walk(root):
|
||||||
for filename in filenames:
|
for filename in filenames:
|
||||||
|
@@ -37,6 +37,7 @@ from ..config import Config
 from ..utils.path import check_path_allowed, get_default_project_directory
 from ..utils.asyncio.pool import Pool
 from ..utils.asyncio import locked_coroutine, asyncio_ensure_future
+from ..utils.asyncio import wait_run_in_executor
 from .export_project import export_project
 from .import_project import import_project
 
@@ -662,27 +663,10 @@ class Project:
         :param name: Name of the snapshot
         """
 
-        if name in [snap.name for snap in self.snapshots.values()]:
-            raise aiohttp.web_exceptions.HTTPConflict(text="The snapshot {} already exist".format(name))
+        if name in [snap.name for snap in self._snapshots.values()]:
+            raise aiohttp.web.HTTPConflict(text="The snapshot name {} already exists".format(name))
 
         snapshot = Snapshot(self, name=name)
-        try:
-            if os.path.exists(snapshot.path):
-                raise aiohttp.web_exceptions.HTTPConflict(text="The snapshot {} already exist".format(name))
-
-            os.makedirs(os.path.join(self.path, "snapshots"), exist_ok=True)
-
-            with tempfile.TemporaryDirectory() as tmpdir:
-                zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
-                try:
-                    with open(snapshot.path, "wb") as f:
-                        for data in zipstream:
-                            f.write(data)
-                except OSError as e:
-                    raise aiohttp.web.HTTPConflict(text="Could not write snapshot file '{}': {}".format(snapshot.path, e))
-        except OSError as e:
-            raise aiohttp.web.HTTPInternalServerError(text="Could not create project directory: {}".format(e))
-
+        yield from snapshot.create()
         self._snapshots[snapshot.id] = snapshot
         return snapshot
@@ -891,6 +875,15 @@ class Project:
         while self._loading:
             yield from asyncio.sleep(0.5)
 
+    def _create_duplicate_project_file(self, path, zipstream):
+        """
+        Creates the project file (to be run in its own thread)
+        """
+
+        with open(path, "wb") as f:
+            for data in zipstream:
+                f.write(data)
+
     @asyncio.coroutine
     def duplicate(self, name=None, location=None):
         """
@@ -913,10 +906,9 @@ class Project:
         try:
             with tempfile.TemporaryDirectory() as tmpdir:
                 zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
-                with open(os.path.join(tmpdir, "project.gns3p"), "wb") as f:
-                    for data in zipstream:
-                        f.write(data)
-                with open(os.path.join(tmpdir, "project.gns3p"), "rb") as f:
+                project_path = os.path.join(tmpdir, "project.gns3p")
+                yield from wait_run_in_executor(self._create_duplicate_project_file, project_path, zipstream)
+                with open(project_path, "rb") as f:
                     project = yield from import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
         except (OSError, UnicodeEncodeError) as e:
             raise aiohttp.web.HTTPConflict(text="Can not duplicate project: {}".format(str(e)))
@@ -19,11 +19,13 @@
 import os
 import uuid
 import shutil
+import tempfile
 import asyncio
 import aiohttp.web
 from datetime import datetime, timezone
 
+from ..utils.asyncio import wait_run_in_executor
+from .export_project import export_project
 from .import_project import import_project
 
 
@@ -71,6 +73,37 @@ class Snapshot:
     def created_at(self):
         return int(self._created_at)
 
+    def _create_snapshot_file(self, zipstream):
+        """
+        Creates the snapshot file (to be run in its own thread)
+        """
+
+        with open(self.path, "wb") as f:
+            for data in zipstream:
+                f.write(data)
+
+    @asyncio.coroutine
+    def create(self):
+        """
+        Create the snapshot
+        """
+
+        if os.path.exists(self.path):
+            raise aiohttp.web.HTTPConflict(text="The snapshot file '{}' already exists".format(self.name))
+
+        snapshot_directory = os.path.join(self._project.path, "snapshots")
+        try:
+            os.makedirs(snapshot_directory, exist_ok=True)
+        except OSError as e:
+            raise aiohttp.web.HTTPInternalServerError(text="Could not create the snapshot directory '{}': {}".format(snapshot_directory, e))
+
+        try:
+            with tempfile.TemporaryDirectory() as tmpdir:
+                zipstream = yield from export_project(self._project, tmpdir, keep_compute_id=True, allow_all_nodes=True)
+                yield from wait_run_in_executor(self._create_snapshot_file, zipstream)
+        except OSError as e:
+            raise aiohttp.web.HTTPConflict(text="Could not create snapshot file '{}': {}".format(self.path, e))
+
     @asyncio.coroutine
     def restore(self):
         """
@@ -78,18 +111,21 @@
         """
 
         yield from self._project.delete_on_computes()
-        # We don't send close notif to clients because the close / open dance is purely internal
+        # We don't send close notification to clients because the close / open dance is purely internal
         yield from self._project.close(ignore_notification=True)
-        self._project.controller.notification.emit("snapshot.restored", self.__json__())
         try:
-            if os.path.exists(os.path.join(self._project.path, "project-files")):
-                shutil.rmtree(os.path.join(self._project.path, "project-files"))
+            # delete the current project files
+            project_files_path = os.path.join(self._project.path, "project-files")
+            if os.path.exists(project_files_path):
+                yield from wait_run_in_executor(shutil.rmtree, project_files_path)
             with open(self._path, "rb") as f:
                 project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path)
         except (OSError, PermissionError) as e:
             raise aiohttp.web.HTTPConflict(text=str(e))
         yield from project.open()
-        return project
+        self._project.controller.notification.emit("snapshot.restored", self.__json__())
+        return self._project
 
     def __json__(self):
         return {
@@ -57,7 +57,7 @@ class CrashReport:
     Report crash to a third party service
     """
 
-    DSN = "sync+https://6b6c2ce19b8545278f7ee00c333175a6:be17229ec8da460e9a126d02b82de5dc@sentry.io/38482"
+    DSN = "sync+https://f732825cd5004443b62a937d7d28c3bf:9e2bb2ac3f07496693fc9839c6193e20@sentry.io/38482"
     if hasattr(sys, "frozen"):
         cacert = get_resource("cacert.pem")
         if cacert is not None and os.path.isfile(cacert):
@@ -319,10 +319,13 @@ class ProjectHandler:
             os.makedirs(os.path.dirname(path), exist_ok=True)
             with open(path, 'wb+') as f:
                 while True:
-                    packet = yield from request.content.read(512)
-                    if not packet:
+                    try:
+                        chunk = yield from request.content.read(1024)
+                    except asyncio.TimeoutError:
+                        raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
+                    if not chunk:
                         break
-                    f.write(packet)
+                    f.write(chunk)
 
         except FileNotFoundError:
             raise aiohttp.web.HTTPNotFound()
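This 512-to-1024-byte, packet-to-chunk change recurs across the handler hunks below; some call sites also gain the asyncio.TimeoutError guard. The shared shape, factored into one hypothetical helper (not present in the codebase):

    import asyncio
    import aiohttp.web

    @asyncio.coroutine
    def save_stream_to_file(content, path, chunk_size=1024):
        # Copy an aiohttp request body to disk chunk by chunk, mapping a
        # stalled client onto 408 Request Timeout.
        with open(path, "wb") as f:
            while True:
                try:
                    chunk = yield from content.read(chunk_size)
                except asyncio.TimeoutError:
                    raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
                if not chunk:
                    break
                f.write(chunk)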
@@ -380,10 +383,10 @@ class ProjectHandler:
         try:
             with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
                 while True:
-                    packet = yield from request.content.read(512)
-                    if not packet:
+                    chunk = yield from request.content.read(1024)
+                    if not chunk:
                         break
-                    temp.write(packet)
+                    temp.write(chunk)
                 project.import_zip(temp, gns3vm=bool(int(request.GET.get("gns3vm", "1"))))
         except OSError as e:
             raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
@@ -73,7 +73,7 @@ class LinkHandler:
                                        node.get("adapter_number", 0),
                                        node.get("port_number", 0),
                                        label=node.get("label"))
-        except aiohttp.web_exceptions.HTTPException as e:
+        except aiohttp.web.HTTPException as e:
             yield from project.delete_link(link.id)
             raise e
         response.set_status(201)
@@ -428,8 +428,6 @@ class NodeHandler:
 
         node_type = node.node_type
         path = "/project-files/{}/{}/{}".format(node_type, node.id, path)
-        data = yield from request.content.read()
-
+        data = yield from request.content.read()  #FIXME: are we handling timeout or large files correctly?
 
         yield from node.compute.http_query("POST", "/projects/{project_id}/files{path}".format(project_id=project.id, path=path), data=data, timeout=None, raw=True)
         response.set_status(201)
@@ -350,10 +350,10 @@ class ProjectHandler:
         try:
             with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
                 while True:
-                    packet = yield from request.content.read(512)
-                    if not packet:
+                    chunk = yield from request.content.read(1024)
+                    if not chunk:
                         break
-                    temp.write(packet)
+                    temp.write(chunk)
                 project = yield from import_project(controller, request.match_info["project_id"], temp, location=path, name=name)
         except OSError as e:
             raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
@@ -463,10 +463,13 @@ class ProjectHandler:
         try:
             with open(path, 'wb+') as f:
                 while True:
-                    packet = yield from request.content.read(512)
-                    if not packet:
+                    try:
+                        chunk = yield from request.content.read(1024)
+                    except asyncio.TimeoutError:
+                        raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to file '{}'".format(path))
+                    if not chunk:
                         break
-                    f.write(packet)
+                    f.write(chunk)
         except FileNotFoundError:
             raise aiohttp.web.HTTPNotFound()
         except PermissionError:
@@ -134,7 +134,7 @@ class ServerHandler:
 
     @Route.post(
         r"/debug",
-        description="Dump debug informations to disk (debug directory in config directory). Work only for local server",
+        description="Dump debug information to disk (debug directory in config directory). Work only for local server",
         status_codes={
             201: "Writed"
         })
@@ -17,6 +17,8 @@
 
 import os
 import aiohttp
+import asyncio
 
 from gns3server.web.route import Route
 from gns3server.controller import Controller
 
@@ -66,10 +68,13 @@ class SymbolHandler:
         try:
             with open(path, 'wb') as f:
                 while True:
-                    packet = yield from request.content.read(512)
-                    if not packet:
+                    try:
+                        chunk = yield from request.content.read(1024)
+                    except asyncio.TimeoutError:
+                        raise aiohttp.web.HTTPRequestTimeout(text="Timeout when writing to symbol '{}'".format(path))
+                    if not chunk:
                         break
-                    f.write(packet)
+                    f.write(chunk)
         except OSError as e:
             raise aiohttp.web.HTTPConflict(text="Could not write symbol file '{}': {}".format(path, e))
         # Reset the symbol list
@@ -208,8 +208,8 @@ class UBridgeHypervisor:
             self._writer.write(command.encode())
             yield from self._writer.drain()
         except OSError as e:
-            raise UbridgeError("Lost communication with {host}:{port} :{error}, Dynamips process running: {run}"
-                               .format(host=self._host, port=self._port, error=e, run=self.is_running()))
+            raise UbridgeError("Lost communication with {host}:{port} when sending command '{command}': {error}, uBridge process running: {run}"
+                               .format(host=self._host, port=self._port, command=command, error=e, run=self.is_running()))
 
         # Now retrieve the result
         data = []
@@ -232,8 +232,8 @@ class UBridgeHypervisor:
                 continue
             if not chunk:
                 if retries > max_retries:
-                    raise UbridgeError("No data returned from {host}:{port}, uBridge process running: {run}"
-                                       .format(host=self._host, port=self._port, run=self.is_running()))
+                    raise UbridgeError("No data returned from {host}:{port} after sending command '{command}', uBridge process running: {run}"
+                                       .format(host=self._host, port=self._port, command=command, run=self.is_running()))
                 else:
                     retries += 1
                     yield from asyncio.sleep(0.1)
@@ -241,16 +241,16 @@ class UBridgeHypervisor:
                 retries = 0
                 buf += chunk.decode("utf-8")
             except OSError as e:
-                raise UbridgeError("Lost communication with {host}:{port} :{error}, uBridge process running: {run}"
-                                   .format(host=self._host, port=self._port, error=e, run=self.is_running()))
+                raise UbridgeError("Lost communication with {host}:{port} after sending command '{command}': {error}, uBridge process running: {run}"
+                                   .format(host=self._host, port=self._port, command=command, error=e, run=self.is_running()))
 
             # If the buffer doesn't end in '\n' then we can't be done
             try:
                 if buf[-1] != '\n':
                     continue
             except IndexError:
-                raise UbridgeError("Could not communicate with {host}:{port}, uBridge process running: {run}"
-                                   .format(host=self._host, port=self._port, run=self.is_running()))
+                raise UbridgeError("Could not communicate with {host}:{port} after sending command '{command}', uBridge process running: {run}"
+                                   .format(host=self._host, port=self._port, command=command, run=self.is_running()))
 
             data += buf.split('\r\n')
             if data[-1] == '':
@@ -28,7 +28,7 @@ from unittest.mock import MagicMock
 from tests.utils import AsyncioMagicMock, AsyncioBytesIO
 
 from gns3server.controller.project import Project
-from gns3server.controller.export_project import export_project, _filter_files
+from gns3server.controller.export_project import export_project, _is_exportable
 
 
 @pytest.fixture
@@ -51,14 +51,14 @@ def node(controller, project, async_run):
     return node
 
 
-def test_filter_files():
-    assert not _filter_files("hello/world")
-    assert _filter_files("project-files/tmp")
-    assert _filter_files("project-files/test_log.txt")
-    assert _filter_files("project-files/test.log")
-    assert _filter_files("test/snapshots")
-    assert _filter_files("test/project-files/snapshots")
-    assert _filter_files("test/project-files/snapshots/test.gns3p")
+def test_exportable_files():
+    assert _is_exportable("hello/world")
+    assert not _is_exportable("project-files/tmp")
+    assert not _is_exportable("project-files/test_log.txt")
+    assert not _is_exportable("project-files/test.log")
+    assert not _is_exportable("test/snapshots")
+    assert not _is_exportable("test/project-files/snapshots")
+    assert not _is_exportable("test/project-files/snapshots/test.gns3p")
 
 
 def test_export(tmpdir, project, async_run):