Export files from remote server, Fixes: gui/#2271
commit b48f5df53f (parent 135c529d4b)
@@ -538,7 +538,7 @@ class BaseManager:
         path = os.path.abspath(os.path.join(directory, *os.path.split(filename)))
         if os.path.commonprefix([directory, path]) != directory:
             raise aiohttp.web.HTTPForbidden(text="Could not write image: {}, {} is forbiden".format(filename, path))
-        log.info("Writting image file %s", path)
+        log.info("Writing image file %s", path)
         try:
             remove_checksum(path)
             # We store the file under his final name only when the upload is finished
@@ -320,6 +320,22 @@ class Compute:
             raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(path))
         return response

+    @asyncio.coroutine
+    def download_image(self, image_type, image):
+        """
+        Read file of a project and download it
+
+        :param image_type: Image type
+        :param image: The path of the image
+        :returns: A file stream
+        """
+
+        url = self._getUrl("/{}/images/{}".format(image_type, image))
+        response = yield from self._session().request("GET", url, auth=self._auth)
+        if response.status == 404:
+            raise aiohttp.web.HTTPNotFound(text="{} not found on compute".format(image))
+        return response
+
     @asyncio.coroutine
     def stream_file(self, project, path):
         """
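For orientation, here is a minimal sketch of how a caller could consume the new download_image() coroutine and stream the result to disk. The helper name, chunk size and error handling are illustrative only; the real consumer added by this commit is _export_remote_images() further down.

import asyncio

@asyncio.coroutine
def save_remote_image(compute, image_type, image, dst_path):
    # Hypothetical helper: stream an image from a remote compute to a local file.
    response = yield from compute.download_image(image_type, image)
    if response.status != 200:
        raise RuntimeError("Compute returned status {}".format(response.status))
    try:
        with open(dst_path, "wb") as f:
            while True:
                data = yield from response.content.read(4096)
                if not data:
                    break
                f.write(data)
    finally:
        response.close()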
@@ -58,7 +58,8 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id
     # First we process the .gns3 in order to be sure we don't have an error
     for file in os.listdir(project._path):
         if file.endswith(".gns3"):
-            _export_project_file(project, os.path.join(project._path, file), z, include_images, keep_compute_id, allow_all_nodes)
+            images = yield from _export_project_file(project, os.path.join(project._path, file),
+                                                     z, include_images, keep_compute_id, allow_all_nodes, temporary_dir)

     for root, dirs, files in os.walk(project._path, topdown=True):
         files = [f for f in files if not _filter_files(os.path.join(root, f))]
@@ -78,6 +79,8 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id
             else:
                 z.write(path, os.path.relpath(path, project._path), compress_type=zipfile.ZIP_DEFLATED)

+    downloaded_files = set()
+
     for compute in project.computes:
         if compute.id != "local":
             compute_files = yield from compute.list_files(project)
@@ -94,6 +97,8 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id
                 response.close()
                 f.close()
                 z.write(temp_path, arcname=compute_file["path"], compress_type=zipfile.ZIP_DEFLATED)
+                downloaded_files.add(compute_file['path'])
+
     return z

@@ -121,7 +126,8 @@ def _filter_files(path):
     return False


-def _export_project_file(project, path, z, include_images, keep_compute_id, allow_all_nodes):
+@asyncio.coroutine
+def _export_project_file(project, path, z, include_images, keep_compute_id, allow_all_nodes, temporary_dir):
     """
     Take a project file (.gns3) and patch it for the export

@@ -131,7 +137,7 @@ def _export_project_file(project, path, z, include_images, keep_compute_id, allo
     """

     # Image file that we need to include in the exported archive
-    images = set()
+    images = []

     with open(path) as f:
         topology = json.load(f)
@@ -139,6 +145,8 @@ def _export_project_file(project, path, z, include_images, keep_compute_id, allo
     if "topology" in topology:
         if "nodes" in topology["topology"]:
             for node in topology["topology"]["nodes"]:
+                compute_id = node.get('compute_id', 'local')
+
                 if node["node_type"] == "virtualbox" and node.get("properties", {}).get("linked_clone"):
                     raise aiohttp.web.HTTPConflict(text="Topology with a linked {} clone could not be exported. Use qemu instead.".format(node["node_type"]))
                 if not allow_all_nodes and node["node_type"] in ["virtualbox", "vmware", "cloud"]:
@@ -149,22 +157,42 @@ def _export_project_file(project, path, z, include_images, keep_compute_id, allo

                 if "properties" in node and node["node_type"] != "docker":
                     for prop, value in node["properties"].items():
-                        if prop.endswith("image"):
-                            if not keep_compute_id:  # If we keep the original compute we can keep the image path
-                                node["properties"][prop] = os.path.basename(value)
-                            if include_images is True:
-                                images.add(value)
+                        if not prop.endswith("image"):
+                            continue
+
+                        if value is None or value.strip() == '':
+                            continue
+
+                        if not keep_compute_id:  # If we keep the original compute we can keep the image path
+                            node["properties"][prop] = os.path.basename(value)
+
+                        if include_images is True:
+                            images.append({
+                                'compute_id': compute_id,
+                                'image': value,
+                                'image_type': node['node_type']
+                            })

     if not keep_compute_id:
-        topology["topology"]["computes"] = []  # Strip compute informations because could contain secret info like password
+        topology["topology"]["computes"] = []  # Strip compute information because could contain secret info like password

-    for image in images:
-        _export_images(project, image, z)
+    local_images = set([i['image'] for i in images if i['compute_id'] == 'local'])
+
+    for image in local_images:
+        _export_local_images(project, image, z)
+
+    remote_images = set([
+        (i['compute_id'], i['image_type'], i['image'])
+        for i in images if i['compute_id'] != 'local'])
+
+    for compute_id, image_type, image in remote_images:
+        yield from _export_remote_images(project, compute_id, image_type, image, z, temporary_dir)

     z.writestr("project.gns3", json.dumps(topology).encode())

+    return images
+

-def _export_images(project, image, z):
+def _export_local_images(project, image, z):
     """
     Take a project file (.gns3) and export images to the zip

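To make the new bookkeeping concrete, a hypothetical images list and the resulting local/remote split could look like the sketch below; the paths and compute ids are invented for illustration.

# Hypothetical result of scanning a topology: one local image, one on compute "vm".
images = [
    {'compute_id': 'local', 'image': '/home/user/GNS3/images/IOS/c3725.image', 'image_type': 'dynamips'},
    {'compute_id': 'vm', 'image': 'test.image', 'image_type': 'dynamips'},
]

local_images = set(i['image'] for i in images if i['compute_id'] == 'local')
# -> {'/home/user/GNS3/images/IOS/c3725.image'}, exported straight from the controller's disk

remote_images = set((i['compute_id'], i['image_type'], i['image'])
                    for i in images if i['compute_id'] != 'local')
# -> {('vm', 'dynamips', 'test.image')}, fetched over the compute API before being zipped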
@@ -191,4 +219,45 @@ def _export_images(project, image, z):
             arcname = os.path.join("images", directory, os.path.basename(image))
             z.write(path, arcname)
             return
     raise aiohttp.web.HTTPConflict(text="Topology could not be exported because the image {} is not available. If you use multiple server, we need a copy of the image on the main server.".format(image))
+
+
+@asyncio.coroutine
+def _export_remote_images(project, compute_id, image_type, image, project_zipfile, temporary_dir):
+    """
+    Export specific image from remote compute
+    :param project:
+    :param compute_id:
+    :param image_type:
+    :param image:
+    :param project_zipfile:
+    :return:
+    """
+
+    log.info("Obtaining image `{}` from `{}`".format(image, compute_id))
+
+    try:
+        compute = [compute for compute in project.computes if compute.id == compute_id][0]
+    except IndexError:
+        raise aiohttp.web.HTTPConflict(
+            text="Cannot export image from `{}` compute. Compute doesn't exist.".format(compute_id))
+
+    (fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
+    f = open(fd, "wb", closefd=True)
+    response = yield from compute.download_image(image_type, image)
+
+    if response.status != 200:
+        raise aiohttp.web.HTTPConflict(
+            text="Cannot export image from `{}` compute. Compute sent `{}` status.".format(
+                compute_id, response.status))
+
+    while True:
+        data = yield from response.content.read(512)
+        if not data:
+            break
+        f.write(data)
+    response.close()
+    f.close()
+    arcname = os.path.join("images", image_type, image)
+    log.info("Saved {}".format(arcname))
+    project_zipfile.write(temp_path, arcname=arcname, compress_type=zipfile.ZIP_DEFLATED)
@@ -448,6 +448,28 @@ class DynamipsVMHandler:
         yield from dynamips_manager.write_image(request.match_info["filename"], request.content)
         response.set_status(204)

+    @Route.get(
+        r"/dynamips/images/{filename:.+}",
+        parameters={
+            "filename": "Image filename"
+        },
+        status_codes={
+            200: "Image returned",
+        },
+        raw=True,
+        description="Download a Dynamips IOS image")
+    def download_image(request, response):
+        filename = request.match_info["filename"]
+
+        dynamips_manager = Dynamips.instance()
+        image_path = dynamips_manager.get_abs_image_path(filename)
+
+        # Raise error if user try to escape
+        if filename[0] == ".":
+            raise aiohttp.web.HTTPForbidden
+
+        yield from response.file(image_path)
+
     @Route.post(
         r"/projects/{project_id}/dynamips/nodes/{node_id}/duplicate",
         parameters={
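Equivalent GET routes are added for IOU and Qemu images further down. As a rough client-side illustration (not part of the commit), the endpoint could be fetched and streamed to disk as below. This assumes a compute server on the default port 3080, the usual /v2/compute URL prefix, and no authentication; the image filename is made up.

import requests  # third-party HTTP client, used here only for illustration

filename = "c7200-adventerprisek9-mz.152-4.S5.image"  # hypothetical image name
url = "http://127.0.0.1:3080/v2/compute/dynamips/images/{}".format(filename)

r = requests.get(url, stream=True)
r.raise_for_status()
with open(filename, "wb") as f:
    # Stream the body in chunks rather than loading the whole image in memory.
    for chunk in r.iter_content(chunk_size=4096):
        f.write(chunk)
r.close()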
@@ -467,3 +489,4 @@ class DynamipsVMHandler:
         )
         response.set_status(201)
         response.json(new_node)
+
@@ -16,7 +16,8 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 import os
-from aiohttp.web import HTTPConflict
+
+import aiohttp.web

 from gns3server.web.route import Route
 from gns3server.schemas.nio import NIO_SCHEMA
@@ -239,7 +240,7 @@ class IOUHandler:
         vm = iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
         nio_type = request.json["type"]
         if nio_type not in ("nio_udp", "nio_tap", "nio_ethernet", "nio_generic_ethernet"):
-            raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
+            raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
         nio = iou_manager.create_nio(request.json)
         yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
         response.set_status(201)
@@ -378,3 +379,26 @@ class IOUHandler:
         iou_manager = IOU.instance()
         yield from iou_manager.write_image(request.match_info["filename"], request.content)
         response.set_status(204)
+
+
+    @Route.get(
+        r"/iou/images/{filename:.+}",
+        parameters={
+            "filename": "Image filename"
+        },
+        status_codes={
+            200: "Image returned",
+        },
+        raw=True,
+        description="Download an IOU image")
+    def download_image(request, response):
+        filename = request.match_info["filename"]
+
+        iou_manager = IOU.instance()
+        image_path = iou_manager.get_abs_image_path(filename)
+
+        # Raise error if user try to escape
+        if filename[0] == ".":
+            raise aiohttp.web.HTTPForbidden
+
+        yield from response.file(image_path)
@@ -18,7 +18,7 @@
 import sys
 import os.path

-from aiohttp.web import HTTPConflict
+import aiohttp.web

 from gns3server.web.route import Route
 from gns3server.compute.project_manager import ProjectManager
@@ -183,7 +183,7 @@ class QEMUHandler:
         if sys.platform.startswith("linux") and qemu_manager.config.get_section_config("Qemu").getboolean("enable_kvm", True) and "-no-kvm" not in vm.options:
             pm = ProjectManager.instance()
             if pm.check_hardware_virtualization(vm) is False:
-                raise HTTPConflict(text="Cannot start VM with KVM enabled because hardware virtualization (VT-x/AMD-V) is already used by another software like VMware or VirtualBox")
+                raise aiohttp.web.HTTPConflict(text="Cannot start VM with KVM enabled because hardware virtualization (VT-x/AMD-V) is already used by another software like VMware or VirtualBox")
         yield from vm.start()
         response.json(vm)

@@ -285,7 +285,7 @@ class QEMUHandler:
         vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
         nio_type = request.json["type"]
         if nio_type not in ("nio_udp", "nio_tap", "nio_nat"):
-            raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
+            raise aiohttp.web.HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
         nio = qemu_manager.create_nio(request.json)
         yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
         response.set_status(201)
@@ -479,3 +479,25 @@ class QEMUHandler:
         qemu_manager = Qemu.instance()
         yield from qemu_manager.write_image(request.match_info["filename"], request.content)
         response.set_status(204)
+
+    @Route.get(
+        r"/qemu/images/{filename:.+}",
+        parameters={
+            "filename": "Image filename"
+        },
+        status_codes={
+            200: "Image returned",
+        },
+        raw=True,
+        description="Download Qemu image")
+    def download_image(request, response):
+        filename = request.match_info["filename"]
+
+        iou_manager = Qemu.instance()
+        image_path = iou_manager.get_abs_image_path(filename)
+
+        # Raise error if user try to escape
+        if filename[0] == ".":
+            raise aiohttp.web.HTTPForbidden
+
+        yield from response.file(image_path)
@@ -113,7 +113,7 @@ class Response(aiohttp.web.Response):
         self.body = json.dumps(answer, indent=4, sort_keys=True).encode('utf-8')

     @asyncio.coroutine
-    def file(self, path):
+    def file(self, path, status=200, set_content_length=True):
         """
         Return a file as a response
         """
@@ -124,27 +124,34 @@ class Response(aiohttp.web.Response):
         self.headers[aiohttp.hdrs.CONTENT_ENCODING] = encoding
         self.content_type = ct

-        st = os.stat(path)
-        self.last_modified = st.st_mtime
-        self.headers[aiohttp.hdrs.CONTENT_LENGTH] = str(st.st_size)
+        if set_content_length:
+            st = os.stat(path)
+            self.last_modified = st.st_mtime
+            self.headers[aiohttp.hdrs.CONTENT_LENGTH] = str(st.st_size)
+        else:
+            self.enable_chunked_encoding()

-        with open(path, 'rb') as fobj:
-            yield from self.prepare(self._request)
-            chunk_size = 4096
-            chunk = fobj.read(chunk_size)
-            while chunk:
-                self.write(chunk)
-                yield from self.drain()
-                chunk = fobj.read(chunk_size)
+        self.set_status(status)

-            if chunk:
-                self.write(chunk[:count])
-                yield from self.drain()
+        try:
+            with open(path, 'rb') as fobj:
+                yield from self.prepare(self._request)
+
+                while True:
+                    data = fobj.read(4096)
+                    if not data:
+                        break
+                    yield from self.write(data)
+                    yield from self.drain()
+
+        except FileNotFoundError:
+            raise aiohttp.web.HTTPNotFound()
+        except PermissionError:
+            raise aiohttp.web.HTTPForbidden()

     def redirect(self, url):
         """
         Redirect to url

         :params url: Redirection URL
         """
         raise aiohttp.web.HTTPFound(url)
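The new parameters matter for proxied downloads: when the final size is not known up front (for example when relaying a file from a remote compute), a handler can skip the Content-Length header and let the response fall back to chunked transfer encoding. A minimal, hypothetical handler body:

# Hypothetical usage inside a handler coroutine; image_path is whatever file the
# handler resolved. Passing set_content_length=False skips the os.stat() call and
# switches the response to chunked transfer encoding, as implemented above.
yield from response.file(image_path, set_content_length=False)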
@@ -351,3 +351,69 @@ def test_export_keep_compute_id(tmpdir, project, async_run):
         topo = json.loads(myfile.read().decode())["topology"]
         assert topo["nodes"][0]["compute_id"] == "6b7149c8-7d6e-4ca0-ab6b-daa8ab567be0"
         assert len(topo["computes"]) == 1
+
+
+def test_export_images_from_vm(tmpdir, project, async_run, controller):
+    """
+    If data is on a remote server export it locally before
+    sending it in the archive.
+    """
+
+    compute = MagicMock()
+    compute.id = "vm"
+    compute.list_files = AsyncioMagicMock(return_value=[
+        {"path": "vm-1/dynamips/test"}
+    ])
+
+    # Fake file that will be download from the vm
+    mock_response = AsyncioMagicMock()
+    mock_response.content = AsyncioBytesIO()
+    async_run(mock_response.content.write(b"HELLO"))
+    mock_response.content.seek(0)
+    mock_response.status = 200
+    compute.download_file = AsyncioMagicMock(return_value=mock_response)
+
+    mock_response = AsyncioMagicMock()
+    mock_response.content = AsyncioBytesIO()
+    async_run(mock_response.content.write(b"IMAGE"))
+    mock_response.content.seek(0)
+    mock_response.status = 200
+    compute.download_image = AsyncioMagicMock(return_value=mock_response)
+
+    project._project_created_on_compute.add(compute)
+
+    path = project.path
+    os.makedirs(os.path.join(path, "vm-1", "dynamips"))
+
+    topology = {
+        "topology": {
+            "nodes": [
+                {
+                    "compute_id": "vm",
+                    "properties": {
+                        "image": "test.image"
+                    },
+                    "node_type": "dynamips"
+                }
+            ]
+        }
+    }
+
+    # The .gns3 should be renamed project.gns3 in order to simplify import
+    with open(os.path.join(path, "test.gns3"), 'w+') as f:
+        f.write(json.dumps(topology))
+
+    z = async_run(export_project(project, str(tmpdir), include_images=True))
+    assert compute.list_files.called
+
+    with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
+        for data in z:
+            f.write(data)
+
+    with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
+        with myzip.open("vm-1/dynamips/test") as myfile:
+            content = myfile.read()
+            assert content == b"HELLO"
+
+        with myzip.open("images/dynamips/test.image") as myfile:
+            content = myfile.read()
+            assert content == b"IMAGE"