Mirror of https://github.com/GNS3/gns3-server (synced 2024-11-24 17:28:08 +00:00)

Merging 2.1 into 2.2

Commit 5754747a90

Changed file: CHANGELOG (12 additions)
@@ -140,6 +140,18 @@
 * Implement #1153 into 2.2 branch.
 * Pin prompt-toolkit to latest version 1.0.15
 
+## 2.1.12 23/01/2019
+
+* Tune how to get the size of SVG images. Ref https://github.com/GNS3/gns3-gui/issues/2674.
+* Automatically create a symbolic link to the IOU image in the IOU working directory. Fixes #1484
+* Fix link pause/filters only work for the first interface of Docker containers. Fixes #1482
+* Telnet console resize support for Docker VM.
+* Fix _fix_permissions() garbles permissions in Docker VM. Ref #1428
+* Fix "None is not of type 'integer'" when opening project containing a Qemu VM. Fixes #2610.
+* Only require Xtigervnc or Xvfb+x11vnc for Docker with vnc console. Ref #1438
+* Support tigervnc in Docker VM. Ref #1438
+* Update minimum VIX version requirements for VMware. Ref #1415.
+
 ## 2.1.11 28/09/2018
 
 * Catch some exceptions.
@@ -279,7 +279,7 @@ class BaseManager:
         destination_dir = destination_node.working_dir
         try:
             shutil.rmtree(destination_dir)
-            shutil.copytree(source_node.working_dir, destination_dir)
+            shutil.copytree(source_node.working_dir, destination_dir, symlinks=True, ignore_dangling_symlinks=True)
         except OSError as e:
             raise aiohttp.web.HTTPConflict(text="Cannot duplicate node data: {}".format(e))
 
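Note (illustration, not part of this commit): without symlinks=True, shutil.copytree() tries to follow each link and fails on dangling ones, which a node's working directory can now contain. A minimal sketch with made-up paths:

import os
import shutil
import tempfile

# Build a source directory containing a dangling symlink, as an IOU working dir might.
src = tempfile.mkdtemp()
dst = os.path.join(tempfile.mkdtemp(), "copy")
os.symlink("/nonexistent/iou-image.bin", os.path.join(src, "iou-image.bin"))

# symlinks=True copies links as links instead of following them;
# ignore_dangling_symlinks=True silences errors for links that would have to be followed.
shutil.copytree(src, dst, symlinks=True, ignore_dangling_symlinks=True)
print(os.path.islink(os.path.join(dst, "iou-image.bin")))  # True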
@@ -198,7 +198,10 @@ class Docker(BaseManager):
 
         if progress_callback:
             progress_callback("Pulling '{}' from docker hub".format(image))
+        try:
             response = await self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
+        except DockerError as e:
+            raise DockerError("Could not pull the '{}' image from Docker Hub, please check your Internet connection (original error: {})".format(image, e))
         # The pull api will stream status via an HTTP JSON stream
         content = ""
         while True:
@@ -353,6 +353,9 @@ class DockerVM(BaseNode):
         if self._environment:
             for e in self._environment.strip().split("\n"):
                 e = e.strip()
+                if e.split("=")[0] == "":
+                    self.project.emit("log.warning", {"message": "{} has invalid environment variable: {}".format(self.name, e)})
+                    continue
                 if not e.startswith("GNS3_"):
                     formatted = self._format_env(variables, e)
                     params["Env"].append(formatted)
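Note (illustration, not from the diff): the added check skips lines whose variable name is empty, e.g. a stray "=" or "=value". The same validation as a standalone sketch:

def split_env_lines(environment):
    """Parse KEY=VALUE lines, reporting entries with an empty variable name."""
    valid, invalid = [], []
    for line in environment.strip().split("\n"):
        line = line.strip()
        if line.split("=")[0] == "":
            invalid.append(line)  # e.g. "=foo" or "="
            continue
        valid.append(line)
    return valid, invalid

print(split_env_lines("PATH=/bin\n=broken\nGNS3_TEST=1"))
# (['PATH=/bin', 'GNS3_TEST=1'], ['=broken'])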
@@ -535,7 +535,8 @@ class IOUVM(BaseNode):
         # on newer images, see https://github.com/GNS3/gns3-server/issues/1484
         try:
             symlink = os.path.join(self.working_dir, os.path.basename(self.path))
-            if not os.path.islink(symlink):
+            if os.path.islink(symlink):
+                os.unlink(symlink)
             os.symlink(self.path, symlink)
         except OSError as e:
             raise IOUError("Could not create symbolic link: {}".format(e))
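Note (illustration, not part of the commit): the inverted condition means a stale link is removed and recreated, so it always points at the current image path. The pattern in isolation, with hypothetical paths:

import os

def refresh_symlink(target, link_path):
    """Point link_path at target, replacing any existing link."""
    if os.path.islink(link_path):
        os.unlink(link_path)        # drop the old link, even if it pointed elsewhere
    os.symlink(target, link_path)   # recreate it against the current target

# refresh_symlink("/images/IOU/i86bi-linux-l3.bin", "/project/iou-work/i86bi-linux-l3.bin")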
@@ -185,8 +185,8 @@ class Qemu(BaseManager):
             return ""
         else:
             try:
-                output = await subprocess_check_output(qemu_path, "-version")
-                match = re.search(r"version\s+([0-9a-z\-\.]+)", output)
+                output = await subprocess_check_output(qemu_path, "-version", "-nographic")
+                match = re.search("version\s+([0-9a-z\-\.]+)", output)
                 if match:
                     version = match.group(1)
                     return version
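Note (illustration, assuming a plain subprocess call instead of the project's async helper): extracting the version number from "qemu-system-* -version" output:

import re
import subprocess

def get_qemu_version(qemu_path):
    """Return the version string printed by a QEMU binary, or None if it cannot be parsed."""
    output = subprocess.check_output([qemu_path, "-version", "-nographic"], text=True)
    # Typical output starts with: "QEMU emulator version 3.1.0 ..."
    match = re.search(r"version\s+([0-9a-z\-\.]+)", output)
    return match.group(1) if match else None

# print(get_qemu_version("/usr/bin/qemu-system-x86_64"))  # e.g. "3.1.0"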
@@ -1736,18 +1736,18 @@ class QemuVM(BaseNode):
 
         return network_options
 
-    def _graphic(self):
+    async def _disable_graphics(self):
         """
-        Adds the correct graphic options depending of the OS
+        Disable graphics depending of the QEMU version
         """
 
-        if sys.platform.startswith("win"):
-            return []
-        if len(os.environ.get("DISPLAY", "")) > 0:
-            return []
-        if "-nographic" not in self._options:
-            return ["-nographic"]
-        return []
+        if any(opt in self._options for opt in ["-display", "-nographic", "-curses", "-sdl" "-spice", "-vnc"]):
+            return []
+        version = await self.manager.get_qemu_version(self.qemu_path)
+        if version and parse_version(version) >= parse_version("3.0"):
+            return ["-display", "none"]
+        else:
+            return ["-nographic"]
 
     async def _run_with_hardware_acceleration(self, qemu_path, options):
         """
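Note (illustration only, using pkg_resources.parse_version as a stand-in for the server's parse_version helper): the new method prefers "-display none" on QEMU 3.0+ and falls back to "-nographic" on older binaries, unless the user already chose a display option:

from pkg_resources import parse_version  # assumption: any PEP 440 version parser would do

def graphics_options(user_options, qemu_version):
    """Pick the option that disables graphics for a telnet-only console."""
    if any(opt in user_options for opt in ("-display", "-nographic", "-curses", "-vnc")):
        return []  # respect an explicit choice
    if qemu_version and parse_version(qemu_version) >= parse_version("3.0"):
        return ["-display", "none"]
    return ["-nographic"]

print(graphics_options("", "3.1.0"))  # ['-display', 'none']
print(graphics_options("", "2.5.0"))  # ['-nographic']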
@@ -1920,12 +1920,12 @@ class QemuVM(BaseNode):
             raise QemuError("Console type {} is unknown".format(self._console_type))
         command.extend(self._monitor_options())
         command.extend((await self._network_options()))
-        command.extend(self._graphic())
         if self.on_close != "save_vm_state":
             await self._clear_save_vm_stated()
         else:
             command.extend((await self._saved_state_option()))
+        if self._console_type == "telnet":
+            command.extend((await self._disable_graphics()))
         if additional_options:
             try:
                 command.extend(shlex.split(additional_options))
@@ -55,7 +55,7 @@ class Drawing:
         return self._id
 
     @property
-    def ressource_filename(self):
+    def resource_filename(self):
         """
         If the svg content has been dump to an external file return is name otherwise None
         """
@@ -30,7 +30,7 @@ import logging
 log = logging.getLogger(__name__)
 
 
-async def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False):
+async def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False, reset_mac_addresses=False):
     """
     Export a project to a zip file.
 
@@ -41,6 +41,7 @@ async def export_project(project, temporary_dir, include_images=False, keep_comp
     :param include images: save OS images to the zip file
     :param keep_compute_id: If false replace all compute id by local (standard behavior for .gns3project to make it portable)
     :param allow_all_nodes: Allow all nodes type to be include in the zip even if not portable
+    :param reset_mac_addresses: Reset MAC addresses for every nodes.
 
     :returns: ZipStream object
     """
@@ -60,10 +61,10 @@ async def export_project(project, temporary_dir, include_images=False, keep_comp
     # First we process the .gns3 in order to be sure we don't have an error
     for file in os.listdir(project._path):
         if file.endswith(".gns3"):
-            await _patch_project_file(project, os.path.join(project._path, file), zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir)
+            await _patch_project_file(project, os.path.join(project._path, file), zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir, reset_mac_addresses)
 
     # Export the local files
-    for root, dirs, files in os.walk(project._path, topdown=True):
+    for root, dirs, files in os.walk(project._path, topdown=True, followlinks=False):
         files = [f for f in files if _is_exportable(os.path.join(root, f))]
         for file in files:
             path = os.path.join(root, file)
@@ -124,6 +125,7 @@ def _patch_mtime(path):
         new_mtime = file_date.replace(year=1980).timestamp()
         os.utime(path, (st.st_atime, new_mtime))
 
+
 def _is_exportable(path):
     """
     :returns: True if file should not be included in the final archive
@@ -133,6 +135,10 @@ def _is_exportable(path):
     if path.endswith("snapshots"):
         return False
 
+    # do not export symlinks
+    if os.path.islink(path):
+        return False
+
     # do not export directories of snapshots
     if "{sep}snapshots{sep}".format(sep=os.path.sep) in path:
         return False
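Note (illustration, not from the commit): with followlinks=False the walk never descends into linked directories, and the per-file islink() check keeps individual linked files out of the archive. A minimal sketch of the combined filter:

import os

def exportable_files(project_path):
    """Yield regular files to pack into the archive, skipping symlinks and snapshots."""
    for root, dirs, files in os.walk(project_path, topdown=True, followlinks=False):
        for name in files:
            path = os.path.join(root, name)
            if os.path.islink(path):
                continue  # never ship symlinks
            if "{sep}snapshots{sep}".format(sep=os.path.sep) in path:
                continue  # snapshot contents are not exported here
            yield path

# for f in exportable_files("/projects/my-lab"):
#     print(f)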
@@ -153,7 +159,7 @@
     return True
 
 
-async def _patch_project_file(project, path, zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir):
+async def _patch_project_file(project, path, zstream, include_images, keep_compute_id, allow_all_nodes, temporary_dir, reset_mac_addresses):
     """
     Patch a project file (.gns3) to export a project.
     The .gns3 file is renamed to project.gns3
@@ -186,6 +192,10 @@ async def _patch_project_file(project, path, zstream, include_images, keep_compu
         if "properties" in node and node["node_type"] != "docker":
             for prop, value in node["properties"].items():
 
+                # reset the MAC address
+                if reset_mac_addresses and prop in ("mac_addr", "mac_address"):
+                    node["properties"][prop] = None
+
                 if node["node_type"] == "iou":
                     if not prop == "path":
                         continue
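Note (illustration, assuming the usual .gns3 layout with a topology/nodes list): resetting the MAC properties to None lets the importing server assign fresh addresses, and the relaxed schemas further down accept the null value:

def strip_mac_addresses(topology):
    """Blank per-node MAC addresses so a duplicated project gets fresh ones."""
    for node in topology.get("topology", {}).get("nodes", []):
        properties = node.get("properties", {})
        for prop in ("mac_addr", "mac_address"):
            if prop in properties:
                properties[prop] = None
    return topology

example = {"topology": {"nodes": [{"node_type": "qemu", "properties": {"mac_addr": "0c:ab:7e:01:00:00"}}]}}
print(strip_mac_addresses(example))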
@@ -69,13 +69,15 @@ class VMwareGNS3VM(BaseGNS3VM):
         if ram % 4 != 0:
             raise GNS3VMError("Allocated memory {} for the GNS3 VM must be a multiple of 4".format(ram))
 
-        available_vcpus = psutil.cpu_count(logical=False)
+        available_vcpus = psutil.cpu_count()
         if vcpus > available_vcpus:
             raise GNS3VMError("You have allocated too many vCPUs for the GNS3 VM! (max available is {} vCPUs)".format(available_vcpus))
 
+        cores_per_sockets = int(available_vcpus / psutil.cpu_count(logical=False))
         try:
             pairs = VMware.parse_vmware_file(self._vmx_path)
             pairs["numvcpus"] = str(vcpus)
+            pairs["cpuid.coresPerSocket"] = str(cores_per_sockets)
             pairs["memsize"] = str(ram)
             VMware.write_vmx_file(self._vmx_path, pairs)
             log.info("GNS3 VM vCPU count set to {} and RAM amount set to {}".format(vcpus, ram))
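Note (illustration, hypothetical helper): counting logical CPUs as the vCPU budget and writing a matching cpuid.coresPerSocket keeps the generated .vmx consistent on hyper-threaded hosts:

import psutil

def vmx_cpu_settings(requested_vcpus):
    """Compute the vCPU-related .vmx keys for a requested vCPU count."""
    available = psutil.cpu_count()                   # logical CPUs, as in the new check
    if requested_vcpus > available:
        raise ValueError("only {} vCPUs available".format(available))
    physical = psutil.cpu_count(logical=False) or 1
    cores_per_socket = int(available / physical)     # threads per physical core
    return {"numvcpus": str(requested_vcpus), "cpuid.coresPerSocket": str(cores_per_socket)}

print(vmx_cpu_settings(2))  # e.g. {'numvcpus': '2', 'cpuid.coresPerSocket': '2'} on a 4-core/8-thread host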
@@ -182,9 +182,11 @@ async def _move_files_to_compute(compute, project_id, directory, files_path):
 
     location = os.path.join(directory, files_path)
     if os.path.exists(location):
-        for (dirpath, dirnames, filenames) in os.walk(location):
+        for (dirpath, dirnames, filenames) in os.walk(location, followlinks=False):
             for filename in filenames:
                 path = os.path.join(dirpath, filename)
+                if os.path.islink(path):
+                    continue
                 dst = os.path.relpath(path, directory)
                 await _upload_file(compute, project_id, path, dst)
         await wait_run_in_executor(shutil.rmtree, os.path.join(directory, files_path))
@@ -210,9 +212,11 @@ def _import_images(controller, path):
 
     image_dir = controller.images_path()
     root = os.path.join(path, "images")
-    for (dirpath, dirnames, filenames) in os.walk(root):
+    for (dirpath, dirnames, filenames) in os.walk(root, followlinks=False):
        for filename in filenames:
            path = os.path.join(dirpath, filename)
+           if os.path.islink(path):
+               continue
           dst = os.path.join(image_dir, os.path.relpath(path, root))
           os.makedirs(os.path.dirname(dst), exist_ok=True)
           shutil.move(path, dst)
@@ -740,25 +740,27 @@ class Project:
             # We don't care if a compute is down at this step
             except (ComputeError, aiohttp.web.HTTPError, aiohttp.ClientResponseError, TimeoutError):
                 pass
-        self._cleanPictures()
+        self._clean_pictures()
         self._status = "closed"
         if not ignore_notification:
             self.controller.notification.project_emit("project.closed", self.__json__())
         self.reset()
 
-    def _cleanPictures(self):
+    def _clean_pictures(self):
         """
-        Delete unused images
+        Delete unused pictures.
         """
 
-        # Project have been deleted
-        if not os.path.exists(self.path):
+        # Project have been deleted or is loading or is not opened
+        if not os.path.exists(self.path) or self._loading or self._status != "opened":
             return
         try:
             pictures = set(os.listdir(self.pictures_directory))
             for drawing in self._drawings.values():
                 try:
-                    pictures.remove(drawing.ressource_filename)
+                    resource_filename = drawing.resource_filename
+                    if resource_filename:
+                        pictures.remove(resource_filename)
                 except KeyError:
                     pass
 
@@ -770,10 +772,12 @@ class Project:
                 except KeyError:
                     pass
 
-            for pict in pictures:
-                os.remove(os.path.join(self.pictures_directory, pict))
+            for pic_filename in pictures:
+                path = os.path.join(self.pictures_directory, pic_filename)
+                log.info("Deleting unused picture '{}'".format(path))
+                os.remove(path)
         except OSError as e:
-            log.warning(str(e))
+            log.warning("Could not delete unused pictures: {}".format(e))
 
     async def delete(self):
 
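Note (illustration, not part of the diff): the cleanup is a set difference between the files in the pictures directory and the filenames still referenced by drawings; skipping None mirrors the new resource_filename guard:

import os

def unused_pictures(pictures_directory, referenced_filenames):
    """Return picture files on disk that nothing references any more."""
    on_disk = set(os.listdir(pictures_directory))
    return on_disk - {name for name in referenced_filenames if name}

# for name in unused_pictures("/project/pictures", ["diagram.svg", None]):
#     os.remove(os.path.join("/project/pictures", name))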
@@ -962,7 +966,7 @@ class Project:
         assert self._status != "closed"
         try:
             with tempfile.TemporaryDirectory() as tmpdir:
-                zipstream = await export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
+                zipstream = await export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True, reset_mac_addresses=True)
                 project_path = os.path.join(tmpdir, "project.gns3p")
                 await wait_run_in_executor(self._create_duplicate_project_file, project_path, zipstream)
                 with open(project_path, "rb") as f:
@@ -164,8 +164,9 @@ class TemplateHandler:
 
         controller = Controller.instance()
         project = controller.get_project(request.match_info["project_id"])
-        await project.add_node_from_template(request.match_info["template_id"],
+        node = await project.add_node_from_template(request.match_info["template_id"],
                                              x=request.json["x"],
                                              y=request.json["y"],
                                              compute_id=request.json.get("compute_id"))
         response.set_status(201)
+        response.json(node)
@@ -180,7 +180,7 @@ VM_CREATE_SCHEMA = {
         },
         "mac_addr": {
             "description": "Base MAC address",
-            "type": "string",
+            "type": ["null", "string"],
             "minLength": 1,
             "pattern": "^([0-9a-fA-F]{4}\\.){2}[0-9a-fA-F]{4}$"
         },
@@ -402,7 +402,7 @@ VM_UPDATE_SCHEMA = {
         },
         "mac_addr": {
             "description": "Base MAC address",
-            "type": "string",
+            "type": ["null", "string"],
             "minLength": 1,
             "pattern": "^([0-9a-fA-F]{4}\\.){2}[0-9a-fA-F]{4}$"
         },
@@ -646,7 +646,7 @@ VM_OBJECT_SCHEMA = {
         },
         "mac_addr": {
             "description": "Base MAC address",
-            "type": "string",
+            "type": ["null", "string"]
             #"minLength": 1,
             #"pattern": "^([0-9a-fA-F]{4}\\.){2}[0-9a-fA-F]{4}$"
         },
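Note (illustration with the jsonschema package, not the server's own validator wiring): with "type": ["null", "string"] a mac_addr of None now validates, which is what the export path sets when MAC addresses are reset:

from jsonschema import ValidationError, validate

MAC_ADDR_SCHEMA = {
    "type": "object",
    "properties": {
        "mac_addr": {
            "description": "Base MAC address",
            "type": ["null", "string"],
            "minLength": 1,
            "pattern": "^([0-9a-fA-F]{4}\\.){2}[0-9a-fA-F]{4}$"
        }
    }
}

validate({"mac_addr": "0cab.7e01.0000"}, MAC_ADDR_SCHEMA)  # passes
validate({"mac_addr": None}, MAC_ADDR_SCHEMA)              # passes: null is now allowed
try:
    validate({"mac_addr": "not-a-mac"}, MAC_ADDR_SCHEMA)
except ValidationError as e:
    print("rejected:", e.message)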
@@ -68,7 +68,7 @@ class UBridgeHypervisor:
         connection_success = False
         last_exception = None
         while time.time() - begin < timeout:
-            await asyncio.sleep(0.01)
+            await asyncio.sleep(0.1)
             try:
                 self._reader, self._writer = await asyncio.open_connection(host, self._port)
             except OSError as e:
@@ -83,6 +83,7 @@ class UBridgeHypervisor:
         log.info("Connected to uBridge hypervisor on {}:{} after {:.4f} seconds".format(host, self._port, time.time() - begin))
 
         try:
+            await asyncio.sleep(0.1)
             version = await self.send("hypervisor version")
             self._version = version[0].split("-", 1)[0]
         except IndexError:
@@ -232,7 +233,7 @@ class UBridgeHypervisor:
                                    .format(host=self._host, port=self._port, command=command, run=self.is_running()))
                 else:
                     retries += 1
-                    await asyncio.sleep(0.1)
+                    await asyncio.sleep(0.5)
                     continue
                 retries = 0
                 buf += chunk.decode("utf-8")
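Note (illustration, not the hypervisor class itself): the longer sleeps simply back off between attempts; the overall pattern is retry-until-timeout around asyncio.open_connection():

import asyncio
import time

async def connect_with_retry(host, port, timeout=10.0, delay=0.1):
    """Keep trying to open a TCP connection until the timeout expires."""
    begin = time.time()
    last_exception = None
    while time.time() - begin < timeout:
        await asyncio.sleep(delay)  # back off between attempts
        try:
            return await asyncio.open_connection(host, port)
        except OSError as e:
            last_exception = e
    raise ConnectionError("could not connect to {}:{} ({})".format(host, port, last_exception))

# reader, writer = asyncio.run(connect_with_retry("127.0.0.1", 4242))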
@@ -132,6 +132,7 @@ def test_is_running(vm, running_subprocess_mock):
 
 
 def test_start(loop, vm, running_subprocess_mock):
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
     with asyncio_patch("gns3server.compute.qemu.QemuVM.start_wrap_console"):
         with asyncio_patch("asyncio.create_subprocess_exec", return_value=running_subprocess_mock) as mock:
             loop.run_until_complete(asyncio.ensure_future(vm.start()))
@@ -146,6 +147,7 @@ def test_stop(loop, vm, running_subprocess_mock):
     future = asyncio.Future()
     future.set_result(True)
     process.wait.return_value = future
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
 
     with asyncio_patch("gns3server.compute.qemu.QemuVM.start_wrap_console"):
         with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
@@ -229,6 +231,7 @@ def test_port_remove_nio_binding(vm, loop):
 
 
 def test_close(vm, port_manager, loop):
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
     with asyncio_patch("gns3server.compute.qemu.QemuVM.start_wrap_console"):
         with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
             loop.run_until_complete(asyncio.ensure_future(vm.start()))
@@ -354,6 +357,7 @@ def test_disk_options(vm, tmpdir, loop, fake_qemu_img_binary):
 
 def test_cdrom_option(vm, tmpdir, loop, fake_qemu_img_binary):
 
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
     vm._cdrom_image = str(tmpdir / "test.iso")
     open(vm._cdrom_image, "w+").close()
 
@@ -364,6 +368,7 @@ def test_cdrom_option(vm, tmpdir, loop, fake_qemu_img_binary):
 
 def test_bios_option(vm, tmpdir, loop, fake_qemu_img_binary):
 
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
     vm._bios_image = str(tmpdir / "test.img")
     open(vm._bios_image, "w+").close()
 
@@ -470,6 +475,7 @@ def test_control_vm_expect_text(vm, loop, running_subprocess_mock):
 
 def test_build_command(vm, loop, fake_qemu_binary, port_manager):
 
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
     os.environ["DISPLAY"] = "0:0"
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
         cmd = loop.run_until_complete(asyncio.ensure_future(vm._build_command()))
@@ -493,7 +499,9 @@ def test_build_command(vm, loop, fake_qemu_binary, port_manager):
         "-device",
         "e1000,mac={},netdev=gns3-0".format(vm._mac_address),
         "-netdev",
-        "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport)
+        "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport),
+        "-display",
+        "none"
     ]
 
 
@@ -502,6 +510,7 @@ def test_build_command_manual_uuid(vm, loop, fake_qemu_binary, port_manager):
     If user has set a uuid we keep it
     """
 
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
     vm.options = "-uuid e1c307a4-896f-11e6-81a5-3c07547807cc"
     os.environ["DISPLAY"] = "0:0"
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
@@ -541,7 +550,8 @@ def test_build_command_kvm(linux_platform, vm, loop, fake_qemu_binary, port_mana
         "-device",
         "e1000,mac={},netdev=gns3-0".format(vm._mac_address),
         "-netdev",
-        "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport)
+        "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport),
+        "-nographic"
     ]
 
 
@@ -578,13 +588,15 @@ def test_build_command_kvm_2_4(linux_platform, vm, loop, fake_qemu_binary, port_
         "-device",
         "e1000,mac={},netdev=gns3-0".format(vm._mac_address),
         "-netdev",
-        "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport)
+        "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport),
+        "-nographic"
     ]
 
 
 @pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
 def test_build_command_without_display(vm, loop, fake_qemu_binary):
 
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.5.0")
     os.environ["DISPLAY"] = ""
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
         cmd = loop.run_until_complete(asyncio.ensure_future(vm._build_command()))
@@ -593,6 +605,7 @@ def test_build_command_without_display(vm, loop, fake_qemu_binary):
 
 def test_build_command_two_adapters(vm, loop, fake_qemu_binary, port_manager):
 
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.5.0")
     os.environ["DISPLAY"] = "0:0"
     vm.adapters = 2
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
@@ -622,7 +635,8 @@ def test_build_command_two_adapters(vm, loop, fake_qemu_binary, port_manager):
         "-device",
         "e1000,mac={},netdev=gns3-1".format(int_to_macaddress(macaddress_to_int(vm._mac_address) + 1)),
         "-netdev",
-        "socket,id=gns3-1,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio2.rport, nio2.lport)
+        "socket,id=gns3-1,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio2.rport, nio2.lport),
+        "-nographic"
     ]
 
 
@@ -631,6 +645,7 @@ def test_build_command_two_adapters_mac_address(vm, loop, fake_qemu_binary, port
     Should support multiple base vmac address
     """
 
+    vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.5.0")
    vm.adapters = 2
    vm.mac_address = "00:00:ab:0e:0f:09"
    mac_0 = vm._mac_address