Mirror of https://github.com/GNS3/gns3-server (synced 2024-12-01 04:38:12 +00:00)
Fix Qemu VM state support after closing a project and check for JSON data returned by qemu-img. Fixes #1591
This commit is contained in: parent 424db843ca · commit e8b8554c1a
```diff
@@ -966,6 +966,9 @@ class QemuVM(BaseNode):
                         set_link_commands.append("set_link gns3-{} off".format(adapter_number))
                 else:
                     set_link_commands.append("set_link gns3-{} off".format(adapter_number))
 
-            await self._control_vm_commands(set_link_commands)
+            if "-loadvm" not in command_string:
+                # only set the link statuses if not restoring a previous VM state
+                await self._control_vm_commands(set_link_commands)
 
             try:
```
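This first hunk stops the server from replaying link up/down monitor commands when the VM boots from a saved state, per the diff's own comment. A minimal standalone sketch of the guard (the values are placeholders; `command_string` and `set_link_commands` are the names used in the diff, but this is not the full start-up method):

```python
# Sketch: skip the "set_link" monitor commands when the command line
# restores a saved VM state via -loadvm.
command = ["qemu-system-x86_64", "-name", "vm1", "-loadvm", "snap1"]  # placeholder
command_string = " ".join(command)

set_link_commands = ["set_link gns3-0 off"]  # placeholder monitor commands
if "-loadvm" not in command_string:
    # only set the link statuses if not restoring a previous VM state
    print("\n".join(set_link_commands))  # the server would send these to the QEMU monitor
else:
    print("restoring a saved state; leaving the link state alone")
```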
```diff
@@ -1143,6 +1146,8 @@ class QemuVM(BaseNode):
         if not (await super().close()):
             return False
 
-        self.on_close = "power_off"
+        #FIXME: Don't wait for ACPI shutdown when closing the project, should we?
+        if self.on_close == "shutdown_signal":
+            self.on_close = "power_off"
         await self.stop()
 
```
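In `close()`, the old code forced `on_close` to `"power_off"` unconditionally, which defeated any save-the-VM-state policy when a project was closed; now only `"shutdown_signal"` is downgraded (waiting for a guest ACPI shutdown during project close makes little sense, as the FIXME notes), and every other policy passes through. A toy sketch of the decision, where the `"save_vm_state"` value is an assumption not shown in this diff:

```python
# Sketch: downgrade only the ACPI-shutdown policy when closing a project.
def effective_close_action(on_close: str) -> str:
    if on_close == "shutdown_signal":
        return "power_off"  # don't wait for a guest ACPI shutdown here
    return on_close         # e.g. an assumed save-state policy keeps working

assert effective_close_action("shutdown_signal") == "power_off"
assert effective_close_action("save_vm_state") == "save_vm_state"  # assumed value
```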
```diff
@@ -1848,7 +1853,11 @@ class QemuVM(BaseNode):
                 if not os.path.exists(disk):
                     continue
                 output = await subprocess_check_output(qemu_img_path, "info", "--output=json", disk)
-                json_data = json.loads(output)
-                if "snapshots" in json_data:
-                    for snapshot in json_data["snapshots"]:
-                        if snapshot["name"] == snapshot_name:
+                if output:
+                    try:
+                        json_data = json.loads(output)
+                    except ValueError as e:
+                        raise QemuError("Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {}".format(e))
+                    if "snapshots" in json_data:
+                        for snapshot in json_data["snapshots"]:
+                            if snapshot["name"] == snapshot_name:
```
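The snapshot lookup now verifies that `qemu-img info --output=json` actually produced output and that the output parses as JSON, raising a `QemuError` instead of letting an unhandled `ValueError` escape on garbage. A self-contained sketch of the hardened lookup (synchronous `subprocess` stands in for the project's `subprocess_check_output` helper; the disk path and snapshot name in the usage note are placeholders):

```python
import json
import subprocess

def find_snapshot(qemu_img_path: str, disk: str, snapshot_name: str):
    """Return the matching internal snapshot dict, or None."""
    output = subprocess.check_output(
        [qemu_img_path, "info", "--output=json", disk]).decode()
    if not output:
        return None  # nothing to parse; treat as "no saved state"
    try:
        json_data = json.loads(output)
    except ValueError as e:  # json.JSONDecodeError subclasses ValueError
        raise RuntimeError("Invalid JSON data returned by qemu-img: {}".format(e))
    # the "snapshots" key is only present when the image has internal snapshots
    for snapshot in json_data.get("snapshots", []):
        if snapshot["name"] == snapshot_name:
            return snapshot
    return None

# Usage (placeholder paths and name):
# find_snapshot("qemu-img", "hda_disk.qcow2", "saved_state")
```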
```diff
@@ -1879,7 +1888,11 @@ class QemuVM(BaseNode):
                 if not os.path.exists(disk):
                     continue
                 output = await subprocess_check_output(qemu_img_path, "info", "--output=json", disk)
-                json_data = json.loads(output)
-                if "snapshots" in json_data:
-                    for snapshot in json_data["snapshots"]:
-                        if snapshot["name"] == snapshot_name:
+                if output:
+                    try:
+                        json_data = json.loads(output)
+                    except ValueError as e:
+                        raise QemuError("Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {}".format(e))
+                    if "snapshots" in json_data:
+                        for snapshot in json_data["snapshots"]:
+                            if snapshot["name"] == snapshot_name:
@@ -1887,6 +1900,7 @@ class QemuVM(BaseNode):
                                     id=self.id,
                                     snapshot=snapshot_name))
                                 return ["-loadvm", snapshot_name]
+
         except subprocess.SubprocessError as e:
             raise QemuError("Error while looking for the Qemu VM saved state snapshot: {}".format(e))
         return []
```
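This second site applies the same `if output:` / `try ... except ValueError` guard to the method that builds the load-state option. On a match it returns `["-loadvm", snapshot_name]`, which is exactly the token the first hunk checks for in `command_string`, so the two changes work together: a found saved-state snapshot suppresses the link-status replay at start-up.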
```diff
@@ -125,7 +125,7 @@ class Config:
 
         if self._main_config_file is None:
 
-            # TODO: migrate versioned config file from a previous version of GNS3 (for instance 2.2.0 -> 2.2.1)
+            # TODO: migrate versioned config file from a previous version of GNS3 (for instance 2.2 -> 2.3) + support profiles
             # migrate post version 2.2.0 config files if they exist
             os.makedirs(versioned_user_dir, exist_ok=True)
             try:
```
```diff
@@ -27,6 +27,9 @@ import itertools
 from .topology import load_topology
 from ..utils.asyncio import wait_run_in_executor
 
+import logging
+log = logging.getLogger(__name__)
+
 """
 Handle the import of project from a .gns3project
 """
```
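The module-level logger added here is presumably what the (currently commented-out) `log.warning(...)` call in the next hunk would use once that filtering code is enabled.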
```diff
@@ -126,6 +129,14 @@ async def import_project(controller, project_id, stream, location=None, name=None
                 node["compute_id"] = "vm"
         else:
             # Round-robin through available compute resources.
+            # computes = []
+            # for compute_id in controller.computes:
+            #     compute = controller.get_compute(compute_id)
+            #     # only use the local compute or any connected compute
+            #     if compute_id == "local" or compute.connected:
+            #         computes.append(compute_id)
+            #     else:
+            #         log.warning(compute.name, "is not connected!")
             compute_nodes = itertools.cycle(controller.computes)
             for node in topology["topology"]["nodes"]:
                 node["compute_id"] = next(compute_nodes)
```
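`itertools.cycle` turns the controller's compute mapping into an endless iterator over its keys, so imported nodes are spread evenly across computes; the commented-out block sketches a future refinement that would skip disconnected computes first. A runnable toy version (the compute IDs and node dicts are placeholders):

```python
import itertools

computes = {"local": None, "vm": None, "remote1": None}  # placeholder compute IDs
nodes = [{"name": "n{}".format(i)} for i in range(5)]    # placeholder topology nodes

compute_nodes = itertools.cycle(computes)  # cycles over the dict's keys forever
for node in nodes:
    node["compute_id"] = next(compute_nodes)

print([n["compute_id"] for n in nodes])
# -> ['local', 'vm', 'remote1', 'local', 'vm']
```

Note that the live code cycles over every registered compute, connected or not; the commented-out filtering is what would restrict assignment to the local compute and connected remotes.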