Mirror of https://github.com/GNS3/gns3-server (synced 2024-12-29)
Fix Qemu VM state support after closing a project and check for JSON data returned by qemu-img. Fixes #1591
This commit is contained in:
parent 424db843ca
commit e8b8554c1a
@@ -966,7 +966,10 @@ class QemuVM(BaseNode):
                     set_link_commands.append("set_link gns3-{} off".format(adapter_number))
                 else:
                     set_link_commands.append("set_link gns3-{} off".format(adapter_number))
-        await self._control_vm_commands(set_link_commands)
+
+        if "-loadvm" not in command_string:
+            # only set the link statuses if not restoring a previous VM state
+            await self._control_vm_commands(set_link_commands)
 
         try:
             await self.start_wrap_console()
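For readers skimming the hunk: the new guard skips the monitor "set_link" commands when the VM is being resumed from a saved state, since the snapshot already carries its link state. A minimal, self-contained sketch of the guard, assuming command_string is the joined qemu command line as the hunk implies (all values here are made up):

    # hypothetical qemu invocation; "-loadvm" means a saved state will be restored
    command = ["qemu-system-x86_64", "-name", "PC1", "-loadvm", "pc1_snapshot"]
    command_string = " ".join(command)

    set_link_commands = ["set_link gns3-0 off"]
    if "-loadvm" not in command_string:
        # booting fresh: apply the recorded link states
        print("would send:", set_link_commands)
    else:
        # restoring a saved state: leave link state alone
        print("skipping set_link commands")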
@@ -1143,7 +1146,9 @@ class QemuVM(BaseNode):
         if not (await super().close()):
             return False
 
+        self.on_close = "power_off"
+        #FIXME: Don't wait for ACPI shutdown when closing the project, should we?
         if self.on_close == "shutdown_signal":
             self.on_close = "power_off"
         await self.stop()
 
         for adapter in self._ethernet_adapters:
@@ -1848,18 +1853,22 @@ class QemuVM(BaseNode):
                 if not os.path.exists(disk):
                     continue
                 output = await subprocess_check_output(qemu_img_path, "info", "--output=json", disk)
-                json_data = json.loads(output)
-                if "snapshots" in json_data:
-                    for snapshot in json_data["snapshots"]:
-                        if snapshot["name"] == snapshot_name:
-                            # delete the snapshot
-                            command = [qemu_img_path, "snapshot", "-d", snapshot_name, disk]
-                            retcode = await self._qemu_img_exec(command)
-                            if retcode:
-                                stdout = self.read_qemu_img_stdout()
-                                log.warning("Could not delete saved VM state from disk {}: {}".format(disk, stdout))
-                            else:
-                                log.info("Deleted saved VM state from disk {}".format(disk))
+                if output:
+                    try:
+                        json_data = json.loads(output)
+                    except ValueError as e:
+                        raise QemuError("Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {}".format(e))
+                    if "snapshots" in json_data:
+                        for snapshot in json_data["snapshots"]:
+                            if snapshot["name"] == snapshot_name:
+                                # delete the snapshot
+                                command = [qemu_img_path, "snapshot", "-d", snapshot_name, disk]
+                                retcode = await self._qemu_img_exec(command)
+                                if retcode:
+                                    stdout = self.read_qemu_img_stdout()
+                                    log.warning("Could not delete saved VM state from disk {}: {}".format(disk, stdout))
+                                else:
+                                    log.info("Deleted saved VM state from disk {}".format(disk))
             except subprocess.SubprocessError as e:
                 raise QemuError("Error while looking for the Qemu VM saved state snapshot: {}".format(e))
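The new "if output:" and try/except wrapper is the JSON check named in the commit message: qemu-img can print empty or malformed output (for instance if the image is unreadable), and json.loads would previously blow up with an unhandled ValueError. A minimal sketch of the same pattern, with abridged, made-up qemu-img JSON (real output has more fields and varies by qemu version):

    import json

    # abridged example of what `qemu-img info --output=json <disk>` may print
    output = '{"format": "qcow2", "snapshots": [{"id": "1", "name": "pc1_snapshot"}]}'
    snapshot_name = "pc1_snapshot"

    if output:  # qemu-img may print nothing useful on failure
        try:
            json_data = json.loads(output)
        except ValueError as e:
            raise RuntimeError("Invalid JSON data returned by qemu-img: {}".format(e))
        for snapshot in json_data.get("snapshots", []):
            if snapshot["name"] == snapshot_name:
                print("saved state found on disk:", snapshot_name)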
@@ -1879,14 +1888,19 @@ class QemuVM(BaseNode):
                 if not os.path.exists(disk):
                     continue
                 output = await subprocess_check_output(qemu_img_path, "info", "--output=json", disk)
-                json_data = json.loads(output)
-                if "snapshots" in json_data:
-                    for snapshot in json_data["snapshots"]:
-                        if snapshot["name"] == snapshot_name:
-                            log.info('QEMU VM "{name}" [{id}] VM saved state detected (snapshot name: {snapshot})'.format(name=self._name,
-                                                                                                                          id=self.id,
-                                                                                                                          snapshot=snapshot_name))
-                            return ["-loadvm", snapshot_name]
+                if output:
+                    try:
+                        json_data = json.loads(output)
+                    except ValueError as e:
+                        raise QemuError("Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {}".format(e))
+                    if "snapshots" in json_data:
+                        for snapshot in json_data["snapshots"]:
+                            if snapshot["name"] == snapshot_name:
+                                log.info('QEMU VM "{name}" [{id}] VM saved state detected (snapshot name: {snapshot})'.format(name=self._name,
+                                                                                                                              id=self.id,
+                                                                                                                              snapshot=snapshot_name))
+                                return ["-loadvm", snapshot_name]
+
         except subprocess.SubprocessError as e:
             raise QemuError("Error while looking for the Qemu VM saved state snapshot: {}".format(e))
         return []
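This hunk applies the same JSON validation to saved-state detection; the returned ["-loadvm", snapshot_name] pair is meant to be spliced into the qemu command line. An illustrative sketch under that assumption (names and values are made up, not the server's actual command builder):

    # splice the saved-state option pair into a qemu command line
    base_command = ["qemu-system-x86_64", "-m", "512M"]
    saved_state_options = ["-loadvm", "pc1_snapshot"]  # or [] when no snapshot exists

    command = base_command + saved_state_options
    print(" ".join(command))  # qemu-system-x86_64 -m 512M -loadvm pc1_snapshot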
@@ -125,7 +125,7 @@ class Config:
 
         if self._main_config_file is None:
 
-            # TODO: migrate versioned config file from a previous version of GNS3 (for instance 2.2.0 -> 2.2.1)
+            # TODO: migrate versioned config file from a previous version of GNS3 (for instance 2.2 -> 2.3) + support profiles
             # migrate post version 2.2.0 config files if they exist
             os.makedirs(versioned_user_dir, exist_ok=True)
             try:
@@ -27,6 +27,9 @@ import itertools
 
 from .topology import load_topology
 from ..utils.asyncio import wait_run_in_executor
 
+import logging
+log = logging.getLogger(__name__)
+
 """
 Handle the import of project from a .gns3project
 """
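The module-level logger added here follows the stdlib convention of one logger per module, named after the module. A small usage sketch (the basicConfig handler setup is illustrative only):

    import logging
    log = logging.getLogger(__name__)

    logging.basicConfig(level=logging.WARNING)
    # %-style lazy formatting: arguments are only interpolated if the
    # record is actually emitted
    log.warning("%s is not connected!", "my-compute")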
@@ -126,6 +129,14 @@ async def import_project(controller, project_id, stream, location=None, name=None
                 node["compute_id"] = "vm"
         else:
+            # Round-robin through available compute resources.
+            # computes = []
+            # for compute_id in controller.computes:
+            #     compute = controller.get_compute(compute_id)
+            #     # only use the local compute or any connected compute
+            #     if compute_id == "local" or compute.connected:
+            #         computes.append(compute_id)
+            #     else:
+            #         log.warning(compute.name, "is not connected!")
             compute_nodes = itertools.cycle(controller.computes)
             for node in topology["topology"]["nodes"]:
                 node["compute_id"] = next(compute_nodes)
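The fallback branch spreads imported nodes over the known computes round-robin via itertools.cycle, which repeats the sequence of compute IDs indefinitely. A self-contained sketch with hypothetical compute IDs is below. As an aside, if the commented-out filtering code is ever revived, note that log.warning(compute.name, "is not connected!") would treat the compute name as the format string; the idiomatic call is log.warning("%s is not connected!", compute.name).

    import itertools

    computes = ["local", "remote-1", "remote-2"]  # hypothetical compute IDs
    nodes = [{"name": "PC{}".format(i)} for i in range(5)]

    # cycle() loops over the compute IDs forever, so each node in turn
    # gets the next compute in round-robin order
    compute_nodes = itertools.cycle(computes)
    for node in nodes:
        node["compute_id"] = next(compute_nodes)

    print([(n["name"], n["compute_id"]) for n in nodes])
    # [('PC0', 'local'), ('PC1', 'remote-1'), ('PC2', 'remote-2'),
    #  ('PC3', 'local'), ('PC4', 'remote-1')]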