Mirror of https://github.com/GNS3/gns3-server (synced 2024-11-28 11:18:11 +00:00)

Merge remote-tracking branch 'origin/master'
Commit: 7d8ff54c78
@@ -181,7 +181,10 @@ class DynamipsDeviceHandler:
         dynamips_manager = Dynamips.instance()
         device = dynamips_manager.get_device(request.match_info["device_id"], project_id=request.match_info["project_id"])
         port_number = int(request.match_info["port_number"])
-        yield from device.remove_nio(port_number)
+        if asyncio.iscoroutinefunction(device.remove_nio):
+            yield from device.remove_nio(port_number)
+        else:
+            device.remove_nio(port_number)
         response.set_status(204)

     @Route.post(
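A note on the dispatch this first hunk introduces: asyncio.iscoroutinefunction() lets the handler cope with a remove_nio() that may or may not be a coroutine. The sketch below shows the same check in isolation; SyncDevice and AsyncDevice are hypothetical stand-ins, and it uses modern async/await for brevity rather than the yield from style the server code uses.

import asyncio


class SyncDevice:
    # hypothetical stand-in: remove_nio() is a plain method
    def remove_nio(self, port_number):
        return "removed NIO {} (sync)".format(port_number)


class AsyncDevice:
    # hypothetical stand-in: remove_nio() is a coroutine
    async def remove_nio(self, port_number):
        return "removed NIO {} (async)".format(port_number)


async def delete_nio(device, port_number):
    # Same check as the hunk above: await only if remove_nio is a coroutine function.
    if asyncio.iscoroutinefunction(device.remove_nio):
        return await device.remove_nio(port_number)
    return device.remove_nio(port_number)


print(asyncio.run(delete_nio(SyncDevice(), 0)))   # removed NIO 0 (sync)
print(asyncio.run(delete_nio(AsyncDevice(), 0)))  # removed NIO 0 (async)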
@@ -28,6 +28,12 @@ from ...schemas.dynamips_vm import VM_CONFIGS_SCHEMA
 from ...modules.dynamips import Dynamips
 from ...modules.project_manager import ProjectManager

+DEFAULT_CHASSIS = {
+    "c1700": "1720",
+    "c2600": "2610",
+    "c3600": "3640"
+}
+

 class DynamipsVMHandler:

@@ -52,14 +58,18 @@ class DynamipsVMHandler:
     def create(request, response):

         dynamips_manager = Dynamips.instance()
+        platform = request.json.pop("platform")
+        default_chassis = None
+        if platform in DEFAULT_CHASSIS:
+            default_chassis = DEFAULT_CHASSIS[platform]
         vm = yield from dynamips_manager.create_vm(request.json.pop("name"),
                                                    request.match_info["project_id"],
                                                    request.json.get("vm_id"),
                                                    request.json.get("dynamips_id"),
-                                                   request.json.pop("platform"),
+                                                   platform,
                                                    console=request.json.get("console"),
                                                    aux=request.json.get("aux"),
-                                                   chassis=request.json.pop("chassis", None))
+                                                   chassis=request.json.pop("chassis", default_chassis))

         yield from dynamips_manager.update_vm_settings(vm, request.json)
         yield from dynamips_manager.ghost_ios_support(vm)
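The two Dynamips VM hunks belong together: DEFAULT_CHASSIS supplies a chassis when the client omits one, and create() now pops the platform once and reuses it for both the lookup and the create_vm() call. A minimal sketch of that fallback, using dict.get() in place of the explicit membership test (request_json is a hypothetical payload):

DEFAULT_CHASSIS = {
    "c1700": "1720",
    "c2600": "2610",
    "c3600": "3640"
}


def resolve_platform_and_chassis(request_json):
    # Mirrors the handler logic above: an explicit chassis wins,
    # otherwise fall back to the platform default (None if there is none).
    platform = request_json.pop("platform")
    default_chassis = DEFAULT_CHASSIS.get(platform)
    chassis = request_json.pop("chassis", default_chassis)
    return platform, chassis


print(resolve_platform_and_chassis({"platform": "c3600"}))                     # ('c3600', '3640')
print(resolve_platform_and_chassis({"platform": "c7200"}))                     # ('c7200', None)
print(resolve_platform_and_chassis({"platform": "c2600", "chassis": "2621"}))  # ('c2600', '2621')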
@@ -51,12 +51,11 @@ class QEMUHandler:
                                                request.match_info["project_id"],
                                                request.json.get("vm_id"),
                                                qemu_path=request.json.get("qemu_path"),
-                                               console=request.json.get("console"),
-                                               monitor=request.json.get("monitor"))
+                                               console=request.json.get("console"))

         # Clear already used keys
         map(request.json.__delitem__, ["name", "project_id", "vm_id",
                                        "qemu_path", "console", "monitor"])
-                                       "qemu_path", "console", "monitor"])
+                                       "qemu_path", "console"])

         for field in request.json:
             setattr(vm, field, request.json[field])
@@ -597,42 +597,39 @@ class IOUVM(BaseVM):
                 if self._iou_process.returncode is None:
                     log.warn("IOU process {} is still running... killing it".format(self._iou_process.pid))
                     self._iou_process.kill()

             self._iou_process = None

-        if self._iouyap_process is not None:
+        if self.is_iouyap_running():
             self._terminate_process_iouyap()
             try:
                 yield from gns3server.utils.asyncio.wait_for_process_termination(self._iouyap_process, timeout=3)
             except asyncio.TimeoutError:
                 if self._iouyap_process.returncode is None:
                     log.warn("IOUYAP process {} is still running... killing it".format(self._iouyap_process.pid))
                     self._iouyap_process.kill()

             self._iouyap_process = None
-            self._started = False
+
+        self._started = False

     def _terminate_process_iouyap(self):
         """Terminate the process if running"""

-        if self._iouyap_process:
-            log.info("Stopping IOUYAP instance {} PID={}".format(self.name, self._iouyap_process.pid))
-            try:
-                self._iouyap_process.terminate()
-            # Sometime the process can already be dead when we garbage collect
-            except ProcessLookupError:
-                pass
+        log.info("Stopping IOUYAP instance {} PID={}".format(self.name, self._iouyap_process.pid))
+        try:
+            self._iouyap_process.terminate()
+        # Sometime the process can already be dead when we garbage collect
+        except ProcessLookupError:
+            pass

     def _terminate_process_iou(self):
         """Terminate the process if running"""

-        if self._iou_process:
-            log.info("Stopping IOU instance {} PID={}".format(self.name, self._iou_process.pid))
-            try:
-                self._iou_process.terminate()
-            # Sometime the process can already be dead when we garbage collect
-            except ProcessLookupError:
-                pass
+        log.info("Stopping IOU instance {} PID={}".format(self.name, self._iou_process.pid))
+        try:
+            self._iou_process.terminate()
+        # Sometime the process can already be dead when we garbage collect
+        except ProcessLookupError:
+            pass

     @asyncio.coroutine
     def reload(self):
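The IOU hunk keeps the existing shutdown pattern: terminate the child, give it a few seconds via the gns3server wait_for_process_termination() helper, and kill() it only if it is still alive. A standalone sketch of the same terminate-then-kill sequence built on plain asyncio.wait_for, so it does not depend on that helper:

import asyncio
import sys


async def stop_process(process, timeout=3):
    # Ask the child to exit, wait a bounded time, then force-kill it.
    try:
        process.terminate()
    except ProcessLookupError:
        return  # the process was already gone
    try:
        await asyncio.wait_for(process.wait(), timeout=timeout)
    except asyncio.TimeoutError:
        if process.returncode is None:
            process.kill()
            await process.wait()


async def main():
    # A child that would otherwise run for a minute.
    process = await asyncio.create_subprocess_exec(sys.executable, "-c", "import time; time.sleep(60)")
    await stop_process(process)
    print("exited with return code", process.returncode)


asyncio.run(main())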
@@ -650,7 +647,7 @@ class IOUVM(BaseVM):
         :returns: True or False
         """

-        if self._iou_process:
+        if self._iou_process and self._iou_process.returncode is None:
             return True
         return False

@@ -661,7 +658,7 @@ class IOUVM(BaseVM):
         :returns: True or False
         """

-        if self._iouyap_process:
+        if self._iouyap_process and self._iouyap_process.returncode is None:
             return True
         return False

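Both is_running() hunks tighten the check from "a process handle exists" to "a process handle exists and the child has not exited": for an asyncio subprocess, returncode stays None until the exit status has been collected. A small self-contained illustration (modern async/await syntax):

import asyncio
import sys


def process_is_running(process):
    # Same predicate as the hunks above.
    return process is not None and process.returncode is None


async def main():
    process = await asyncio.create_subprocess_exec(sys.executable, "-c", "print('hello')")
    print(process_is_running(process))  # True: the child has not been waited on yet
    await process.wait()
    print(process_is_running(process))  # False: returncode is now set
    print(process_is_running(None))     # False: never started


asyncio.run(main())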
@@ -26,6 +26,7 @@ import random
 import subprocess
 import shlex
 import asyncio
+import socket

 from .qemu_error import QemuError
 from ..adapters.ethernet_adapter import EthernetAdapter
@@ -51,7 +52,6 @@ class QemuVM(BaseVM):
     :param qemu_path: path to the QEMU binary
     :param qemu_id: QEMU VM instance ID
     :param console: TCP console port
-    :param monitor: TCP monitor port
     """

     def __init__(self,
@@ -60,8 +60,7 @@ class QemuVM(BaseVM):
                  project,
                  manager,
                  qemu_path=None,
-                 console=None,
-                 monitor=None):
+                 console=None):

         super().__init__(name, vm_id, project, manager, console=console)

@@ -72,6 +71,7 @@ class QemuVM(BaseVM):
         self._started = False
         self._process = None
         self._cpulimit_process = None
+        self._monitor = None
         self._stdout_file = ""

         # QEMU settings
@@ -82,7 +82,6 @@ class QemuVM(BaseVM):
         self._hdd_disk_image = ""
         self._options = ""
         self._ram = 256
-        self._monitor = monitor
         self._ethernet_adapters = []
         self._adapter_type = "e1000"
         self._initrd = ""
@@ -92,11 +91,6 @@ class QemuVM(BaseVM):
         self._cpu_throttling = 0  # means no CPU throttling
         self._process_priority = "low"

-        if self._monitor is not None:
-            self._monitor = self._manager.port_manager.reserve_tcp_port(self._monitor, self._project)
-        else:
-            self._monitor = self._manager.port_manager.get_free_tcp_port(self._project)
-
         self.adapters = 1  # creates 1 adapter by default
         log.info("QEMU VM {name} [id={id}] has been created".format(name=self._name,
                                                                     id=self._id))
@@ -111,25 +105,6 @@ class QemuVM(BaseVM):

         return self._monitor

-    @monitor.setter
-    def monitor(self, monitor):
-        """
-        Sets the TCP monitor port.
-
-        :param monitor: monitor port (integer)
-        """
-
-        if monitor == self._monitor:
-            return
-        if self._monitor:
-            self._manager.port_manager.release_monitor_port(self._monitor, self._project)
-        self._monitor = self._manager.port_manager.reserve_monitor_port(monitor, self._project)
-        log.info("{module}: '{name}' [{id}]: monitor port set to {port}".format(
-            module=self.manager.module_name,
-            name=self.name,
-            id=self.id,
-            port=monitor))
-
     @property
     def qemu_path(self):
         """
@@ -610,6 +585,16 @@ class QemuVM(BaseVM):
             return

         else:
+
+            if self._manager.config.get_section_config("Qemu").getboolean("monitor", True):
+                try:
+                    # let the OS find an unused port for the Qemu monitor
+                    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
+                        sock.bind((self._monitor_host, 0))
+                        self._monitor = sock.getsockname()[1]
+                except OSError as e:
+                    raise QemuError("Could not find free port for the Qemu monitor: {}".format(e))
+
             self._command = yield from self._build_command()
             try:
                 log.info("starting QEMU: {}".format(self._command))
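Instead of reserving a monitor port through the PortManager, the QEMU VM now asks the operating system for one: binding a socket to port 0 makes the kernel pick an unused port, and getsockname() reports which one it chose. A standalone sketch of that technique (the 127.0.0.1 default is an assumption; the VM code binds to its own monitor host value):

import socket


def find_free_tcp_port(host="127.0.0.1"):
    # Bind to port 0 so the kernel assigns an unused ephemeral port,
    # then read the chosen port back before closing the socket.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind((host, 0))
        return sock.getsockname()[1]


print("free TCP port:", find_free_tcp_port())

Note the inherent race: the port is released when the socket closes, so another process could in principle claim it before QEMU binds its monitor there.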
@@ -701,9 +686,6 @@ class QemuVM(BaseVM):
         if self._console:
             self._manager.port_manager.release_tcp_port(self._console, self._project)
             self._console = None
-        if self._monitor:
-            self._manager.port_manager.release_tcp_port(self._monitor, self._project)
-            self._monitor = None

     @asyncio.coroutine
     def _get_vm_status(self):
@@ -732,14 +714,15 @@ class QemuVM(BaseVM):
         Suspends this QEMU VM.
         """

-        vm_status = yield from self._get_vm_status()
-        if vm_status is None:
-            raise QemuError("Suspending a QEMU VM is not supported")
-        elif vm_status == "running":
-            yield from self._control_vm("stop")
-            log.debug("QEMU VM has been suspended")
-        else:
-            log.info("QEMU VM is not running to be suspended, current status is {}".format(vm_status))
+        if self.is_running():
+            vm_status = yield from self._get_vm_status()
+            if vm_status is None:
+                raise QemuError("Suspending a QEMU VM is not supported")
+            elif vm_status == "running":
+                yield from self._control_vm("stop")
+                log.debug("QEMU VM has been suspended")
+            else:
+                log.info("QEMU VM is not running to be suspended, current status is {}".format(vm_status))

     @asyncio.coroutine
     def reload(self):
@@ -47,12 +47,6 @@ QEMU_CREATE_SCHEMA = {
             "maximum": 65535,
             "type": ["integer", "null"]
         },
-        "monitor": {
-            "description": "monitor TCP port",
-            "minimum": 1,
-            "maximum": 65535,
-            "type": ["integer", "null"]
-        },
         "hda_disk_image": {
             "description": "QEMU hda disk image path",
             "type": ["string", "null"],
@@ -146,12 +140,6 @@ QEMU_UPDATE_SCHEMA = {
             "maximum": 65535,
             "type": ["integer", "null"]
         },
-        "monitor": {
-            "description": "monitor TCP port",
-            "minimum": 1,
-            "maximum": 65535,
-            "type": ["integer", "null"]
-        },
         "hda_disk_image": {
             "description": "QEMU hda disk image path",
             "type": ["string", "null"],
@@ -341,12 +329,6 @@ QEMU_OBJECT_SCHEMA = {
             "maximum": 65535,
             "type": "integer"
         },
-        "monitor": {
-            "description": "monitor TCP port",
-            "minimum": 1,
-            "maximum": 65535,
-            "type": "integer"
-        },
         "initrd": {
             "description": "QEMU initrd path",
             "type": "string",
@@ -386,7 +368,7 @@ QEMU_OBJECT_SCHEMA = {
     "additionalProperties": False,
     "required": ["vm_id", "project_id", "name", "qemu_path", "hda_disk_image", "hdb_disk_image",
                  "hdc_disk_image", "hdd_disk_image", "ram", "adapters", "adapter_type", "console",
-                 "monitor", "initrd", "kernel_image", "kernel_command_line",
+                 "initrd", "kernel_image", "kernel_command_line",
                  "legacy_networking", "cpu_throttling", "process_priority", "options"
                  ]
 }
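With "monitor" removed from the three QEMU schemas and from the required list, and with the object schema above keeping "additionalProperties": False, a payload that still carries a monitor port no longer validates. A hedged sketch with the jsonschema package and a heavily trimmed stand-in schema (the real QEMU_CREATE_SCHEMA has many more properties):

# Requires the third-party jsonschema package.
from jsonschema import Draft4Validator

TRIMMED_QEMU_CREATE_SCHEMA = {
    "type": "object",
    "additionalProperties": False,
    "required": ["name"],
    "properties": {
        "name": {"description": "QEMU VM instance name", "type": "string"},
        "console": {"description": "console TCP port", "minimum": 1, "maximum": 65535, "type": ["integer", "null"]},
    },
}

validator = Draft4Validator(TRIMMED_QEMU_CREATE_SCHEMA)
print(validator.is_valid({"name": "qemu1", "console": 5001}))   # True
print(validator.is_valid({"name": "qemu1", "monitor": 4242}))   # False: unknown property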
@@ -151,14 +151,11 @@ def test_close(vm, port_manager, loop):
     loop.run_until_complete(asyncio.async(vm.start()))

     console_port = vm.console
-    monitor_port = vm.monitor

     loop.run_until_complete(asyncio.async(vm.close()))

     # Raise an exception if the port is not free
     port_manager.reserve_tcp_port(console_port, vm.project)
-    # Raise an exception if the port is not free
-    port_manager.reserve_tcp_port(monitor_port, vm.project)

     assert vm.is_running() is False

@@ -226,7 +223,6 @@ def test_json(vm, project):
 def test_control_vm(vm, loop):

     vm._process = MagicMock()
-    vm._monitor = 4242
     reader = MagicMock()
     writer = MagicMock()
     with asyncio_patch("asyncio.open_connection", return_value=(reader, writer)) as open_connect:
@@ -238,7 +234,6 @@ def test_control_vm(vm, loop):
 def test_control_vm_expect_text(vm, loop, running_subprocess_mock):

     vm._process = running_subprocess_mock
-    vm._monitor = 4242
     reader = MagicMock()
     writer = MagicMock()
     with asyncio_patch("asyncio.open_connection", return_value=(reader, writer)) as open_connect:
@@ -247,6 +242,7 @@ def test_control_vm_expect_text(vm, loop, running_subprocess_mock):
         future.set_result(b"epic product")
         reader.readline.return_value = future

+        vm._monitor = 4242
         res = loop.run_until_complete(asyncio.async(vm._control_vm("test", [b"epic"])))
         assert writer.write.called_with("test")

@@ -269,8 +265,6 @@ def test_build_command(vm, loop, fake_qemu_binary, port_manager):
         os.path.join(vm.working_dir, "flash.qcow2"),
         "-serial",
         "telnet:127.0.0.1:{},server,nowait".format(vm.console),
-        "-monitor",
-        "tcp:127.0.0.1:{},server,nowait".format(vm.monitor),
         "-device",
         "e1000,mac=00:00:ab:7e:b5:00,netdev=gns3-0",
         "-netdev",