mirror of https://github.com/GNS3/gns3-server synced 2024-12-25 16:28:11 +00:00

Merge remote-tracking branch 'origin/master'

Jeremy 2015-03-25 14:37:10 -06:00
commit 7d8ff54c78
7 changed files with 66 additions and 98 deletions

View File

@@ -181,7 +181,10 @@ class DynamipsDeviceHandler:
dynamips_manager = Dynamips.instance()
device = dynamips_manager.get_device(request.match_info["device_id"], project_id=request.match_info["project_id"])
port_number = int(request.match_info["port_number"])
yield from device.remove_nio(port_number)
if asyncio.iscoroutinefunction(device.remove_nio):
yield from device.remove_nio(port_number)
else:
device.remove_nio(port_number)
response.set_status(204)
@Route.post(
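The branch added above lets the handler work whether remove_nio() is implemented as a plain method or as a coroutine. A minimal, standalone sketch of that dispatch pattern, using toy device classes and modern async/await syntax instead of the handler's yield from style:

import asyncio

class PlainDevice:
    def remove_nio(self, port_number):
        print("removed NIO from port", port_number)

class AsyncDevice:
    async def remove_nio(self, port_number):
        await asyncio.sleep(0)  # stands in for real asynchronous work
        print("removed NIO from port", port_number)

async def detach(device, port_number):
    # Await only when the method really is a coroutine function,
    # mirroring the check added in the handler above.
    if asyncio.iscoroutinefunction(device.remove_nio):
        await device.remove_nio(port_number)
    else:
        device.remove_nio(port_number)

asyncio.run(detach(PlainDevice(), 0))
asyncio.run(detach(AsyncDevice(), 1))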

View File

@@ -28,6 +28,12 @@ from ...schemas.dynamips_vm import VM_CONFIGS_SCHEMA
from ...modules.dynamips import Dynamips
from ...modules.project_manager import ProjectManager
DEFAULT_CHASSIS = {
"c1700": "1720",
"c2600": "2610",
"c3600": "3640"
}
class DynamipsVMHandler:
@@ -52,14 +58,18 @@ class DynamipsVMHandler:
def create(request, response):
dynamips_manager = Dynamips.instance()
platform = request.json.pop("platform")
default_chassis = None
if platform in DEFAULT_CHASSIS:
default_chassis = DEFAULT_CHASSIS[platform]
vm = yield from dynamips_manager.create_vm(request.json.pop("name"),
request.match_info["project_id"],
request.json.get("vm_id"),
request.json.get("dynamips_id"),
request.json.pop("platform"),
platform,
console=request.json.get("console"),
aux=request.json.get("aux"),
chassis=request.json.pop("chassis", None))
chassis=request.json.pop("chassis", default_chassis))
yield from dynamips_manager.update_vm_settings(vm, request.json)
yield from dynamips_manager.ghost_ios_support(vm)
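The DEFAULT_CHASSIS table above gives each platform a sensible chassis when the request does not name one. A standalone illustration of that fallback; pick_chassis() is a made-up helper, only the lookup pattern matches the handler:

DEFAULT_CHASSIS = {
    "c1700": "1720",
    "c2600": "2610",
    "c3600": "3640"
}

def pick_chassis(platform, requested_chassis=None):
    # An explicit chassis in the request wins; otherwise fall back to the
    # platform default (None for platforms without a chassis, such as c7200).
    return requested_chassis or DEFAULT_CHASSIS.get(platform)

assert pick_chassis("c3600") == "3640"
assert pick_chassis("c3600", "3660") == "3660"
assert pick_chassis("c7200") is None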

View File

@@ -51,12 +51,11 @@ class QEMUHandler:
request.match_info["project_id"],
request.json.get("vm_id"),
qemu_path=request.json.get("qemu_path"),
console=request.json.get("console"),
monitor=request.json.get("monitor"))
console=request.json.get("console"))
# Clear already used keys
map(request.json.__delitem__, ["name", "project_id", "vm_id",
"qemu_path", "console", "monitor"])
"qemu_path", "console"])
for field in request.json:
setattr(vm, field, request.json[field])
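The "clear already used keys" step above passes __delitem__ to map(); note that in Python 3 map() is lazy, so nothing is deleted until the iterator is consumed. A self-contained sketch of the same clean-up done eagerly (the payload contents are illustrative only):

payload = {"name": "vm1", "qemu_path": "/usr/bin/qemu-system-x86_64",
           "console": 5000, "ram": 256}

for key in ("name", "project_id", "vm_id", "qemu_path", "console"):
    payload.pop(key, None)  # pop() tolerates keys missing from the request

assert payload == {"ram": 256}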

View File

@@ -597,42 +597,39 @@ class IOUVM(BaseVM):
if self._iou_process.returncode is None:
log.warn("IOU process {} is still running... killing it".format(self._iou_process.pid))
self._iou_process.kill()
self._iou_process = None
if self._iouyap_process is not None:
self._terminate_process_iouyap()
try:
yield from gns3server.utils.asyncio.wait_for_process_termination(self._iouyap_process, timeout=3)
except asyncio.TimeoutError:
if self._iouyap_process.returncode is None:
log.warn("IOUYAP process {} is still running... killing it".format(self._iouyap_process.pid))
self._iouyap_process.kill()
if self.is_iouyap_running():
self._terminate_process_iouyap()
try:
yield from gns3server.utils.asyncio.wait_for_process_termination(self._iouyap_process, timeout=3)
except asyncio.TimeoutError:
if self._iouyap_process.returncode is None:
log.warn("IOUYAP process {} is still running... killing it".format(self._iouyap_process.pid))
self._iouyap_process.kill()
self._iouyap_process = None
self._started = False
self._started = False
def _terminate_process_iouyap(self):
"""Terminate the process if running"""
if self._iouyap_process:
log.info("Stopping IOUYAP instance {} PID={}".format(self.name, self._iouyap_process.pid))
try:
self._iouyap_process.terminate()
# Sometime the process can already be dead when we garbage collect
except ProcessLookupError:
pass
log.info("Stopping IOUYAP instance {} PID={}".format(self.name, self._iouyap_process.pid))
try:
self._iouyap_process.terminate()
# Sometime the process can already be dead when we garbage collect
except ProcessLookupError:
pass
def _terminate_process_iou(self):
"""Terminate the process if running"""
if self._iou_process:
log.info("Stopping IOU instance {} PID={}".format(self.name, self._iou_process.pid))
try:
self._iou_process.terminate()
# Sometime the process can already be dead when we garbage collect
except ProcessLookupError:
pass
log.info("Stopping IOU instance {} PID={}".format(self.name, self._iou_process.pid))
try:
self._iou_process.terminate()
# Sometime the process can already be dead when we garbage collect
except ProcessLookupError:
pass
@asyncio.coroutine
def reload(self):
@@ -650,7 +647,7 @@ class IOUVM(BaseVM):
:returns: True or False
"""
if self._iou_process:
if self._iou_process and self._iou_process.returncode is None:
return True
return False
@@ -661,7 +658,7 @@ class IOUVM(BaseVM):
:returns: True or False
"""
if self._iouyap_process:
if self._iouyap_process and self._iouyap_process.returncode is None:
return True
return False
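The stop/terminate changes above follow a common asyncio subprocess pattern: ask the process to terminate, wait briefly for it to exit, kill it if it ignores the request, and treat a returncode of None as "still running". A minimal runnable sketch of that pattern, using asyncio.wait_for directly instead of the project's wait_for_process_termination helper:

import asyncio
import sys

def is_running(process):
    # Same liveness test as the is_running()/is_iouyap_running() changes above.
    return process is not None and process.returncode is None

async def stop_process(process, timeout=3):
    if not is_running(process):
        return
    try:
        process.terminate()  # polite request first
    except ProcessLookupError:
        return  # it already exited
    try:
        await asyncio.wait_for(process.wait(), timeout)
    except asyncio.TimeoutError:
        if process.returncode is None:
            process.kill()  # it ignored the request, force it
            await process.wait()

async def main():
    process = await asyncio.create_subprocess_exec(
        sys.executable, "-c", "import time; time.sleep(30)")
    await stop_process(process)
    assert not is_running(process)

asyncio.run(main())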

View File

@@ -26,6 +26,7 @@ import random
import subprocess
import shlex
import asyncio
import socket
from .qemu_error import QemuError
from ..adapters.ethernet_adapter import EthernetAdapter
@@ -51,7 +52,6 @@ class QemuVM(BaseVM):
:param qemu_path: path to the QEMU binary
:param qemu_id: QEMU VM instance ID
:param console: TCP console port
:param monitor: TCP monitor port
"""
def __init__(self,
@@ -60,8 +60,7 @@ class QemuVM(BaseVM):
project,
manager,
qemu_path=None,
console=None,
monitor=None):
console=None):
super().__init__(name, vm_id, project, manager, console=console)
@@ -72,6 +71,7 @@ class QemuVM(BaseVM):
self._started = False
self._process = None
self._cpulimit_process = None
self._monitor = None
self._stdout_file = ""
# QEMU settings
@@ -82,7 +82,6 @@ class QemuVM(BaseVM):
self._hdd_disk_image = ""
self._options = ""
self._ram = 256
self._monitor = monitor
self._ethernet_adapters = []
self._adapter_type = "e1000"
self._initrd = ""
@@ -92,11 +91,6 @@ class QemuVM(BaseVM):
self._cpu_throttling = 0 # means no CPU throttling
self._process_priority = "low"
if self._monitor is not None:
self._monitor = self._manager.port_manager.reserve_tcp_port(self._monitor, self._project)
else:
self._monitor = self._manager.port_manager.get_free_tcp_port(self._project)
self.adapters = 1 # creates 1 adapter by default
log.info("QEMU VM {name} [id={id}] has been created".format(name=self._name,
id=self._id))
@@ -111,25 +105,6 @@ class QemuVM(BaseVM):
return self._monitor
@monitor.setter
def monitor(self, monitor):
"""
Sets the TCP monitor port.
:param monitor: monitor port (integer)
"""
if monitor == self._monitor:
return
if self._monitor:
self._manager.port_manager.release_monitor_port(self._monitor, self._project)
self._monitor = self._manager.port_manager.reserve_monitor_port(monitor, self._project)
log.info("{module}: '{name}' [{id}]: monitor port set to {port}".format(
module=self.manager.module_name,
name=self.name,
id=self.id,
port=monitor))
@property
def qemu_path(self):
"""
@@ -610,6 +585,16 @@ class QemuVM(BaseVM):
return
else:
if self._manager.config.get_section_config("Qemu").getboolean("monitor", True):
try:
# let the OS find an unused port for the Qemu monitor
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.bind((self._monitor_host, 0))
self._monitor = sock.getsockname()[1]
except OSError as e:
raise QemuError("Could not find free port for the Qemu monitor: {}".format(e))
self._command = yield from self._build_command()
try:
log.info("starting QEMU: {}".format(self._command))
@@ -701,9 +686,6 @@ class QemuVM(BaseVM):
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None
if self._monitor:
self._manager.port_manager.release_tcp_port(self._monitor, self._project)
self._monitor = None
@asyncio.coroutine
def _get_vm_status(self):
@@ -732,14 +714,15 @@ class QemuVM(BaseVM):
Suspends this QEMU VM.
"""
vm_status = yield from self._get_vm_status()
if vm_status is None:
raise QemuError("Suspending a QEMU VM is not supported")
elif vm_status == "running":
yield from self._control_vm("stop")
log.debug("QEMU VM has been suspended")
else:
log.info("QEMU VM is not running to be suspended, current status is {}".format(vm_status))
if self.is_running():
vm_status = yield from self._get_vm_status()
if vm_status is None:
raise QemuError("Suspending a QEMU VM is not supported")
elif vm_status == "running":
yield from self._control_vm("stop")
log.debug("QEMU VM has been suspended")
else:
log.info("QEMU VM is not running to be suspended, current status is {}".format(vm_status))
@asyncio.coroutine
def reload(self):

View File

@@ -47,12 +47,6 @@ QEMU_CREATE_SCHEMA = {
"maximum": 65535,
"type": ["integer", "null"]
},
"monitor": {
"description": "monitor TCP port",
"minimum": 1,
"maximum": 65535,
"type": ["integer", "null"]
},
"hda_disk_image": {
"description": "QEMU hda disk image path",
"type": ["string", "null"],
@@ -146,12 +140,6 @@ QEMU_UPDATE_SCHEMA = {
"maximum": 65535,
"type": ["integer", "null"]
},
"monitor": {
"description": "monitor TCP port",
"minimum": 1,
"maximum": 65535,
"type": ["integer", "null"]
},
"hda_disk_image": {
"description": "QEMU hda disk image path",
"type": ["string", "null"],
@@ -341,12 +329,6 @@ QEMU_OBJECT_SCHEMA = {
"maximum": 65535,
"type": "integer"
},
"monitor": {
"description": "monitor TCP port",
"minimum": 1,
"maximum": 65535,
"type": "integer"
},
"initrd": {
"description": "QEMU initrd path",
"type": "string",
@@ -386,7 +368,7 @@ QEMU_OBJECT_SCHEMA = {
"additionalProperties": False,
"required": ["vm_id", "project_id", "name", "qemu_path", "hda_disk_image", "hdb_disk_image",
"hdc_disk_image", "hdd_disk_image", "ram", "adapters", "adapter_type", "console",
"monitor", "initrd", "kernel_image", "kernel_command_line",
"initrd", "kernel_image", "kernel_command_line",
"legacy_networking", "cpu_throttling", "process_priority", "options"
]
}
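Because the object schema above sets "additionalProperties": False, a payload that still carries the removed monitor field no longer validates. A hypothetical check with the jsonschema package (a trimmed-down schema, not the real QEMU schemas) showing that effect:

from jsonschema import ValidationError, validate

schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "console": {"minimum": 1, "maximum": 65535, "type": ["integer", "null"]}
    },
    "additionalProperties": False,
    "required": ["name"]
}

validate({"name": "vm1", "console": 5001}, schema)  # accepted

try:
    validate({"name": "vm1", "monitor": 4242}, schema)
except ValidationError as error:
    print("rejected:", error.message)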

View File

@@ -151,14 +151,11 @@ def test_close(vm, port_manager, loop):
loop.run_until_complete(asyncio.async(vm.start()))
console_port = vm.console
monitor_port = vm.monitor
loop.run_until_complete(asyncio.async(vm.close()))
# Raise an exception if the port is not free
port_manager.reserve_tcp_port(console_port, vm.project)
# Raise an exception if the port is not free
port_manager.reserve_tcp_port(monitor_port, vm.project)
assert vm.is_running() is False
@@ -226,7 +223,6 @@ def test_json(vm, project):
def test_control_vm(vm, loop):
vm._process = MagicMock()
vm._monitor = 4242
reader = MagicMock()
writer = MagicMock()
with asyncio_patch("asyncio.open_connection", return_value=(reader, writer)) as open_connect:
@@ -238,7 +234,6 @@ def test_control_vm(vm, loop):
def test_control_vm_expect_text(vm, loop, running_subprocess_mock):
vm._process = running_subprocess_mock
vm._monitor = 4242
reader = MagicMock()
writer = MagicMock()
with asyncio_patch("asyncio.open_connection", return_value=(reader, writer)) as open_connect:
@@ -247,6 +242,7 @@ def test_control_vm_expect_text(vm, loop, running_subprocess_mock):
future.set_result(b"epic product")
reader.readline.return_value = future
vm._monitor = 4242
res = loop.run_until_complete(asyncio.async(vm._control_vm("test", [b"epic"])))
assert writer.write.called_with("test")
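The tests above drive _control_vm against mocked asyncio streams (asyncio_patch on asyncio.open_connection), setting vm._monitor just before the call. A self-contained sketch of that mocking idea with plain unittest.mock instead of the project's asyncio_patch helper; control_vm() here is a stand-in, not the real method:

import asyncio
from unittest.mock import MagicMock, patch

async def control_vm(host, port, command):
    # Stand-in for a monitor command: connect, send, read one reply line.
    reader, writer = await asyncio.open_connection(host, port)
    writer.write(command.encode() + b"\n")
    return await reader.readline()

async def main():
    reader = MagicMock()
    writer = MagicMock()
    reply_future = asyncio.get_running_loop().create_future()
    reply_future.set_result(b"epic product")
    reader.readline.return_value = reply_future

    async def fake_open_connection(host, port):
        return reader, writer

    with patch("asyncio.open_connection", fake_open_connection):
        reply = await control_vm("127.0.0.1", 4242, "info version")

    assert reply == b"epic product"
    writer.write.assert_called_once_with(b"info version\n")

asyncio.run(main())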
@@ -269,8 +265,6 @@ def test_build_command(vm, loop, fake_qemu_binary, port_manager):
os.path.join(vm.working_dir, "flash.qcow2"),
"-serial",
"telnet:127.0.0.1:{},server,nowait".format(vm.console),
"-monitor",
"tcp:127.0.0.1:{},server,nowait".format(vm.monitor),
"-device",
"e1000,mac=00:00:ab:7e:b5:00,netdev=gns3-0",
"-netdev",