mirror of https://github.com/GNS3/gns3-server
synced 2024-11-24 09:18:08 +00:00

Use uBridge for VirtualBox connections plus some cleaning. Ref #267.

commit d79fb231d3 (parent d28fad223f)
@@ -338,7 +338,7 @@ class BaseManager:
         return False
 
-    def create_nio(self, executable, nio_settings):
+    def create_nio(self, nio_settings):
         """
         Creates a new NIO.
 
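This signature change drives every hunk below: create_nio() now takes only the settings dictionary, since the NIO no longer depends on which emulator binary consumes it. A minimal runnable sketch of the new calling convention; the Manager stub is illustrative, not the real BaseManager:

    # Stand-in manager showing the one-argument convention after this commit.
    class Manager:
        def create_nio(self, nio_settings):
            # The real method dispatches on nio_settings["type"]; this stub
            # just echoes the settings so the example is self-contained.
            return dict(nio_settings)

    manager = Manager()
    # Before: manager.create_nio(executable_path, nio_settings)
    # After:  the executable argument is gone at every call site.
    nio = manager.create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
    assert nio["type"] == "nio_udp"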
@@ -544,8 +544,8 @@ class BaseNode:
         rport = m.get_free_udp_port(self.project)
         source_nio_settings = {'lport': lport, 'rhost': '127.0.0.1', 'rport': rport, 'type': 'nio_udp'}
         destination_nio_settings = {'lport': rport, 'rhost': '127.0.0.1', 'rport': lport, 'type': 'nio_udp'}
-        source_nio = self.manager.create_nio(self.ubridge_path, source_nio_settings)
-        destination_nio = self.manager.create_nio(self.ubridge_path, destination_nio_settings)
+        source_nio = self.manager.create_nio(source_nio_settings)
+        destination_nio = self.manager.create_nio(destination_nio_settings)
         log.info("{module}: '{name}' [{id}]:local UDP tunnel created between port {port1} and {port2}".format(module=self.manager.module_name,
                                                                                                               name=self.name,
                                                                                                               id=self.id,
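The two settings dictionaries above are mirror images: each side's local port is the other side's remote port, which gives the node process and uBridge a loopback UDP pair to talk over. A self-contained illustration of that symmetry (the port numbers are arbitrary; the real ones come from the port manager):

    lport, rport = 20000, 20001  # normally allocated via get_free_udp_port()
    source = {'lport': lport, 'rhost': '127.0.0.1', 'rport': rport, 'type': 'nio_udp'}
    destination = {'lport': rport, 'rhost': '127.0.0.1', 'rport': lport, 'type': 'nio_udp'}
    assert source['lport'] == destination['rport']
    assert source['rport'] == destination['lport']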
@@ -1096,13 +1096,12 @@ class QemuVM(BaseNode):
             raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
                                                                                                  adapter_number=adapter_number))
 
-        if self.is_running():
-            if self.ubridge:
-                yield from self._add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
-                                                            self._local_udp_tunnels[adapter_number][1],
-                                                            nio)
-            else:
-                raise QemuError("Sorry, adding a link to a started Qemu VM is not supported without using uBridge.")
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
+                                                        self._local_udp_tunnels[adapter_number][1],
+                                                        nio)
+        elif self.is_running():
+            raise QemuError("Sorry, adding a link to a started Qemu VM is not supported without using uBridge.")
 
         adapter.add_nio(0, nio)
         log.info('QEMU VM "{name}" [{id}]: {nio} added to adapter {adapter_number}'.format(name=self._name,
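The guard is inverted here: instead of asking "is the VM running, and if so is uBridge available?", the code now asks "is uBridge up?" first, and only rejects the hot-plug when the VM is running without it. A condensed, runnable model of the new control flow; plain booleans stand in for the real objects and the helper name is hypothetical:

    def can_bind_nio(ubridge_running, vm_running):
        # Mirrors the if/elif ordering in the new adapter_add_nio_binding().
        if ubridge_running:
            return True   # wire the NIO through uBridge, running VM or not
        elif vm_running:
            raise RuntimeError("not supported without using uBridge")
        return True       # VM stopped: the binding is simply recorded

    assert can_bind_nio(True, True)
    assert can_bind_nio(False, False)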
@@ -1126,11 +1125,10 @@ class QemuVM(BaseNode):
             raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
                                                                                                  adapter_number=adapter_number))
 
-        if self.is_running():
-            if self.ubridge:
-                yield from self._ubridge_send("bridge delete {name}".format(name="QEMU-{}-{}".format(self._id, adapter_number)))
-            else:
-                raise QemuError("Sorry, removing a link to a started Qemu VM is not supported without using uBridge.")
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._ubridge_send("bridge delete {name}".format(name="QEMU-{}-{}".format(self._id, adapter_number)))
+        elif self.is_running():
+            raise QemuError("Sorry, removing a link to a started Qemu VM is not supported without using uBridge.")
 
         nio = adapter.get_nio(0)
         if isinstance(nio, NIOUDP):
@@ -1171,7 +1169,7 @@ class QemuVM(BaseNode):
 
         nio.startPacketCapture(output_file)
 
-        if self.is_running() and self.ubridge:
+        if self.ubridge and self.ubridge.is_running():
             yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="QEMU-{}-{}".format(self._id, adapter_number),
                                                                                                output_file=output_file))
 
@@ -1198,7 +1196,7 @@ class QemuVM(BaseNode):
 
         nio.stopPacketCapture()
 
-        if self.is_running() and self.ubridge:
+        if self.ubridge and self.ubridge.is_running():
             yield from self._ubridge_send('bridge stop_capture {name}'.format(name="QEMU-{}-{}".format(self._id, adapter_number)))
 
         log.info("QEMU VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name,
@@ -59,6 +59,7 @@ class VirtualBoxVM(BaseNode):
         self._system_properties = {}
         self._telnet_server_thread = None
         self._serial_pipe = None
+        self._local_udp_tunnels = {}
 
         # VirtualBox settings
         self._adapters = adapters
@@ -217,6 +218,15 @@ class VirtualBoxVM(BaseNode):
         # add a guest property to let the VM know about the GNS3 project directory
         yield from self.manager.execute("guestproperty", ["set", self._vmname, "ProjectDirInGNS3", self.working_dir])
 
+        if self.use_ubridge:
+            yield from self._start_ubridge()
+            for adapter_number in range(0, self._adapters):
+                nio = self._ethernet_adapters[adapter_number].get_nio(0)
+                if nio:
+                    yield from self._add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
+                                                                self._local_udp_tunnels[adapter_number][1],
+                                                                nio)
+
         if self._enable_remote_console and self._console is not None:
             try:
                 # wait for VirtualBox to create the pipe file.
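On start, the VirtualBox node now launches its own uBridge and creates one bridge per adapter that has a NIO bound, named "VBOX-<vm id>-<adapter number>". A small sketch of that wiring loop; wire_adapters and add_bridge are hypothetical stand-ins for the coroutines in the diff:

    def wire_adapters(adapters, vm_id, add_bridge):
        # adapters: one NIO (or None) per VirtualBox adapter slot.
        for adapter_number, nio in enumerate(adapters):
            if nio:
                add_bridge("VBOX-{}-{}".format(vm_id, adapter_number), nio)

    wired = []
    wire_adapters([{"type": "nio_udp"}, None], "vm1",
                  lambda name, nio: wired.append(name))
    assert wired == ["VBOX-vm1-0"]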
@@ -238,6 +248,7 @@ class VirtualBoxVM(BaseNode):
         """
 
         self._hw_virtualization = False
+        yield from self._stop_ubridge()
         self._stop_remote_console()
         vm_state = yield from self._get_vm_state()
         if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
@@ -426,6 +437,11 @@ class VirtualBoxVM(BaseNode):
             if nio and isinstance(nio, NIOUDP):
                 self.manager.port_manager.release_udp_port(nio.lport, self._project)
 
+        for udp_tunnel in self._local_udp_tunnels.values():
+            self.manager.port_manager.release_udp_port(udp_tunnel[0].lport, self._project)
+            self.manager.port_manager.release_udp_port(udp_tunnel[1].lport, self._project)
+        self._local_udp_tunnels = {}
+
         self.acpi_shutdown = False
         yield from self.stop()
 
@@ -774,7 +790,15 @@ class VirtualBoxVM(BaseNode):
                 # set the backend to null to avoid a difference in the number of interfaces in the Guest.
                 yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
                 yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
-            nio = self._ethernet_adapters[adapter_number].get_nio(0)
+
+            if self.use_ubridge:
+                # use a local UDP tunnel to connect to uBridge instead
+                if adapter_number not in self._local_udp_tunnels:
+                    self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
+                nio = self._local_udp_tunnels[adapter_number][0]
+            else:
+                nio = self._ethernet_adapters[adapter_number].get_nio(0)
+
             if nio:
                 if not self._use_any_adapter and attachment not in ("none", "null", "generic"):
                     raise VirtualBoxError("Attachment ({}) already configured on adapter {}. "
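Note the lazy caching: a tunnel pair is created once per adapter number and reused, and VirtualBox is always attached to the first endpoint of the pair while uBridge later gets the second (index [1] in the start hunk above). A minimal model of that cache; the tuple stands in for the (source_nio, destination_nio) pair:

    tunnels = {}

    def get_vbox_nio(adapter_number, create_tunnel):
        # Create the tunnel pair on first use, then reuse it.
        if adapter_number not in tunnels:
            tunnels[adapter_number] = create_tunnel()
        return tunnels[adapter_number][0]   # VirtualBox side of the pair

    calls = []
    make = lambda: (calls.append(1) or ("src", "dst"))
    assert get_vbox_nio(0, make) == "src"
    assert get_vbox_nio(0, make) == "src"  # cached: factory ran only once
    assert len(calls) == 1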
@@ -916,22 +940,27 @@ class VirtualBoxVM(BaseNode):
             raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                             adapter_number=adapter_number))
 
-        vm_state = yield from self._get_vm_state()
-        if vm_state == "running":
-            if isinstance(nio, NIOUDP):
-                # dynamically configure an UDP tunnel on the VirtualBox adapter
-                yield from self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1))
-                yield from self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, nio.lport))
-                yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
-                yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
-                yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
+                                                        self._local_udp_tunnels[adapter_number][1],
+                                                        nio)
+        else:
+            vm_state = yield from self._get_vm_state()
+            if vm_state == "running":
+                if isinstance(nio, NIOUDP):
+                    # dynamically configure an UDP tunnel on the VirtualBox adapter
+                    yield from self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1))
+                    yield from self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, nio.lport))
+                    yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
+                    yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
+                    yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
 
-                # check if the UDP tunnel has been correctly set
-                vm_info = yield from self._get_vm_info()
-                generic_driver_number = "generic{}".format(adapter_number + 1)
-                if generic_driver_number not in vm_info and vm_info[generic_driver_number] != "UDPTunnel":
-                    log.warning("UDP tunnel has not been set on nic: {}".format(adapter_number + 1))
-                    self.project.emit("log.warning", {"message": "UDP tunnel has not been set on nic: {}".format(adapter_number + 1)})
+                    # check if the UDP tunnel has been correctly set
+                    vm_info = yield from self._get_vm_info()
+                    generic_driver_number = "generic{}".format(adapter_number + 1)
+                    if generic_driver_number not in vm_info and vm_info[generic_driver_number] != "UDPTunnel":
+                        log.warning("UDP tunnel has not been set on nic: {}".format(adapter_number + 1))
+                        self.project.emit("log.warning", {"message": "UDP tunnel has not been set on nic: {}".format(adapter_number + 1)})
 
         adapter.add_nio(0, nio)
         log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name,
@@ -955,11 +984,14 @@ class VirtualBoxVM(BaseNode):
             raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                             adapter_number=adapter_number))
 
-        vm_state = yield from self._get_vm_state()
-        if vm_state == "running":
-            # dynamically disable the VirtualBox adapter
-            yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
-            yield from self._control_vm("nic{} null".format(adapter_number + 1))
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._ubridge_send("bridge delete {name}".format(name="VBOX-{}-{}".format(self._id, adapter_number)))
+        else:
+            vm_state = yield from self._get_vm_state()
+            if vm_state == "running":
+                # dynamically disable the VirtualBox adapter
+                yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
+                yield from self._control_vm("nic{} null".format(adapter_number + 1))
 
         nio = adapter.get_nio(0)
         if isinstance(nio, NIOUDP):
@@ -987,9 +1019,10 @@ class VirtualBoxVM(BaseNode):
             raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                             adapter_number=adapter_number))
 
-        vm_state = yield from self._get_vm_state()
-        if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
-            raise VirtualBoxError("Sorry, packet capturing on a started VirtualBox VM is not supported.")
+        if not self.use_ubridge:
+            vm_state = yield from self._get_vm_state()
+            if vm_state == "running" or vm_state == "paused" or vm_state == "stuck":
+                raise VirtualBoxError("Sorry, packet capturing on a started VirtualBox VM is not supported without using uBridge")
 
         nio = adapter.get_nio(0)
 
@@ -1000,6 +1033,11 @@ class VirtualBoxVM(BaseNode):
             raise VirtualBoxError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
 
         nio.startPacketCapture(output_file)
+
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VBOX-{}-{}".format(self._id, adapter_number),
+                                                                                               output_file=output_file))
 
         log.info("VirtualBox VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name,
                                                                                                              id=self.id,
                                                                                                              adapter_number=adapter_number))
@@ -1024,6 +1062,9 @@ class VirtualBoxVM(BaseNode):
 
         nio.stopPacketCapture()
 
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._ubridge_send('bridge stop_capture {name}'.format(name="VBOX-{}-{}".format(self._id, adapter_number)))
+
         log.info("VirtualBox VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name,
                                                                                                              id=self.id,
                                                                                                              adapter_number=adapter_number))
@@ -370,13 +370,16 @@ class VPCSVM(BaseNode):
             raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
                                                                                            port_number=port_number))
 
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
+        elif self.is_running():
+            raise VPCSError("Sorry, adding a link to a started VPCS instance is not supported without using uBridge.")
+
         self._ethernet_adapter.add_nio(port_number, nio)
         log.info('VPCS "{name}" [{id}]: {nio} added to port {port_number}'.format(name=self._name,
                                                                                   id=self.id,
                                                                                   nio=nio,
                                                                                   port_number=port_number))
-        if self._started and self.ubridge:
-            yield from self._add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
 
         return nio
 
@@ -394,14 +397,16 @@ class VPCSVM(BaseNode):
             raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
                                                                                            port_number=port_number))
 
+        if self.ubridge and self.ubridge.is_running():
+            yield from self._ubridge_send("bridge delete {name}".format(name="VPCS-{}".format(self._id)))
+        elif self.is_running():
+            raise VPCSError("Sorry, adding a link to a started VPCS instance is not supported without using uBridge.")
+
         nio = self._ethernet_adapter.get_nio(port_number)
         if isinstance(nio, NIOUDP):
             self.manager.port_manager.release_udp_port(nio.lport, self._project)
         self._ethernet_adapter.remove_nio(port_number)
 
-        if self._started and self.ubridge:
-            yield from self._ubridge_send("bridge delete {name}".format(name="VPCS-{}".format(self._id)))
-
         log.info('VPCS "{name}" [{id}]: {nio} removed from port {port_number}'.format(name=self._name,
                                                                                       id=self.id,
                                                                                       nio=nio,
@@ -434,7 +439,7 @@ class VPCSVM(BaseNode):
 
         nio.startPacketCapture(output_file)
 
-        if self._started and self.ubridge:
+        if self.ubridge and self.ubridge.is_running():
             yield from self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VPCS-{}".format(self._id),
                                                                                                output_file=output_file))
 
@@ -461,7 +466,7 @@ class VPCSVM(BaseNode):
 
         nio.stopPacketCapture()
 
-        if self._started and self.ubridge:
+        if self.ubridge and self.ubridge.is_running():
             yield from self._ubridge_send('bridge stop_capture {name}'.format(name="VPCS-{}".format(self._id)))
 
         log.info("VPCS '{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name,
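One naming detail worth noticing across the VM hunks: VPCS nodes have a single port, so their bridge is "VPCS-<id>" with no adapter suffix, while QEMU and VirtualBox use "<KIND>-<id>-<adapter>". A tiny illustration with a hypothetical helper:

    def bridge_name(kind, vm_id, adapter_number=None):
        # Naming convention used by the hunks above (helper is hypothetical).
        if adapter_number is None:
            return "{}-{}".format(kind, vm_id)
        return "{}-{}-{}".format(kind, vm_id, adapter_number)

    assert bridge_name("VPCS", "abc") == "VPCS-abc"
    assert bridge_name("VBOX", "abc", 0) == "VBOX-abc-0"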
@@ -192,7 +192,7 @@ class CloudHandler:
 
         builtin_manager = Builtin.instance()
         node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
-        nio = builtin_manager.create_nio(node, request.json)
+        nio = builtin_manager.create_nio(request.json)
         port_number = int(request.match_info["port_number"])
         yield from node.add_nio(nio, port_number)
         response.set_status(201)
@@ -200,7 +200,7 @@ class DockerHandler:
         nio_type = request.json["type"]
         if nio_type != "nio_udp":
             raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
-        nio = docker_manager.create_nio(int(request.match_info["adapter_number"]), request.json)
+        nio = docker_manager.create_nio(request.json)
         yield from container.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
         response.set_status(201)
         response.json(nio)
@@ -217,7 +217,7 @@ class EthernetHubHandler:
 
         #builtin_manager = Builtin.instance()
         #node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
-        #nio = yield from builtin_manager.create_nio(node, request.json["nio"])
+        #nio = yield from builtin_manager.create_nio(request.json["nio"])
 
         response.set_status(201)
         response.json(nio)
@@ -218,7 +218,7 @@ class EthernetSwitchHandler:
 
         #builtin_manager = Builtin.instance()
         #node = builtin_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
-        #nio = yield from builtin_manager.create_nio(node, request.json["nio"])
+        #nio = yield from builtin_manager.create_nio(request.json["nio"])
 
         response.set_status(201)
         response.json(nio)
@@ -222,7 +222,7 @@ class IOUHandler:
         nio_type = request.json["type"]
         if nio_type not in ("nio_udp", "nio_tap", "nio_ethernet", "nio_generic_ethernet"):
             raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
-        nio = iou_manager.create_nio(vm.iouyap_path, request.json)
+        nio = iou_manager.create_nio(request.json)
         yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), int(request.match_info["port_number"]), nio)
         response.set_status(201)
         response.json(nio)
@@ -266,7 +266,7 @@ class QEMUHandler:
         nio_type = request.json["type"]
         if nio_type not in ("nio_udp", "nio_tap", "nio_nat"):
             raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
-        nio = qemu_manager.create_nio(vm.qemu_path, request.json)
+        nio = qemu_manager.create_nio(request.json)
         yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
         response.set_status(201)
         response.json(nio)
@@ -282,7 +282,7 @@ class VirtualBoxHandler:
         nio_type = request.json["type"]
        if nio_type not in ("nio_udp", "nio_nat"):
             raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
-        nio = vbox_manager.create_nio(vbox_manager.vboxmanage_path, request.json)
+        nio = vbox_manager.create_nio(request.json)
         yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
         response.set_status(201)
         response.json(nio)
@@ -255,7 +255,7 @@ class VMwareHandler:
         nio_type = request.json["type"]
         if nio_type not in ("nio_udp", "nio_vmnet", "nio_nat"):
             raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
-        nio = vmware_manager.create_nio(None, request.json)
+        nio = vmware_manager.create_nio(request.json)
         yield from vm.adapter_add_nio_binding(int(request.match_info["adapter_number"]), nio)
         response.set_status(201)
         response.json(nio)
@@ -201,7 +201,7 @@ class VPCSHandler:
         nio_type = request.json["type"]
         if nio_type not in ("nio_udp", "nio_tap"):
             raise HTTPConflict(text="NIO of type {} is not supported".format(nio_type))
-        nio = vpcs_manager.create_nio(vm.vpcs_path, request.json)
+        nio = vpcs_manager.create_nio(request.json)
         yield from vm.port_add_nio_binding(int(request.match_info["port_number"]), nio)
         response.set_status(201)
         response.json(nio)
@@ -411,7 +411,7 @@ def test_start(loop, vm, manager, free_console_port):
     vm._add_ubridge_connection = AsyncioMagicMock()
     vm._start_console = AsyncioMagicMock()
 
-    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
 
     with asyncio_patch("gns3server.compute.docker.Docker.query") as mock_query:
@@ -430,7 +430,7 @@ def test_start_namespace_failed(loop, vm, manager, free_console_port):
     assert vm.status != "started"
     vm.adapters = 1
 
-    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
 
     with asyncio_patch("gns3server.compute.docker.DockerVM._get_container_state", return_value="stopped"):
@@ -641,7 +641,7 @@ def test_close(loop, vm, port_manager):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
 
     with asyncio_patch("gns3server.compute.docker.DockerVM._get_container_state", return_value="stopped"):
@@ -685,7 +685,7 @@ def test_add_ubridge_connection(loop, vm):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
     nio.startPacketCapture("/tmp/capture.pcap")
     vm._ubridge_hypervisor = MagicMock()
 
@@ -725,7 +725,7 @@ def test_add_ubridge_connection_invalid_adapter_number(loop, vm):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
     with pytest.raises(DockerError):
         loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 12, 42)))
 
@@ -736,7 +736,7 @@ def test_add_ubridge_connection_no_free_interface(loop, vm):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
     with pytest.raises(DockerError):
 
         # We create fake ethernet interfaces for docker
@@ -753,7 +753,7 @@ def test_delete_ubridge_connection(loop, vm):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
 
     loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 0, 42)))
     loop.run_until_complete(asyncio.async(vm._delete_ubridge_connection(0)))
@@ -770,7 +770,7 @@ def test_adapter_add_nio_binding(vm, loop):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     assert vm._ethernet_adapters[0].get_nio(0) == nio
 
@@ -780,7 +780,7 @@ def test_adapter_add_nio_binding_invalid_adapter(vm, loop):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
     with pytest.raises(DockerError):
         loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(12, nio)))
 
@@ -790,7 +790,7 @@ def test_adapter_remove_nio_binding(vm, loop):
            "lport": 4242,
            "rport": 4343,
            "rhost": "127.0.0.1"}
-    nio = vm.manager.create_nio(0, nio)
+    nio = vm.manager.create_nio(nio)
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     with asyncio_patch("gns3server.compute.docker.DockerVM._delete_ubridge_connection") as delete_ubridge_mock:
         loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0)))
@@ -833,7 +833,7 @@ def test_pull_image(loop, vm):
 def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
 
     output_file = str(tmpdir / "test.pcap")
-    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     loop.run_until_complete(asyncio.async(vm.start_capture(0, output_file)))
     assert vm._ethernet_adapters[0].get_nio(0).capturing
@@ -842,7 +842,7 @@ def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
 def test_stop_capture(vm, tmpdir, manager, free_console_port, loop):
 
     output_file = str(tmpdir / "test.pcap")
-    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     loop.run_until_complete(vm.start_capture(0, output_file))
     assert vm._ethernet_adapters[0].get_nio(0).capturing
@@ -325,7 +325,7 @@ def test_enable_l1_keepalives(loop, vm):
 def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
 
     output_file = str(tmpdir / "test.pcap")
-    nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
     loop.run_until_complete(asyncio.async(vm.start_capture(0, 0, output_file)))
     assert vm._adapters[0].get_nio(0).capturing
@@ -334,7 +334,7 @@ def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
 def test_stop_capture(vm, tmpdir, manager, free_console_port, loop):
 
     output_file = str(tmpdir / "test.pcap")
-    nio = manager.create_nio(vm.iouyap_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, 0, nio)))
     loop.run_until_complete(vm.start_capture(0, 0, output_file))
     assert vm._adapters[0].get_nio(0).capturing
@@ -128,7 +128,7 @@ def test_stop(loop, vm, running_subprocess_mock):
     process.wait.return_value = future
 
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
-        nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+        nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
         vm.adapter_add_nio_binding(0, nio)
         loop.run_until_complete(asyncio.async(vm.start()))
         assert vm.is_running()
@@ -192,7 +192,7 @@ def test_suspend(loop, vm):
 
 
 def test_add_nio_binding_udp(vm, loop):
-    nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+    nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     assert nio.lport == 4242
 
@@ -200,13 +200,13 @@ def test_add_nio_binding_udp(vm, loop):
 @pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
 def test_add_nio_binding_ethernet(vm, loop, ethernet_device):
     with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
-        nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_ethernet", "ethernet_device": ethernet_device})
+        nio = Qemu.instance().create_nio({"type": "nio_ethernet", "ethernet_device": ethernet_device})
         loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
         assert nio.ethernet_device == ethernet_device
 
 
 def test_port_remove_nio_binding(vm, loop):
-    nio = Qemu.instance().create_nio(vm.qemu_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+    nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0)))
     assert vm._ethernet_adapters[0].ports[0] is None
@@ -57,7 +57,7 @@ def test_vm_invalid_virtualbox_api_version(loop, project, manager):
 
 
 def test_vm_adapter_add_nio_binding_adapter_not_exist(loop, vm, manager, free_console_port):
-    nio = manager.create_nio(manager.vboxmanage_path, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "192.168.1.2"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "192.168.1.2"})
     with pytest.raises(VirtualBoxError):
         loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(15, nio)))
 
@@ -54,7 +54,7 @@ def test_json(vm, tmpdir, project):
 def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
 
     output_file = str(tmpdir / "test.pcap")
-    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     vm.adapters = 1
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     loop.run_until_complete(asyncio.async(vm.start_capture(0, output_file)))
@@ -64,7 +64,7 @@ def test_start_capture(vm, tmpdir, manager, free_console_port, loop):
 def test_stop_capture(vm, tmpdir, manager, free_console_port, loop):
 
     output_file = str(tmpdir / "test.pcap")
-    nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
+    nio = manager.create_nio({"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
     vm.adapters = 1
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     loop.run_until_complete(vm.start_capture(0, output_file))
@@ -67,7 +67,7 @@ def test_vm_check_vpcs_version_0_6_1(loop, vm, manager):
 def test_vm_invalid_vpcs_version(loop, manager, vm):
     with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.subprocess_check_output", return_value="Welcome to Virtual PC Simulator, version 0.1"):
         with pytest.raises(VPCSError):
-            nio = manager.create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+            nio = manager.create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
             vm.port_add_nio_binding(0, nio)
             loop.run_until_complete(asyncio.async(vm._check_vpcs_version()))
             assert vm.name == "test"
@@ -77,7 +77,7 @@ def test_vm_invalid_vpcs_version(loop, manager, vm):
 def test_vm_invalid_vpcs_path(vm, manager, loop):
     with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.vpcs_path", return_value="/tmp/fake/path/vpcs"):
         with pytest.raises(VPCSError):
-            nio = manager.create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+            nio = manager.create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
             vm.port_add_nio_binding(0, nio)
             loop.run_until_complete(asyncio.async(vm.start()))
             assert vm.name == "test"
@@ -93,7 +93,7 @@ def test_start(loop, vm, async_run):
 
     with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
         with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec:
-            nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+            nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
             vm.port_add_nio_binding(0, nio)
             loop.run_until_complete(asyncio.async(vm.start()))
             assert mock_exec.call_args[0] == (vm.vpcs_path,
@@ -128,7 +128,7 @@ def test_start_0_6_1(loop, vm):
 
     with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
         with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec:
-            nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+            nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
             vm.port_add_nio_binding(0, nio)
             loop.run_until_complete(asyncio.async(vm.start()))
             assert mock_exec.call_args[0] == (vm.vpcs_path,
@@ -159,7 +159,7 @@ def test_stop(loop, vm, async_run):
     with NotificationManager.instance().queue() as queue:
         with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
             with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
-                nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+                nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
                 vm.port_add_nio_binding(0, nio)
 
                 loop.run_until_complete(asyncio.async(vm.start()))
@@ -193,7 +193,7 @@ def test_reload(loop, vm):
 
     with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
         with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
-            nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+            nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
             vm.port_add_nio_binding(0, nio)
             loop.run_until_complete(asyncio.async(vm.start()))
             assert vm.is_running()
@@ -209,7 +209,7 @@ def test_reload(loop, vm):
 
 
 def test_add_nio_binding_udp(vm):
-    nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+    nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
     vm.port_add_nio_binding(0, nio)
     assert nio.lport == 4242
 
@@ -217,7 +217,7 @@ def test_add_nio_binding_udp(vm):
 @pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
 def test_add_nio_binding_tap(vm, ethernet_device):
     with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
-        nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_tap", "tap_device": ethernet_device})
+        nio = VPCS.instance().create_nio({"type": "nio_tap", "tap_device": ethernet_device})
         vm.port_add_nio_binding(0, nio)
         assert nio.tap_device == ethernet_device
 
@@ -225,13 +225,13 @@ def test_add_nio_binding_tap(vm, ethernet_device):
 # def test_add_nio_binding_tap_no_privileged_access(vm):
 #     with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=False):
 #         with pytest.raises(aiohttp.web.HTTPForbidden):
-#             nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_tap", "tap_device": "test"})
+#             nio = VPCS.instance().create_nio({"type": "nio_tap", "tap_device": "test"})
 #             vm.port_add_nio_binding(0, nio)
 #             assert vm._ethernet_adapter.ports[0] is None
 #
 
 def test_port_remove_nio_binding(vm):
-    nio = VPCS.instance().create_nio(vm.vpcs_path, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+    nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
     vm.port_add_nio_binding(0, nio)
     vm.port_remove_nio_binding(0)
     assert vm._ethernet_adapter.ports[0] is None