diff --git a/gns3server/compute/base_node.py b/gns3server/compute/base_node.py
index 56444f43..0021d3cb 100644
--- a/gns3server/compute/base_node.py
+++ b/gns3server/compute/base_node.py
@@ -77,10 +77,6 @@ class BaseNode:
         self._wrap_console = wrap_console
         self._wrapper_telnet_server = None
 
-        # check if the node will use uBridge or not
-        server_config = Config.instance().get_section_config("Server")
-        self._use_ubridge = server_config.getboolean("use_ubridge")
-
         if self._console is not None:
             if console_type == "vnc":
                 self._console = self._manager.port_manager.reserve_tcp_port(self._console, self._project, port_range_start=5900, port_range_end=6000)
@@ -460,16 +456,6 @@ class BaseNode:
                                                                          id=self.id,
                                                                          console_type=console_type))
 
-    @property
-    def use_ubridge(self):
-        """
-        Returns if uBridge is used for this node or not
-
-        :returns: boolean
-        """
-
-        return self._use_ubridge
-
     @property
     def ubridge(self):
         """
@@ -591,6 +577,10 @@ class BaseNode:
             yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
 
     @asyncio.coroutine
+    def update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio):
+        if destination_nio:
+            yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
+
     def ubridge_delete_bridge(self, name):
         """
        :params name: Delete the bridge with this name
@@ -598,10 +588,6 @@ class BaseNode:
         """
         if self.ubridge:
             yield from self._ubridge_send("bridge delete {name}".format(name=name))
 
-    @asyncio.coroutine
-    def update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio):
-        yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
-
     @asyncio.coroutine
     def _ubridge_apply_filters(self, bridge_name, filters):
         """
diff --git a/gns3server/compute/qemu/qemu_vm.py b/gns3server/compute/qemu/qemu_vm.py
index 136929a6..117a340e 100644
--- a/gns3server/compute/qemu/qemu_vm.py
+++ b/gns3server/compute/qemu/qemu_vm.py
@@ -907,14 +907,13 @@ class QemuVM(BaseNode):
                                                                             stderr=subprocess.STDOUT,
                                                                             cwd=self.working_dir)
 
-            if self.use_ubridge:
-                yield from self._start_ubridge()
-                for adapter_number, adapter in enumerate(self._ethernet_adapters):
-                    nio = adapter.get_nio(0)
-                    if nio:
-                        yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
-                                                                   self._local_udp_tunnels[adapter_number][1],
-                                                                   nio)
+            yield from self._start_ubridge()
+            for adapter_number, adapter in enumerate(self._ethernet_adapters):
+                nio = adapter.get_nio(0)
+                if nio:
+                    yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
+                                                               self._local_udp_tunnels[adapter_number][1],
+                                                               nio)
 
             log.info('QEMU VM "{}" started PID={}'.format(self._name, self._process.pid))
             self.status = "started"
@@ -1131,16 +1130,14 @@ class QemuVM(BaseNode):
             raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
                                                                                                  adapter_number=adapter_number))
 
-        if self.ubridge:
+        if self.is_running():
             try:
                 yield from self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
-                                                           self._local_udp_tunnels[adapter_number][1],
-                                                           nio)
+                                                           self._local_udp_tunnels[adapter_number][1],
+                                                           nio)
             except IndexError:
                 raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
                                                                                                      adapter_number=adapter_number))
-        elif self.is_running():
-            raise QemuError("Sorry, adding a link to a started Qemu VM is not supported without using uBridge.")
 
         adapter.add_nio(0, nio)
         log.info('QEMU VM "{name}" [{id}]: {nio} added to adapter {adapter_number}'.format(name=self._name,
@@ -1164,10 +1161,8 @@ class QemuVM(BaseNode):
             raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
                                                                                                  adapter_number=adapter_number))
 
-        if self.ubridge:
+        if self.is_running():
             yield from self._ubridge_send("bridge delete {name}".format(name="QEMU-{}-{}".format(self._id, adapter_number)))
-        elif self.is_running():
-            raise QemuError("Sorry, removing a link to a started Qemu VM is not supported without using uBridge.")
 
         nio = adapter.get_nio(0)
         if isinstance(nio, NIOUDP):
@@ -1195,9 +1190,6 @@ class QemuVM(BaseNode):
             raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
                                                                                                  adapter_number=adapter_number))
 
-        if not self.use_ubridge:
-            raise QemuError("uBridge must be enabled in order to start packet capture")
-
         nio = adapter.get_nio(0)
 
         if not nio:
@@ -1476,13 +1468,10 @@ class QemuVM(BaseNode):
         for adapter_number, adapter in enumerate(self._ethernet_adapters):
             mac = int_to_macaddress(macaddress_to_int(self._mac_address) + adapter_number)
 
-            if self.use_ubridge:
-                # use a local UDP tunnel to connect to uBridge instead
-                if adapter_number not in self._local_udp_tunnels:
-                    self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
-                nio = self._local_udp_tunnels[adapter_number][0]
-            else:
-                nio = adapter.get_nio(0)
+            # use a local UDP tunnel to connect to uBridge instead
+            if adapter_number not in self._local_udp_tunnels:
+                self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
+            nio = self._local_udp_tunnels[adapter_number][0]
 
             if self._legacy_networking:
                 # legacy QEMU networking syntax (-net)
                 if nio:
diff --git a/gns3server/compute/virtualbox/virtualbox_vm.py b/gns3server/compute/virtualbox/virtualbox_vm.py
index f4dcd829..5b114fa5 100644
--- a/gns3server/compute/virtualbox/virtualbox_vm.py
+++ b/gns3server/compute/virtualbox/virtualbox_vm.py
@@ -26,7 +26,6 @@ import json
 import uuid
 import shlex
 import shutil
-import socket
 import asyncio
 import tempfile
 import xml.etree.ElementTree as ET
@@ -280,14 +279,13 @@ class VirtualBoxVM(BaseNode):
             # add a guest property to let the VM know about the GNS3 project directory
             yield from self.manager.execute("guestproperty", ["set", self._vmname, "ProjectDirInGNS3", self.working_dir])
 
-        if self.use_ubridge:
-            yield from self._start_ubridge()
-            for adapter_number in range(0, self._adapters):
-                nio = self._ethernet_adapters[adapter_number].get_nio(0)
-                if nio:
-                    yield from self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
-                                                               self._local_udp_tunnels[adapter_number][1],
-                                                               nio)
+        yield from self._start_ubridge()
+        for adapter_number in range(0, self._adapters):
+            nio = self._ethernet_adapters[adapter_number].get_nio(0)
+            if nio:
+                yield from self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
+                                                           self._local_udp_tunnels[adapter_number][1],
+                                                           nio)
 
         yield from self._start_console()
@@ -837,13 +835,10 @@ class VirtualBoxVM(BaseNode):
                 yield from self._modify_vm("--nic{} null".format(adapter_number + 1))
                 yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
 
-            if self.use_ubridge:
-                # use a local UDP tunnel to connect to uBridge instead
-                if adapter_number not in self._local_udp_tunnels:
-                    self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
-                nio = self._local_udp_tunnels[adapter_number][0]
-            else:
-                nio = self._ethernet_adapters[adapter_number].get_nio(0)
+            # use a local UDP tunnel to connect to uBridge instead
+            if adapter_number not in self._local_udp_tunnels:
+                self._local_udp_tunnels[adapter_number] = self._create_local_udp_tunnel()
+            nio = self._local_udp_tunnels[adapter_number][0]
 
             if nio:
                 if not self._use_any_adapter and attachment not in ("none", "null", "generic"):
@@ -881,7 +876,7 @@ class VirtualBoxVM(BaseNode):
                     yield from self._modify_vm("--nictrace{} on".format(adapter_number + 1))
                     yield from self._modify_vm('--nictracefile{} "{}"'.format(adapter_number + 1, nio.pcap_output_file))
 
-            if self.use_ubridge and not self._ethernet_adapters[adapter_number].get_nio(0):
+            if not self._ethernet_adapters[adapter_number].get_nio(0):
                 yield from self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
 
         for adapter_number in range(self._adapters, self._maximum_adapters):
@@ -972,32 +967,15 @@ class VirtualBoxVM(BaseNode):
             raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                             adapter_number=adapter_number))
 
-        if self.ubridge:
+        if self.is_running():
             try:
                 yield from self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
-                                                           self._local_udp_tunnels[adapter_number][1],
-                                                           nio)
+                                                           self._local_udp_tunnels[adapter_number][1],
+                                                           nio)
             except KeyError:
                 raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                                 adapter_number=adapter_number))
             yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
-        else:
-            vm_state = yield from self._get_vm_state()
-            if vm_state == "running":
-                if isinstance(nio, NIOUDP):
-                    # dynamically configure an UDP tunnel on the VirtualBox adapter
-                    yield from self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1))
-                    yield from self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, nio.lport))
-                    yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
-                    yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
-                    yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
-
-                    # check if the UDP tunnel has been correctly set
-                    vm_info = yield from self._get_vm_info()
-                    generic_driver_number = "generic{}".format(adapter_number + 1)
-                    if generic_driver_number not in vm_info and vm_info[generic_driver_number] != "UDPTunnel":
-                        log.warning("UDP tunnel has not been set on nic: {}".format(adapter_number + 1))
-                        self.project.emit("log.warning", {"message": "UDP tunnel has not been set on nic: {}".format(adapter_number + 1)})
 
         adapter.add_nio(0, nio)
         log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name,
@@ -1021,17 +999,11 @@ class VirtualBoxVM(BaseNode):
             raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                             adapter_number=adapter_number))
 
-        if self.ubridge:
+        if self.is_running():
             yield from self._ubridge_send("bridge delete {name}".format(name="VBOX-{}-{}".format(self._id, adapter_number)))
-            vm_state = yield from self._get_vm_state()
-            if vm_state == "running":
-                yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
-        else:
-            vm_state = yield from self._get_vm_state()
-            if vm_state == "running":
-                # dynamically disable the VirtualBox adapter
-                yield from self._control_vm("setlinkstate{} off".format(adapter_number + 1))
-                yield from self._control_vm("nic{} null".format(adapter_number + 1))
+        vm_state = yield from self._get_vm_state()
+        if vm_state == "running":
off".format(adapter_number + 1)) nio = adapter.get_nio(0) if isinstance(nio, NIOUDP): @@ -1044,6 +1016,12 @@ class VirtualBoxVM(BaseNode): adapter_number=adapter_number)) return nio + def is_running(self): + """ + :returns: True if the vm is not stopped + """ + return self.ubridge is not None + @asyncio.coroutine def start_capture(self, adapter_number, output_file): """ @@ -1059,11 +1037,6 @@ class VirtualBoxVM(BaseNode): raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name, adapter_number=adapter_number)) - if not self.use_ubridge: - vm_state = yield from self._get_vm_state() - if vm_state == "running" or vm_state == "paused" or vm_state == "stuck": - raise VirtualBoxError("Sorry, packet capturing on a started VirtualBox VM is not supported without using uBridge") - nio = adapter.get_nio(0) if not nio: diff --git a/gns3server/compute/vpcs/vpcs_vm.py b/gns3server/compute/vpcs/vpcs_vm.py index f45d8985..9633d716 100644 --- a/gns3server/compute/vpcs/vpcs_vm.py +++ b/gns3server/compute/vpcs/vpcs_vm.py @@ -241,9 +241,6 @@ class VPCSVM(BaseNode): yield from self._check_requirements() if not self.is_running(): nio = self._ethernet_adapter.get_nio(0) - if not self.use_ubridge and not nio: - raise VPCSError("This VPCS instance must be connected in order to start") - command = self._build_command() try: log.info("Starting VPCS: {}".format(command)) @@ -261,10 +258,9 @@ class VPCSVM(BaseNode): creationflags=flags) monitor_process(self._process, self._termination_callback) - if self.use_ubridge: - yield from self._start_ubridge() - if nio: - yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) + yield from self._start_ubridge() + if nio: + yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) yield from self.start_wrap_console() @@ -378,10 +374,8 @@ class VPCSVM(BaseNode): raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter, port_number=port_number)) - if self.ubridge: + if self.is_running(): yield from self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) - elif self.is_running(): - raise VPCSError("Sorry, updating a link to a started VPCS instance is not supported without using uBridge.") self._ethernet_adapter.add_nio(port_number, nio) log.info('VPCS "{name}" [{id}]: {nio} added to port {port_number}'.format(name=self._name, @@ -396,10 +390,8 @@ class VPCSVM(BaseNode): if not self._ethernet_adapter.port_exists(port_number): raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter, port_number=port_number)) - if self.ubridge: + if self.is_running(): yield from self.update_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio) - elif self.is_running(): - raise VPCSError("Sorry, adding a link to a started VPCS instance is not supported without using uBridge.") @asyncio.coroutine def port_remove_nio_binding(self, port_number): @@ -415,10 +407,8 @@ class VPCSVM(BaseNode): raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter, port_number=port_number)) - if self.ubridge: + if self.is_running(): yield from self._ubridge_send("bridge delete {name}".format(name="VPCS-{}".format(self._id))) - elif self.is_running(): - raise VPCSError("Sorry, adding a link to a started VPCS instance is not supported without using uBridge.") 
 
         nio = self._ethernet_adapter.get_nio(port_number)
         if isinstance(nio, NIOUDP):
@@ -444,9 +434,6 @@ class VPCSVM(BaseNode):
             raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
                                                                                            port_number=port_number))
 
-        if not self.use_ubridge:
-            raise VPCSError("uBridge must be enabled in order to start packet capture")
-
         nio = self._ethernet_adapter.get_nio(0)
 
         if not nio:
@@ -537,13 +524,10 @@ class VPCSVM(BaseNode):
         else:
             log.warn("The VPCS relay feature could not be disabled because the VPCS version is below 0.8b")
 
-        if self.use_ubridge:
-            # use the local UDP tunnel to uBridge instead
-            if not self._local_udp_tunnel:
-                self._local_udp_tunnel = self._create_local_udp_tunnel()
-            nio = self._local_udp_tunnel[0]
-        else:
-            nio = self._ethernet_adapter.get_nio(0)
+        # use the local UDP tunnel to uBridge instead
+        if not self._local_udp_tunnel:
+            self._local_udp_tunnel = self._create_local_udp_tunnel()
+        nio = self._local_udp_tunnel[0]
 
         if nio:
             if isinstance(nio, NIOUDP):
diff --git a/gns3server/run.py b/gns3server/run.py
index 635d696e..16cc9bc5 100644
--- a/gns3server/run.py
+++ b/gns3server/run.py
@@ -121,7 +121,6 @@ def parse_arguments(argv):
         "certkey": config.get("certkey", ""),
         "record": config.get("record", ""),
         "local": config.getboolean("local", False),
-        "use_ubridge": config.getboolean("use_ubridge", True),  # this enables uBridge globally
         "allow": config.getboolean("allow_remote_console", False),
         "quiet": config.getboolean("quiet", False),
         "debug": config.getboolean("debug", False),
@@ -137,7 +136,6 @@ def set_config(args):
     config = Config.instance()
     server_config = config.get_section_config("Server")
     server_config["local"] = str(args.local)
-    server_config["use_ubridge"] = str(args.no_ubridge)
     server_config["allow_remote_console"] = str(args.allow)
     server_config["host"] = args.host
     server_config["port"] = str(args.port)
@@ -225,11 +223,6 @@ def run():
     if server_config.getboolean("local"):
         log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
 
-    if server_config.getboolean("use_ubridge"):
-        log.info("uBridge will be used to handle node connections")
-    else:
-        log.warning("uBridge will NOT be used to handle node connections")
-
     # we only support Python 3 version >= 3.4
     if sys.version_info < (3, 4):
         raise SystemExit("Python 3.4 or higher is required")
diff --git a/tests/compute/qemu/test_qemu_vm.py b/tests/compute/qemu/test_qemu_vm.py
index d9c190e0..0b4fc227 100644
--- a/tests/compute/qemu/test_qemu_vm.py
+++ b/tests/compute/qemu/test_qemu_vm.py
@@ -70,6 +70,9 @@ def vm(project, manager, fake_qemu_binary, fake_qemu_img_binary):
     manager.port_manager.console_host = "127.0.0.1"
     vm = QemuVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager, qemu_path=fake_qemu_binary)
     vm._process_priority = "normal"  # Avoid complexity for Windows tests
+    vm._start_ubridge = AsyncioMagicMock()
+    vm._ubridge_hypervisor = MagicMock()
+    vm._ubridge_hypervisor.is_running.return_value = True
     return vm
@@ -195,21 +198,15 @@ def test_suspend(loop, vm):
 
 
 def test_add_nio_binding_udp(vm, loop):
-    nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+    nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
+    assert nio.lport == 4242
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     assert nio.lport == 4242
 
 
-@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
-def test_add_nio_binding_ethernet(vm, loop, ethernet_device):
-    with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
-        nio = Qemu.instance().create_nio({"type": "nio_ethernet", "ethernet_device": ethernet_device})
-        loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
-        assert nio.ethernet_device == ethernet_device
-
-
 def test_port_remove_nio_binding(vm, loop):
-    nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+    nio = Qemu.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
     loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
     loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0)))
     assert vm._ethernet_adapters[0].ports[0] is None
@@ -460,6 +457,7 @@ def test_build_command(vm, loop, fake_qemu_binary, port_manager):
     os.environ["DISPLAY"] = "0:0"
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
         cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
+        nio = vm._local_udp_tunnels[0][0]
         assert cmd == [
             fake_qemu_binary,
             "-name",
@@ -477,7 +475,9 @@ def test_build_command(vm, loop, fake_qemu_binary, port_manager):
             "-net",
             "none",
             "-device",
-            "e1000,mac={}".format(vm._mac_address)
+            "e1000,mac={},netdev=gns3-0".format(vm._mac_address),
+            "-netdev",
+            "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport)
         ]
@@ -503,6 +503,7 @@ def test_build_command_kvm(linux_platform, vm, loop, fake_qemu_binary, port_mana
     os.environ["DISPLAY"] = "0:0"
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
         cmd = loop.run_until_complete(asyncio.async(vm._build_command()))
+        nio = vm._local_udp_tunnels[0][0]
         assert cmd == [
             fake_qemu_binary,
             "-name",
@@ -521,7 +522,9 @@ def test_build_command_kvm(linux_platform, vm, loop, fake_qemu_binary, port_mana
port_mana "-net", "none", "-device", - "e1000,mac={}".format(vm._mac_address) + "e1000,mac={},netdev=gns3-0".format(vm._mac_address), + "-netdev", + "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport) ] @@ -534,6 +537,7 @@ def test_build_command_kvm_2_4(linux_platform, vm, loop, fake_qemu_binary, port_ os.environ["DISPLAY"] = "0:0" with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process: cmd = loop.run_until_complete(asyncio.async(vm._build_command())) + nio = vm._local_udp_tunnels[0][0] assert cmd == [ fake_qemu_binary, "-name", @@ -554,7 +558,9 @@ def test_build_command_kvm_2_4(linux_platform, vm, loop, fake_qemu_binary, port_ "-net", "none", "-device", - "e1000,mac={}".format(vm._mac_address) + "e1000,mac={},netdev=gns3-0".format(vm._mac_address), + "-netdev", + "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport) ] @@ -573,6 +579,8 @@ def test_build_command_two_adapters(vm, loop, fake_qemu_binary, port_manager): vm.adapters = 2 with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process: cmd = loop.run_until_complete(asyncio.async(vm._build_command())) + nio1 = vm._local_udp_tunnels[0][0] + nio2 = vm._local_udp_tunnels[1][0] assert cmd == [ fake_qemu_binary, "-name", @@ -590,9 +598,13 @@ def test_build_command_two_adapters(vm, loop, fake_qemu_binary, port_manager): "-net", "none", "-device", - "e1000,mac={}".format(vm.mac_address), + "e1000,mac={},netdev=gns3-0".format(vm._mac_address), + "-netdev", + "socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio1.rport, nio1.lport), "-device", - "e1000,mac={}".format(int_to_macaddress(macaddress_to_int(vm._mac_address) + 1)) + "e1000,mac={},netdev=gns3-1".format(int_to_macaddress(macaddress_to_int(vm._mac_address) + 1)), + "-netdev", + "socket,id=gns3-1,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio2.rport, nio2.lport) ] @@ -608,17 +620,17 @@ def test_build_command_two_adapters_mac_address(vm, loop, fake_qemu_binary, port assert mac_0[:8] == "00:00:ab" with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process: cmd = loop.run_until_complete(asyncio.async(vm._build_command())) - assert "e1000,mac={}".format(mac_0) in cmd - assert "e1000,mac={}".format(mac_1) in cmd + assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd + assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd vm.mac_address = "00:42:ab:0e:0f:0a" mac_0 = vm._mac_address - mac_1 = int_to_macaddress(macaddress_to_int(vm._mac_address)) + mac_1 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 1) assert mac_0[:8] == "00:42:ab" with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process: cmd = loop.run_until_complete(asyncio.async(vm._build_command())) - assert "e1000,mac={}".format(mac_0) in cmd - assert "e1000,mac={}".format(mac_1) in cmd + assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd + assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd def test_build_command_large_number_of_adapters(vm, loop, fake_qemu_binary, port_manager): @@ -638,19 +650,23 @@ def test_build_command_large_number_of_adapters(vm, loop, fake_qemu_binary, port with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process: cmd = loop.run_until_complete(asyncio.async(vm._build_command())) - assert "e1000,mac={}".format(mac_0) in cmd - assert "e1000,mac={}".format(mac_1) in cmd + # Count if we have 100 e1000 adapters in the command + assert len([l 
for l in cmd if "e1000" in l ]) == 100 + assert len(vm._ethernet_adapters) == 100 + + assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd + assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd assert "pci-bridge,id=pci-bridge0,bus=dmi_pci_bridge0,chassis_nr=0x1,addr=0x0,shpc=off" not in cmd assert "pci-bridge,id=pci-bridge1,bus=dmi_pci_bridge1,chassis_nr=0x1,addr=0x1,shpc=off" in cmd assert "pci-bridge,id=pci-bridge2,bus=dmi_pci_bridge2,chassis_nr=0x1,addr=0x2,shpc=off" in cmd assert "i82801b11-bridge,id=dmi_pci_bridge1" in cmd mac_29 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 29) - assert "e1000,mac={},bus=pci-bridge1,addr=0x04".format(mac_29) in cmd + assert "e1000,mac={},bus=pci-bridge1,addr=0x04,netdev=gns3-29".format(mac_29) in cmd mac_30 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 30) - assert "e1000,mac={},bus=pci-bridge1,addr=0x05".format(mac_30) in cmd + assert "e1000,mac={},bus=pci-bridge1,addr=0x05,netdev=gns3-30".format(mac_30) in cmd mac_74 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 74) - assert "e1000,mac={},bus=pci-bridge2,addr=0x11".format(mac_74) in cmd + assert "e1000,mac={},bus=pci-bridge2,addr=0x11,netdev=gns3-74".format(mac_74) in cmd # Qemu < 2.4 doesn't support large number of adapters vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.0.0") diff --git a/tests/compute/vpcs/test_vpcs_vm.py b/tests/compute/vpcs/test_vpcs_vm.py index edeea297..411c3346 100644 --- a/tests/compute/vpcs/test_vpcs_vm.py +++ b/tests/compute/vpcs/test_vpcs_vm.py @@ -43,6 +43,8 @@ def vm(project, manager, ubridge_path): vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager) vm._vpcs_version = parse_version("0.9") vm._start_ubridge = AsyncioMagicMock() + vm._ubridge_hypervisor = MagicMock() + vm._ubridge_hypervisor.is_running.return_value = True return vm @@ -67,7 +69,7 @@ def test_vm_check_vpcs_version_0_6_1(loop, vm, manager): def test_vm_invalid_vpcs_version(loop, manager, vm): with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.subprocess_check_output", return_value="Welcome to Virtual PC Simulator, version 0.1"): with pytest.raises(VPCSError): - nio = manager.create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"}) + nio = manager.create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}}) vm.port_add_nio_binding(0, nio) loop.run_until_complete(asyncio.async(vm._check_vpcs_version())) assert vm.name == "test" @@ -94,8 +96,6 @@ def test_start(loop, vm, async_run): with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True): with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec: with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"): - nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"}) - async_run(vm.port_add_nio_binding(0, nio)) loop.run_until_complete(asyncio.async(vm.start())) assert mock_exec.call_args[0] == (vm._vpcs_path(), '-p', @@ -130,7 +130,7 @@ def test_start_0_6_1(loop, vm, async_run): with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True): with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"): with asyncio_patch("asyncio.create_subprocess_exec", return_value=process) as mock_exec: - nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"}) + nio = 
+                nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
                 async_run(vm.port_add_nio_binding(0, nio))
                 async_run(vm.start())
                 assert mock_exec.call_args[0] == (vm._vpcs_path(),
@@ -162,7 +162,7 @@ def test_stop(loop, vm, async_run):
     with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
         with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
             with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
-                nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+                nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
                 async_run(vm.port_add_nio_binding(0, nio))
 
                 async_run(vm.start())
@@ -197,11 +197,12 @@ def test_reload(loop, vm, async_run):
     with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._check_requirements", return_value=True):
         with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.start_wrap_console"):
             with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
-                nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+                nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
                 async_run(vm.port_add_nio_binding(0, nio))
                 async_run(vm.start())
                 assert vm.is_running()
 
+                vm._ubridge_send = AsyncioMagicMock()
                 with asyncio_patch("gns3server.utils.asyncio.wait_for_process_termination"):
                     async_run(vm.reload())
                 assert vm.is_running() is True
@@ -213,7 +214,7 @@ def test_reload(loop, vm, async_run):
 
 def test_add_nio_binding_udp(vm, async_run):
-    nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
+    nio = VPCS.instance().create_nio({"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1", "filters": {}})
     async_run(vm.port_add_nio_binding(0, nio))
     assert nio.lport == 4242
diff --git a/tests/conftest.py b/tests/conftest.py
index 3308e5b1..2fbc9074 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -212,6 +212,7 @@ def run_around_tests(monkeypatch, port_manager, controller, config):
     config.set("VirtualBox", "vboxmanage_path", tmppath)
     config.set("VPCS", "vpcs_path", tmppath)
     config.set("VMware", "vmrun_path", tmppath)
+    config.set("Dynamips", "dynamips_path", tmppath)
 
     # Force turn off KVM because it's not available on CI
     config.set("Qemu", "enable_kvm", False)
diff --git a/tests/controller/test_project_open.py b/tests/controller/test_project_open.py
index ac0473d5..87a47cf2 100644
--- a/tests/controller/test_project_open.py
+++ b/tests/controller/test_project_open.py
@@ -105,7 +105,6 @@ def demo_topology():
                     "node_id": "64ba8408-afbf-4b66-9cdd-1fd854427478",
                     "node_type": "vpcs",
                     "properties": {
-                        "startup_script": "",
                     },
                     "symbol": ":/symbols/computer.svg",
                     "width": 65,
@@ -129,7 +128,6 @@ def demo_topology():
                     "node_id": "748bcd89-624a-40eb-a8d3-1d2e85c99b51",
                     "node_type": "vpcs",
                     "properties": {
-                        "startup_script": "",
                     },
                     "symbol": ":/symbols/computer.svg",
                     "width": 65,
@@ -151,7 +149,8 @@ def test_open(controller, tmpdir, demo_topology, async_run, http_server):
     controller._computes["local"] = Compute("local", controller=controller, host=http_server[0], port=http_server[1])
     controller._computes["vm"] = controller._computes["local"]
 
-    project = async_run(controller.load_project(str(tmpdir / "demo.gns3")))
asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.add_ubridge_udp_connection"): + project = async_run(controller.load_project(str(tmpdir / "demo.gns3"))) assert project.status == "opened" assert len(project.computes) == 1 assert len(project.nodes) == 2 diff --git a/tests/handlers/api/compute/test_qemu.py b/tests/handlers/api/compute/test_qemu.py index 41dd385a..2a4348e8 100644 --- a/tests/handlers/api/compute/test_qemu.py +++ b/tests/handlers/api/compute/test_qemu.py @@ -168,8 +168,9 @@ def test_qemu_update(http_compute, vm, free_console_port, project, fake_qemu_vm) def test_qemu_nio_create_udp(http_compute, vm): - http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2}) - response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp", + with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.add_ubridge_udp_connection"): + http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2}) + response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp", "lport": 4242, "rport": 4343, "rhost": "127.0.0.1"}, @@ -179,22 +180,14 @@ def test_qemu_nio_create_udp(http_compute, vm): assert response.json["type"] == "nio_udp" -def test_qemu_nio_create_ethernet(http_compute, vm): - http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2}) - response = http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_ethernet", - "ethernet_device": "eth0", - }, - example=True) - assert response.status == 409 - - def test_qemu_delete_nio(http_compute, vm): - http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2}) - http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp", + with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM._ubridge_send"): + http_compute.put("/projects/{project_id}/qemu/nodes/{node_id}".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"adapters": 2}) + http_compute.post("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp", "lport": 4242, "rport": 4343, "rhost": "127.0.0.1"}) - response = http_compute.delete("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True) + response = http_compute.delete("/projects/{project_id}/qemu/nodes/{node_id}/adapters/1/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True) assert response.status == 204 assert response.route == "/projects/{project_id}/qemu/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio" diff --git a/tests/handlers/api/compute/test_vpcs.py b/tests/handlers/api/compute/test_vpcs.py index 265a11fd..ae464814 100644 --- a/tests/handlers/api/compute/test_vpcs.py +++ b/tests/handlers/api/compute/test_vpcs.py @@ -64,7 +64,8 @@ def 
@@ -64,7 +64,8 @@ def test_vpcs_create_port(http_compute, project, free_console_port):
 
 
 def test_vpcs_nio_create_udp(http_compute, vm):
-    response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
+    with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM.add_ubridge_udp_connection"):
+        response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                                                                                        "lport": 4242,
                                                                                                                                                                        "rport": 4343,
                                                                                                                                                                        "rhost": "127.0.0.1"},
@@ -83,28 +84,19 @@ def test_vpcs_nio_update_udp(http_compute, vm):
                                                                                                                                                                      "rhost": "127.0.0.1",
                                                                                                                                                                      "filters": {}},
                                  example=True)
-    assert response.status == 201
+    assert response.status == 201, response.body.decode("utf-8")
     assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
     assert response.json["type"] == "nio_udp"
 
 
-@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
-def test_vpcs_nio_create_tap(http_compute, vm, ethernet_device):
-    with patch("gns3server.compute.base_manager.BaseManager.has_privileged_access", return_value=True):
-        response = http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_tap",
-                                                                                                                                                                       "tap_device": ethernet_device})
-        assert response.status == 201
-        assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
-        assert response.json["type"] == "nio_tap"
-
-
 def test_vpcs_delete_nio(http_compute, vm):
-    http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
+    with asyncio_patch("gns3server.compute.vpcs.vpcs_vm.VPCSVM._ubridge_send"):
+        http_compute.post("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), {"type": "nio_udp",
                                                                                                                                                             "lport": 4242,
                                                                                                                                                             "rport": 4343,
                                                                                                                                                             "rhost": "127.0.0.1"})
-    response = http_compute.delete("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
-    assert response.status == 204
+        response = http_compute.delete("/projects/{project_id}/vpcs/nodes/{node_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], node_id=vm["node_id"]), example=True)
+    assert response.status == 204, response.body.decode()
     assert response.route == "/projects/{project_id}/vpcs/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"