Mirror of https://github.com/GNS3/gns3-server (synced 2024-12-26 00:38:10 +00:00)
Commit 73a481e510: Refactor port manager
Parent: ae8e2f4199
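In short: VMs no longer receive a PortManager directly. Each VM is now constructed with its module manager (a BaseManager subclass) and reaches the port manager through a new port_manager property on that manager. PortManager itself gains instance()/destroy() classmethods so it can behave as a singleton, VPCSDevice allocates and reserves console ports via self._manager.port_manager, and the tests swap the old port_manager fixture for a manager fixture.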
@@ -43,6 +43,21 @@ class BaseManager:
         cls._instance = cls()
         return cls._instance
 
+    @property
+    def port_manager(self):
+        """
+        Returns the port_manager for these VMs.
+
+        :returns: Port manager
+        """
+
+        return self._port_manager
+
+    @port_manager.setter
+    def port_manager(self, new_port_manager):
+        self._port_manager = new_port_manager
+
+
     @classmethod
     @asyncio.coroutine
     def destroy(cls):
@@ -73,7 +88,7 @@ class BaseManager:
         else:
             if identifier in self._vms:
                 raise VMError("VM identifier {} is already used by another VM instance".format(identifier))
-        vm = self._VM_CLASS(vmname, identifier, self.port_manager)
+        vm = self._VM_CLASS(vmname, identifier, self)
         yield from vm.wait_for_creation()
         self._vms[vm.id] = vm
         return vm
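Taken together, these two hunks invert the dependency: a VM keeps a reference to its manager and asks it for the port manager on demand. Below is a minimal runnable sketch of the resulting wiring, with simplified stand-in classes; the real ones are BaseManager, BaseVM, and PortManager in gns3server.modules, and the stub port number is illustrative only.

    # Minimal sketch of the manager/VM wiring after this refactor.
    class PortManager:
        def get_free_console_port(self):
            return 2001  # stub: the real implementation tracks used ports

    class Manager:  # plays the role of a BaseManager subclass
        def __init__(self):
            self._port_manager = None

        @property
        def port_manager(self):
            return self._port_manager

        @port_manager.setter
        def port_manager(self, new_port_manager):
            self._port_manager = new_port_manager

    class VM:  # plays the role of BaseVM / VPCSDevice
        def __init__(self, name, identifier, manager):
            self._manager = manager  # keep the manager, not the port manager

        def allocate_console(self):
            # VMs reach the port manager through their manager
            return self._manager.port_manager.get_free_console_port()

    manager = Manager()
    manager.port_manager = PortManager()
    vm = VM("test", 42, manager)
    print(vm.allocate_console())  # -> 2001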
@@ -26,20 +26,23 @@ log = logging.getLogger(__name__)
 
 class BaseVM:
 
-    def __init__(self, name, identifier, port_manager):
+    def __init__(self, name, identifier, manager):
 
         self._loop = asyncio.get_event_loop()
         self._queue = asyncio.Queue()
         self._name = name
         self._id = identifier
         self._created = asyncio.Future()
-        self._port_manager = port_manager
+        self._manager = manager
         self._config = Config.instance()
         self._worker = asyncio.async(self._run())
         log.info("{type} device {name} [id={id}] has been created".format(type=self.__class__.__name__,
                                                                           name=self._name,
                                                                           id=self._id))
 
+        # TODO: release console ports on delete
+
+
     @property
     def id(self):
         """
@@ -17,7 +17,8 @@
 
 import socket
 import ipaddress
+import asyncio
 
 
 class PortManager:
     """
@@ -42,6 +42,23 @@ class PortManager:
         else:
             self._console_host = host
 
+    @classmethod
+    def instance(cls):
+        """
+        Singleton to return only one instance of PortManager.
+
+        :returns: instance of PortManager
+        """
+
+        if not hasattr(cls, "_instance") or cls._instance is None:
+            cls._instance = cls()
+        return cls._instance
+
+    @classmethod
+    @asyncio.coroutine
+    def destroy(cls):
+        cls._instance = None
+
     @property
     def console_host(self):
 
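The instance()/destroy() pair follows the same singleton convention BaseManager already uses: the first call creates the object, later calls return the same one, and destroy() drops the reference so the next use starts fresh. A self-contained sketch of the pattern follows; the real destroy() is an asyncio coroutine and the real constructor takes a console host, both simplified here for brevity.

    # Sketch of the singleton pattern added to PortManager (simplified).
    class PortManager:
        def __init__(self, host="127.0.0.1"):
            self._console_host = host

        @classmethod
        def instance(cls):
            # create on first use, then always return the same object
            if not hasattr(cls, "_instance") or cls._instance is None:
                cls._instance = cls()
            return cls._instance

        @classmethod
        def destroy(cls):
            cls._instance = None

    a = PortManager.instance()
    b = PortManager.instance()
    assert a is b  # same object every time
    PortManager.destroy()
    assert PortManager.instance() is not a  # fresh instance after destroy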
@@ -48,20 +48,20 @@ class VPCSDevice(BaseVM):
 
     :param name: name of this VPCS device
     :param vpcs_id: VPCS instance ID
-    :param path: path to VPCS executable
+    :param manager: parent VM Manager
     :param working_dir: path to a working directory
     :param console: TCP console port
     """
-    def __init__(self, name, vpcs_id, port_manager, working_dir=None, console=None):
+    def __init__(self, name, vpcs_id, manager, working_dir=None, console=None):
 
-        super().__init__(name, vpcs_id, port_manager)
+        super().__init__(name, vpcs_id, manager)
 
-        #self._path = path
-        #self._working_dir = working_dir
         # TODO: Hardcoded for testing
+        #self._working_dir = working_dir
+        self._working_dir = "/tmp"
+
         self._path = self._config.get_section_config("VPCS").get("path", "vpcs")
 
-        self._working_dir = "/tmp"
         self._console = console
 
         self._command = []
@@ -83,9 +83,9 @@ class VPCSDevice(BaseVM):
         #
         try:
             if not self._console:
-                self._console = port_manager.get_free_port()
+                self._console = self._manager.port_manager.get_free_console_port()
             else:
-                self._console = port_manager.reserve_port(self._console)
+                self._console = self._manager.port_manager.reserve_console_port(self._console)
         except Exception as e:
             raise VPCSError(e)
 
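The effect of this hunk is that VPCSDevice no longer needs a port_manager argument at all: when no console port was requested it asks the shared port manager for a free one, otherwise it reserves the requested port, and a failure in either path surfaces as a VPCSError.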
@@ -129,7 +129,7 @@ def _get_unused_port():
     return port
 
 
-@pytest.fixture(scope="module")
+@pytest.fixture(scope="session")
 def loop(request):
     """Return an event loop and destroy it at the end of test"""
     loop = asyncio.new_event_loop()
@@ -141,12 +141,8 @@ def loop(request):
     request.addfinalizer(tear_down)
     return loop
 
-@pytest.fixture(scope="module")
-def port_manager():
-    return PortManager("127.0.0.1", False)
-
-@pytest.fixture(scope="module")
-def server(request, loop, port_manager):
+@pytest.fixture(scope="session")
+def server(request, loop):
     port = _get_unused_port()
     host = "localhost"
     app = web.Application()
@@ -154,7 +150,7 @@ def server(request, loop, port_manager):
         app.router.add_route(method, route, handler)
     for module in MODULES:
         instance = module.instance()
-        instance.port_manager = port_manager
+        instance.port_manager = PortManager("127.0.0.1", False)
     srv = loop.create_server(app.make_handler(), host, port)
     srv = loop.run_until_complete(srv)
 
@@ -21,7 +21,7 @@ It's also used for unittest the HTTP implementation.
 """
 
 from tests.utils import asyncio_patch
-from tests.api.base import server, loop, port_manager
+from tests.api.base import server, loop
 from gns3server.version import __version__
 
 
@@ -18,7 +18,8 @@
 from tests.api.base import server, loop
 from tests.utils import asyncio_patch
 from gns3server import modules
+from unittest.mock import patch
 
 
 @asyncio_patch('gns3server.modules.VPCS.create_vm', return_value=84)
 def test_vpcs_create(server):
@@ -20,44 +20,52 @@ import asyncio
 from tests.utils import asyncio_patch
 
 #Move loop to util
-from tests.api.base import loop, port_manager
+from tests.api.base import loop
 from asyncio.subprocess import Process
 from unittest.mock import patch, MagicMock
 from gns3server.modules.vpcs.vpcs_device import VPCSDevice
 from gns3server.modules.vpcs.vpcs_error import VPCSError
+from gns3server.modules.vpcs import VPCS
+from gns3server.modules.port_manager import PortManager
+
+@pytest.fixture(scope="module")
+def manager():
+    m = VPCS.instance()
+    m.port_manager = PortManager("127.0.0.1", False)
+    return m
 
 @patch("subprocess.check_output", return_value="Welcome to Virtual PC Simulator, version 0.6".encode("utf-8"))
-def test_vm(tmpdir, port_manager):
-    vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+def test_vm(tmpdir, manager):
+    vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
     assert vm.name == "test"
     assert vm.id == 42
 
 @patch("subprocess.check_output", return_value="Welcome to Virtual PC Simulator, version 0.1".encode("utf-8"))
-def test_vm_invalid_vpcs_version(tmpdir, port_manager):
+def test_vm_invalid_vpcs_version(tmpdir, manager):
     with pytest.raises(VPCSError):
-        vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+        vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
         assert vm.name == "test"
         assert vm.id == 42
 
 @patch("gns3server.config.Config.get_section_config", return_value={"path": "/bin/test_fake"})
-def test_vm_invalid_vpcs_path(tmpdir, port_manager):
+def test_vm_invalid_vpcs_path(tmpdir, manager):
     with pytest.raises(VPCSError):
-        vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+        vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
         assert vm.name == "test"
         assert vm.id == 42
 
-def test_start(tmpdir, loop, port_manager):
+def test_start(tmpdir, loop, manager):
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
-        vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+        vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
         nio = vm.port_add_nio_binding(0, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
 
         loop.run_until_complete(asyncio.async(vm.start()))
         assert vm.is_running() == True
 
-def test_stop(tmpdir, loop, port_manager):
+def test_stop(tmpdir, loop, manager):
     process = MagicMock()
     with asyncio_patch("asyncio.create_subprocess_exec", return_value=process):
-        vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+        vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
         nio = vm.port_add_nio_binding(0, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
 
         loop.run_until_complete(asyncio.async(vm.start()))
@@ -66,26 +74,26 @@ def test_stop(tmpdir, loop, port_manager):
     assert vm.is_running() == False
     process.terminate.assert_called_with()
 
-def test_add_nio_binding_udp(port_manager, tmpdir):
-    vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+def test_add_nio_binding_udp(tmpdir, manager):
+    vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
     nio = vm.port_add_nio_binding(0, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
     assert nio.lport == 4242
 
-def test_add_nio_binding_tap(port_manager, tmpdir):
-    vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+def test_add_nio_binding_tap(tmpdir, manager):
+    vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
     with patch("gns3server.modules.vpcs.vpcs_device.has_privileged_access", return_value=True):
         nio = vm.port_add_nio_binding(0, {"type": "nio_tap", "tap_device": "test"})
         assert nio.tap_device == "test"
 
-def test_add_nio_binding_tap_no_privileged_access(port_manager, tmpdir):
-    vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+def test_add_nio_binding_tap_no_privileged_access(tmpdir, manager):
+    vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
     with patch("gns3server.modules.vpcs.vpcs_device.has_privileged_access", return_value=False):
         with pytest.raises(VPCSError):
             vm.port_add_nio_binding(0, {"type": "nio_tap", "tap_device": "test"})
     assert vm._ethernet_adapter.ports[0] is not None
 
-def test_port_remove_nio_binding(port_manager, tmpdir):
-    vm = VPCSDevice("test", 42, port_manager, working_dir=str(tmpdir))
+def test_port_remove_nio_binding(tmpdir, manager):
+    vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
     nio = vm.port_add_nio_binding(0, {"type": "nio_udp", "lport": 4242, "rport": 4243, "rhost": "127.0.0.1"})
     vm.port_remove_nio_binding(0)
     assert vm._ethernet_adapter.ports[0] == None
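The manager fixture gives each VPCS test the module singleton wired to a private PortManager, mirroring what the server fixture now does for the HTTP tests. A new test would follow the same shape; the example below is hypothetical but uses only names that appear in this diff.

    @patch("subprocess.check_output", return_value="Welcome to Virtual PC Simulator, version 0.6".encode("utf-8"))
    def test_manager_wiring(tmpdir, manager):
        # hypothetical test: the device keeps its manager and reaches
        # the PortManager through it
        vm = VPCSDevice("test", 42, manager, working_dir=str(tmpdir))
        assert vm._manager is manager
        assert manager.port_manager is not None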