diff --git a/gns3server/compute/base_node.py b/gns3server/compute/base_node.py
index ca2c74c8..0021d3cb 100644
--- a/gns3server/compute/base_node.py
+++ b/gns3server/compute/base_node.py
@@ -577,10 +577,17 @@ class BaseNode:
             yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
 
     @asyncio.coroutine
-    def _update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio):
+    def update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio):
         if destination_nio:
             yield from self._ubridge_apply_filters(bridge_name, destination_nio.filters)
 
+    def ubridge_delete_bridge(self, name):
+        """
+        :params name: Delete the bridge with this name
+        """
+        if self.ubridge:
+            yield from self._ubridge_send("bridge delete {name}".format(name=name))
+
     @asyncio.coroutine
     def _ubridge_apply_filters(self, bridge_name, filters):
         """
@@ -592,13 +599,24 @@ class BaseNode:
         yield from self._ubridge_send('bridge reset_packet_filters ' + bridge_name)
         i = 0
         for (filter_type, values) in filters.items():
-            cmd = "bridge add_packet_filter {bridge_name} {filter_name} {filter_type} {filter_value}".format(
-                bridge_name=bridge_name,
-                filter_name="filter" + str(i),
-                filter_type=filter_type,
-                filter_value=" ".join([str(v) for v in values]))
-            yield from self._ubridge_send(cmd)
-            i += 1
+            if isinstance(values[0], str):
+                for line in values[0].split('\n'):
+                    line = line.strip()
+                    cmd = "bridge add_packet_filter {bridge_name} {filter_name} {filter_type} {filter_value}".format(
+                        bridge_name=bridge_name,
+                        filter_name="filter" + str(i),
+                        filter_type=filter_type,
+                        filter_value='"{}" {}'.format(line, " ".join([str(v) for v in values[1:]]))).strip()
+                    yield from self._ubridge_send(cmd)
+                    i += 1
+            else:
+                cmd = "bridge add_packet_filter {bridge_name} {filter_name} {filter_type} {filter_value}".format(
+                    bridge_name=bridge_name,
+                    filter_name="filter" + str(i),
+                    filter_type=filter_type,
+                    filter_value=" ".join([str(v) for v in values]))
+                yield from self._ubridge_send(cmd)
+                i += 1
 
     @asyncio.coroutine
     def _add_ubridge_ethernet_connection(self, bridge_name, ethernet_interface, block_host_traffic=True):
diff --git a/gns3server/compute/docker/__init__.py b/gns3server/compute/docker/__init__.py
index 7e3ccd69..b7ce5339 100644
--- a/gns3server/compute/docker/__init__.py
+++ b/gns3server/compute/docker/__init__.py
@@ -36,6 +36,7 @@ log = logging.getLogger(__name__)
 # Be carefull to keep it consistent
 DOCKER_MINIMUM_API_VERSION = "1.25"
 DOCKER_MINIMUM_VERSION = "1.13"
+DOCKER_PREFERRED_API_VERSION = "1.30"
 
 
 class Docker(BaseManager):
@@ -50,6 +51,7 @@ class Docker(BaseManager):
         self.ubridge_lock = asyncio.Lock()
         self._connector = None
         self._session = None
+        self._api_version = DOCKER_MINIMUM_API_VERSION
 
     @asyncio.coroutine
     def _check_connection(self):
@@ -61,8 +63,17 @@ class Docker(BaseManager):
             except (aiohttp.ClientOSError, FileNotFoundError):
                 self._connected = False
                 raise DockerError("Can't connect to docker daemon")
-            if parse_version(version["ApiVersion"]) < parse_version(DOCKER_MINIMUM_API_VERSION):
-                raise DockerError("Docker version is {}. GNS3 requires a minimum version of {}".format(version["Version"], DOCKER_MINIMUM_VERSION))
+
+            docker_version = parse_version(version['ApiVersion'])
+
+            if docker_version < parse_version(DOCKER_MINIMUM_API_VERSION):
+                raise DockerError(
+                    "Docker version is {}. GNS3 requires a minimum version of {}".format(
+                        version["Version"], DOCKER_MINIMUM_VERSION))
+
+            preferred_api_version = parse_version(DOCKER_PREFERRED_API_VERSION)
+            if docker_version >= preferred_api_version:
+                self._api_version = DOCKER_PREFERRED_API_VERSION
 
     def connector(self):
         if self._connector is None or self._connector.closed:
@@ -165,10 +176,10 @@ class Docker(BaseManager):
         :returns: Websocket
         """
 
-        url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
-        connection = yield from self._session.ws_connect(url,
-                                                          origin="http://docker",
-                                                          autoping=True)
+        url = "http://docker/v" + self._api_version + "/" + path
+        connection = yield from aiohttp.ws_connect(url,
+                                                   origin="http://docker",
+                                                   autoping=True)
         return connection
 
     @locked_coroutine
diff --git a/gns3server/compute/docker/__init__.py.orig b/gns3server/compute/docker/__init__.py.orig
new file mode 100644
index 00000000..22d88125
--- /dev/null
+++ b/gns3server/compute/docker/__init__.py.orig
@@ -0,0 +1,248 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2015 GNS3 Technologies Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Docker server module.
+"""
+
+import sys
+import json
+import asyncio
+import logging
+import aiohttp
+from gns3server.utils import parse_version
+from gns3server.utils.asyncio import locked_coroutine
+from gns3server.compute.base_manager import BaseManager
+from gns3server.compute.docker.docker_vm import DockerVM
+from gns3server.compute.docker.docker_error import DockerError, DockerHttp304Error, DockerHttp404Error
+
+log = logging.getLogger(__name__)
+
+
+# Be carefull to keep it consistent
+DOCKER_MINIMUM_API_VERSION = "1.25"
+DOCKER_MINIMUM_VERSION = "1.13"
+DOCKER_PREFERRED_API_VERSION = "1.30"
+
+
+class Docker(BaseManager):
+
+    _NODE_CLASS = DockerVM
+
+    def __init__(self):
+        super().__init__()
+        self._server_url = '/var/run/docker.sock'
+        self._connected = False
+        # Allow locking during ubridge operations
+        self.ubridge_lock = asyncio.Lock()
+        self._connector = None
+        self._session = None
+        self._api_version = DOCKER_MINIMUM_API_VERSION
+
+    @asyncio.coroutine
+    def _check_connection(self):
+        if not self._connected:
+            try:
+                self._connected = True
+                connector = self.connector()
+                version = yield from self.query("GET", "version")
+            except (aiohttp.ClientOSError, FileNotFoundError):
+                self._connected = False
+                raise DockerError("Can't connect to docker daemon")
+
+            docker_version = parse_version(version['ApiVersion'])
+
+            if docker_version < parse_version(DOCKER_MINIMUM_API_VERSION):
+                raise DockerError(
+                    "Docker version is {}. GNS3 requires a minimum version of {}".format(
+                        version["Version"], DOCKER_MINIMUM_VERSION))
+
+            preferred_api_version = parse_version(DOCKER_PREFERRED_API_VERSION)
+            if docker_version >= preferred_api_version:
+                self._api_version = DOCKER_PREFERRED_API_VERSION
+
+    def connector(self):
+        if self._connector is None or self._connector.closed:
+            if not sys.platform.startswith("linux"):
+                raise DockerError("Docker is supported only on Linux")
+            try:
+                self._connector = aiohttp.connector.UnixConnector(self._server_url, limit=None)
+            except (aiohttp.ClientOSError, FileNotFoundError):
+                raise DockerError("Can't connect to docker daemon")
+        return self._connector
+
+    @asyncio.coroutine
+    def unload(self):
+        yield from super().unload()
+        if self._connected:
+            if self._connector and not self._connector.closed:
+                self._connector.close()
+
+    @asyncio.coroutine
+    def query(self, method, path, data={}, params={}):
+        """
+        Make a query to the docker daemon and decode the request
+
+        :param method: HTTP method
+        :param path: Endpoint in API
+        :param data: Dictionnary with the body. Will be transformed to a JSON
+        :param params: Parameters added as a query arg
+        """
+
+        response = yield from self.http_query(method, path, data=data, params=params)
+        body = yield from response.read()
+        if body and len(body):
+            if response.headers['CONTENT-TYPE'] == 'application/json':
+                body = json.loads(body.decode("utf-8"))
+            else:
+                body = body.decode("utf-8")
+        log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
+        return body
+
+    @asyncio.coroutine
+    def http_query(self, method, path, data={}, params={}, timeout=300):
+        """
+        Make a query to the docker daemon
+
+        :param method: HTTP method
+        :param path: Endpoint in API
+        :param data: Dictionnary with the body. Will be transformed to a JSON
+        :param params: Parameters added as a query arg
+        :param timeout: Timeout
+        :returns: HTTP response
+        """
+        data = json.dumps(data)
+        if timeout is None:
+            timeout = 60 * 60 * 24 * 31  # One month timeout
+
+        if path == 'version':
+            url = "http://docker/v1.12/" + path  # API of docker v1.0
+        else:
+            url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
+        try:
+            if path != "version":  # version is use by check connection
+                yield from self._check_connection()
+            if self._session is None or self._session.closed:
+                connector = self.connector()
+                self._session = aiohttp.ClientSession(connector=connector)
+            response = yield from self._session.request(
+                method,
+                url,
+                params=params,
+                data=data,
+                headers={"content-type": "application/json", },
+                timeout=timeout
+            )
+        except (aiohttp.ClientResponseError, aiohttp.ClientOSError) as e:
+            raise DockerError("Docker has returned an error: {}".format(str(e)))
+        except (asyncio.TimeoutError):
+            raise DockerError("Docker timeout " + method + " " + path)
+        if response.status >= 300:
+            body = yield from response.read()
+            try:
+                body = json.loads(body.decode("utf-8"))["message"]
+            except ValueError:
+                pass
+            log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
+            if response.status == 304:
+                raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
+            elif response.status == 404:
+                raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
+            else:
+                raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
+        return response
+
+    @asyncio.coroutine
+    def websocket_query(self, path, params={}):
+        """
+        Open a websocket connection
+
+        :param path: Endpoint in API
+        :param params: Parameters added as a query arg
+        :returns: Websocket
+        """
+
+<<<<<<< HEAD
+        url = "http://docker/v" + DOCKER_MINIMUM_API_VERSION + "/" + path
+        connection = yield from self._session.ws_connect(url,
+                                                          origin="http://docker",
+                                                          autoping=True)
+=======
+        url = "http://docker/v" + self._api_version + "/" + path
+        connection = yield from aiohttp.ws_connect(url,
+                                                   connector=self.connector(),
+                                                   origin="http://docker",
+                                                   autoping=True)
+>>>>>>> master
+        return connection
+
+    @locked_coroutine
+    def pull_image(self, image, progress_callback=None):
+        """
+        Pull image from docker repository
+
+        :params image: Image name
+        :params progress_callback: A function that receive a log message about image download progress
+        """
+
+        try:
+            yield from self.query("GET", "images/{}/json".format(image))
+            return  # We already have the image skip the download
+        except DockerHttp404Error:
+            pass
+
+        if progress_callback:
+            progress_callback("Pull {} from docker hub".format(image))
+        response = yield from self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
+        # The pull api will stream status via an HTTP JSON stream
+        content = ""
+        while True:
+            try:
+                chunk = yield from response.content.read(1024)
+            except aiohttp.ServerDisconnectedError:
+                break
+            if not chunk:
+                break
+            content += chunk.decode("utf-8")
+
+            try:
+                while True:
+                    content = content.lstrip(" \r\n\t")
+                    answer, index = json.JSONDecoder().raw_decode(content)
+                    if "progress" in answer and progress_callback:
+                        progress_callback("Pulling image {}:{}: {}".format(image, answer["id"], answer["progress"]))
+                    content = content[index:]
+            except ValueError:  # Partial JSON
+                pass
+        response.close()
+        if progress_callback:
+            progress_callback("Success pulling image {}".format(image))
+
+    @asyncio.coroutine
+    def list_images(self):
+        """Gets Docker image list.
+
+        :returns: list of dicts
+        :rtype: list
+        """
+        images = []
+        for image in (yield from self.query("GET", "images/json", params={"all": 0})):
+            if image['RepoTags']:
+                for tag in image['RepoTags']:
+                    if tag != "<none>:<none>":
+                        images.append({'image': tag})
+        return sorted(images, key=lambda i: i['image'])
diff --git a/gns3server/compute/docker/docker_vm.py b/gns3server/compute/docker/docker_vm.py
index 00b80b0a..1c5fcf7b 100644
--- a/gns3server/compute/docker/docker_vm.py
+++ b/gns3server/compute/docker/docker_vm.py
@@ -502,6 +502,10 @@ class DockerVM(BaseNode):
             msg = yield from ws.receive()
             if msg.tp == aiohttp.WSMsgType.text:
                 out.feed_data(msg.data.encode())
+            elif msg.tp == aiohttp.WSMsgType.BINARY:
+                out.feed_data(msg.data)
+            elif msg.tp == aiohttp.WSMsgType.ERROR:
+                log.critical("Docker WebSocket Error: {}".format(msg.data))
             else:
                 out.feed_eof()
                 ws.close()
diff --git a/gns3server/compute/dynamips/nios/nio_udp.py b/gns3server/compute/dynamips/nios/nio_udp.py
index 106e3836..60ddafa0 100644
--- a/gns3server/compute/dynamips/nios/nio_udp.py
+++ b/gns3server/compute/dynamips/nios/nio_udp.py
@@ -54,6 +54,14 @@ class NIOUDP(NIO):
         self._node = node
         super().__init__(name, node.hypervisor)
 
+    @property
+    def filters(self):
+        return self._filters
+
+    @filters.setter
+    def filters(self, val):
+        self._filters = val
+
     @asyncio.coroutine
     def create(self):
         if not self._hypervisor:
@@ -67,7 +75,7 @@ class NIOUDP(NIO):
             return
         self._local_tunnel_lport = self._node.manager.port_manager.get_free_udp_port(self._node.project)
         self._local_tunnel_rport = self._node.manager.port_manager.get_free_udp_port(self._node.project)
-        name = 'DYNAMIPS-{}-{}'.format(self._local_tunnel_lport, self._local_tunnel_rport)
+        self._bridge_name = 'DYNAMIPS-{}-{}'.format(self._local_tunnel_lport, self._local_tunnel_rport)
         yield from self._hypervisor.send("nio create_udp {name} {lport} {rhost} {rport}".format(name=self._name,
                                                                                                 lport=self._local_tunnel_lport,
                                                                                                 rhost='127.0.0.1',
@@ -77,21 +85,33 @@ class NIOUDP(NIO):
                                                                                                 lport=self._lport,
                                                                                                 rhost=self._rhost,
                                                                                                 rport=self._rport))
+
+        self._source_nio = nio_udp.NIOUDP(self._local_tunnel_rport,
+                                          '127.0.0.1',
+                                          self._local_tunnel_lport,
+                                          {})
+        self._destination_nio = nio_udp.NIOUDP(self._lport,
+                                               self._rhost,
+                                               self._rport,
+                                               self._filters)
         yield from self._node.add_ubridge_udp_connection(
-            name,
-            nio_udp.NIOUDP(self._local_tunnel_rport,
-                           '127.0.0.1',
-                           self._local_tunnel_lport,
-                           self._filters),
-            nio_udp.NIOUDP(self._lport,
-                           self._rhost,
-                           self._rport,
-                           self._filters)
+            self._bridge_name,
+            self._source_nio,
+            self._destination_nio
         )
 
+    @asyncio.coroutine
+    def update(self):
+        self._destination_nio.filters = self._filters
+        yield from self._node.update_ubridge_udp_connection(
+            self._bridge_name,
+            self._source_nio,
+            self._destination_nio)
+
     @asyncio.coroutine
     def close(self):
         if self._local_tunnel_lport:
+            yield from self._node.ubridge_delete_bridge(self._bridge_name)
             self._node.manager.port_manager.release_udp_port(self._local_tunnel_lport, self ._node.project)
         if self._local_tunnel_rport:
             self._node.manager.port_manager.release_udp_port(self._local_tunnel_rport, self._node.project)
diff --git a/gns3server/compute/dynamips/nodes/router.py b/gns3server/compute/dynamips/nodes/router.py
index f0a87a1b..7ea6c15f 100644
--- a/gns3server/compute/dynamips/nodes/router.py
+++ b/gns3server/compute/dynamips/nodes/router.py
@@ -1281,6 +1281,17 @@ class Router(BaseNode):
             yield from self.slot_enable_nio(slot_number, port_number)
         adapter.add_nio(port_number, nio)
 
+    @asyncio.coroutine
+    def slot_update_nio_binding(self, slot_number, port_number, nio):
+        """
+        Update a slot NIO binding.
+
+        :param slot_number: slot number
+        :param port_number: port number
+        :param nio: NIO instance to add to the slot/port
+        """
+        yield from nio.update()
+
     @asyncio.coroutine
     def slot_remove_nio_binding(self, slot_number, port_number):
         """
@@ -1313,8 +1324,7 @@ class Router(BaseNode):
         nio = adapter.get_nio(port_number)
         if nio is None:
             return
-        if isinstance(nio, NIOUDP):
-            self.manager.port_manager.release_udp_port(nio.lport, self._project)
+        yield from nio.close()
         adapter.remove_nio(port_number)
 
         log.info('Router "{name}" [{id}]: NIO {nio_name} removed from port {slot_number}/{port_number}'.format(name=self._name,
diff --git a/gns3server/compute/iou/iou_vm.py b/gns3server/compute/iou/iou_vm.py
index 4d12122c..d1264ca3 100644
--- a/gns3server/compute/iou/iou_vm.py
+++ b/gns3server/compute/iou/iou_vm.py
@@ -182,9 +182,9 @@ class IOUVM(BaseNode):
         except OSError as e:
             raise IOUError("Cannot read ELF header for IOU image '{}': {}".format(self._path, e))
 
-        # IOU images must start with the ELF magic number, be 32-bit, little endian
+        # IOU images must start with the ELF magic number, be 32-bit or 64-bit, little endian
         # and have an ELF version of 1 normal IOS image are big endian!
-        if elf_header_start != b'\x7fELF\x01\x01\x01':
+        if elf_header_start != b'\x7fELF\x01\x01\x01' and elf_header_start != b'\x7fELF\x02\x01\x01':
             raise IOUError("'{}' is not a valid IOU image".format(self._path))
 
         if not os.access(self._path, os.X_OK):
diff --git a/gns3server/compute/vpcs/vpcs_vm.py b/gns3server/compute/vpcs/vpcs_vm.py
index 53c5d368..9633d716 100644
--- a/gns3server/compute/vpcs/vpcs_vm.py
+++ b/gns3server/compute/vpcs/vpcs_vm.py
@@ -391,7 +391,7 @@ class VPCSVM(BaseNode):
             raise VPCSError("Port {port_number} doesn't exist in adapter {adapter}".format(adapter=self._ethernet_adapter,
                                                                                            port_number=port_number))
         if self.is_running():
-            yield from self._update_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
+            yield from self.update_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
 
     @asyncio.coroutine
     def port_remove_nio_binding(self, port_number):
diff --git a/gns3server/controller/link.py b/gns3server/controller/link.py
index 9e202bfb..2e1e2956 100644
--- a/gns3server/controller/link.py
+++ b/gns3server/controller/link.py
@@ -36,6 +36,7 @@ FILTERS = [
                 "name": "Frequency",
                 "minimum": -1,
                 "maximum": 32767,
+                "type": "int",
                 "unit": "th packet"
             }
         ]
@@ -49,6 +50,7 @@ FILTERS = [
                 "name": "Chance",
                 "minimum": 0,
                 "maximum": 100,
+                "type": "int",
                 "unit": "%"
             }
         ]
@@ -62,13 +64,15 @@ FILTERS = [
                 "name": "Latency",
                 "minimum": 0,
                 "maximum": 32767,
-                "unit": "ms"
+                "unit": "ms",
+                "type": "int"
             },
             {
                 "name": "Jitter (-/+)",
                 "minimum": 0,
                 "maximum": 32767,
-                "unit": "ms"
+                "unit": "ms",
+                "type": "int"
             }
         ]
     },
@@ -81,7 +85,19 @@ FILTERS = [
                 "name": "Chance",
                 "minimum": 0,
                 "maximum": 100,
-                "unit": "%"
+                "unit": "%",
+                "type": "int"
+            }
+        ]
+    },
+    {
+        "type": "bpf",
+        "name": "Berkeley Packet Filter (BPF)",
+        "description": "This filter will drop any packet matching a BPF expression. Put one expression per line",
+        "parameters": [
+            {
+                "name": "Filters",
+                "type": "text"
             }
         ]
     }
@@ -124,8 +140,14 @@ class Link:
         """
         new_filters = {}
         for (filter, values) in filters.items():
-            values = [int(v) for v in values]
-            if len(values) != 0 and values[0] != 0:
+            new_values = []
+            for value in values:
+                if isinstance(value, str):
+                    new_values.append(value.strip("\n "))
+                else:
+                    new_values.append(int(value))
+            values = new_values
+            if len(values) != 0 and values[0] != 0 and values[0] != '':
                 new_filters[filter] = values
 
         if new_filters != self.filters:
@@ -343,7 +365,7 @@ class Link:
         :returns: None if no node support filtering else the node
         """
         for node in self._nodes:
-            if node["node"].node_type in ('vpcs', ):
+            if node["node"].node_type in ('vpcs', 'dynamips'):
                 return node["node"]
         return None
 
diff --git a/gns3server/controller/project.py b/gns3server/controller/project.py
index 6936e95c..3f396770 100644
--- a/gns3server/controller/project.py
+++ b/gns3server/controller/project.py
@@ -612,6 +612,9 @@ class Project:
 
         :param name: Name of the snapshot
         """
+        if name in [snap.name for snap in self.snapshots.values()]:
+            raise aiohttp.web_exceptions.HTTPConflict(text="The snapshot {} already exist".format(name))
+
         snapshot = Snapshot(self, name=name)
         try:
             if os.path.exists(snapshot.path):
diff --git a/gns3server/handlers/api/compute/dynamips_vm_handler.py b/gns3server/handlers/api/compute/dynamips_vm_handler.py
index dd0352de..6626a0d8 100644
--- a/gns3server/handlers/api/compute/dynamips_vm_handler.py
+++ b/gns3server/handlers/api/compute/dynamips_vm_handler.py
@@ -268,6 +268,35 @@ class DynamipsVMHandler:
         response.set_status(201)
         response.json(nio)
 
+    @Route.put(
+        r"/projects/{project_id}/dynamips/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
+        parameters={
+            "project_id": "Project UUID",
+            "node_id": "Node UUID",
+            "adapter_number": "Network adapter where the nio is located",
+            "port_number": "Port from where the nio should be updated"
+        },
+        status_codes={
+            201: "NIO updated",
+            400: "Invalid request",
+            404: "Instance doesn't exist"
+        },
+        input=NIO_SCHEMA,
+        output=NIO_SCHEMA,
+        description="Update a NIO from a Dynamips instance")
+    def update_nio(request, response):
+
+        dynamips_manager = Dynamips.instance()
+        vm = dynamips_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
+        slot_number = int(request.match_info["adapter_number"])
+        port_number = int(request.match_info["port_number"])
+        nio = vm.slots[slot_number].get_nio(port_number)
+        if "filters" in request.json and nio:
+            nio.filters = request.json["filters"]
+        yield from vm.slot_update_nio_binding(slot_number, port_number, nio)
+        response.set_status(201)
+        response.json(request.json)
+
     @Route.delete(
         r"/projects/{project_id}/dynamips/nodes/{node_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
         parameters={
diff --git a/gns3server/schemas/iou.py b/gns3server/schemas/iou.py
index b8a68565..656eb524 100644
--- a/gns3server/schemas/iou.py
+++ b/gns3server/schemas/iou.py
@@ -184,7 +184,7 @@ IOU_OBJECT_SCHEMA = {
             "type": ["boolean", "null"]
         },
         "command_line": {
-            "description": "Last command line used by GNS3 to start QEMU",
+            "description": "Last command line used by GNS3 to start IOU",
             "type": "string"
         },
         "application_id": {
diff --git a/gns3server/schemas/vpcs.py b/gns3server/schemas/vpcs.py
index adbd2043..4e05dd9d 100644
--- a/gns3server/schemas/vpcs.py
+++ b/gns3server/schemas/vpcs.py
@@ -122,7 +122,7 @@ VPCS_OBJECT_SCHEMA = {
             "pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
         },
         "command_line": {
-            "description": "Last command line used by GNS3 to start QEMU",
+            "description": "Last command line used by GNS3 to start VPCS",
             "type": "string"
         }
     },
diff --git a/gns3server/web/web_server.py b/gns3server/web/web_server.py
index be24f2ab..9e17f0ba 100644
--- a/gns3server/web/web_server.py
+++ b/gns3server/web/web_server.py
@@ -43,8 +43,8 @@ import gns3server.handlers
 import logging
 log = logging.getLogger(__name__)
 
-if not aiohttp.__version__.startswith("2.0"):
-    raise RuntimeError("You need aiohttp 2.0 for running GNS3")
+if not aiohttp.__version__.startswith("2.2"):
+    raise RuntimeError("aiohttp 2.2 is required to run the GNS3 server")
 
 
 class WebServer:
@@ -123,7 +123,7 @@ class WebServer:
                 task.cancel()
                 try:
                     yield from asyncio.wait_for(task, 1)
-                except:
+                except BaseException:
                     pass
 
         self._loop.stop()
diff --git a/requirements.txt b/requirements.txt
index bcfc7289..66ae7e03 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 jsonschema>=2.4.0
-aiohttp>=2.0.7,<2.1.0 # pyup: ignore
+aiohttp>=2.2.0,<2.3.0 # pyup: ignore
 aiohttp-cors>=0.5.3,<0.6.0 # pyup: ignore
-yarl>=0.10.2,<0.11 # pyup: ignore
+yarl>=0.11,<0.12 # pyup: ignore
 Jinja2>=2.7.3
 raven>=5.23.0
 psutil>=3.0.0
diff --git a/tests/compute/docker/test_docker.py b/tests/compute/docker/test_docker.py
index 095a25de..db43c84c 100644
--- a/tests/compute/docker/test_docker.py
+++ b/tests/compute/docker/test_docker.py
@@ -17,10 +17,10 @@
 
 import pytest
 import asyncio
-from unittest.mock import MagicMock
+from unittest.mock import MagicMock, patch
 from tests.utils import asyncio_patch, AsyncioMagicMock
 
-from gns3server.compute.docker import Docker
+from gns3server.compute.docker import Docker, DOCKER_PREFERRED_API_VERSION, DOCKER_MINIMUM_API_VERSION
 from gns3server.compute.docker.docker_error import DockerError, DockerHttp404Error
 
 
@@ -162,3 +162,40 @@ def test_pull_image(loop):
     with asyncio_patch("gns3server.compute.docker.Docker.http_query", return_value=mock_query) as mock:
         images = loop.run_until_complete(asyncio.async(Docker.instance().pull_image("ubuntu")))
         mock.assert_called_with("POST", "images/create", params={"fromImage": "ubuntu"}, timeout=None)
+
+
+def test_docker_check_connection_docker_minimum_version(vm, loop):
+    response = {
+        'ApiVersion': '1.01',
+        'Version': '1.12'
+    }
+
+    with patch("gns3server.compute.docker.Docker.connector"), \
+            asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response):
+        vm._connected = False
+        with pytest.raises(DockerError):
+            loop.run_until_complete(asyncio.async(vm._check_connection()))
+
+
+def test_docker_check_connection_docker_preferred_version_against_newer(vm, loop):
+    response = {
+        'ApiVersion': '1.31'
+    }
+
+    with patch("gns3server.compute.docker.Docker.connector"), \
+            asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response):
+        vm._connected = False
+        loop.run_until_complete(asyncio.async(vm._check_connection()))
+        assert vm._api_version == DOCKER_PREFERRED_API_VERSION
+
+
+def test_docker_check_connection_docker_preferred_version_against_older(vm, loop):
+    response = {
+        'ApiVersion': '1.27',
+    }
+
+    with patch("gns3server.compute.docker.Docker.connector"), \
+            asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response):
+        vm._connected = False
+        loop.run_until_complete(asyncio.async(vm._check_connection()))
+        assert vm._api_version == DOCKER_MINIMUM_API_VERSION
\ No newline at end of file
diff --git a/tests/compute/docker/test_docker_vm.py b/tests/compute/docker/test_docker_vm.py
index 6ca6fd14..e6610ebc 100644
--- a/tests/compute/docker/test_docker_vm.py
+++ b/tests/compute/docker/test_docker_vm.py
@@ -15,6 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import aiohttp
 import asyncio
 import pytest
 import uuid
@@ -904,3 +905,27 @@ def test_fix_permission(vm, loop):
         loop.run_until_complete(vm._fix_permissions())
         mock_exec.assert_called_with('docker', 'exec', 'e90e34656842', '/gns3/bin/busybox', 'sh', '-c', '(/gns3/bin/busybox find "/etc" -depth -print0 | /gns3/bin/busybox xargs -0 /gns3/bin/busybox stat -c \'%a:%u:%g:%n\' > "/etc/.gns3_perms") && /gns3/bin/busybox chmod -R u+rX "/etc" && /gns3/bin/busybox chown {}:{} -R "/etc"'.format(os.getuid(), os.getgid()))
     assert process.wait.called
+
+
+def test_read_console_output_with_binary_mode(vm, loop):
+    class InputStreamMock(object):
+        def __init__(self):
+            self.sent = False
+
+        @asyncio.coroutine
+        def receive(self):
+            if not self.sent:
+                self.sent = True
+                return MagicMock(tp=aiohttp.WSMsgType.BINARY, data=b"test")
+            else:
+                return MagicMock(tp=aiohttp.WSMsgType.CLOSE)
+
+        def close(self):
+            pass
+
+    input_stream = InputStreamMock()
+    output_stream = MagicMock()
+
+    with asyncio_patch('gns3server.compute.docker.docker_vm.DockerVM.stop'):
+        loop.run_until_complete(asyncio.async(vm._read_console_output(input_stream, output_stream)))
+        output_stream.feed_data.assert_called_once_with(b"test")
diff --git a/tests/compute/test_base_node.py b/tests/compute/test_base_node.py
index e42f3e98..635c9966 100644
--- a/tests/compute/test_base_node.py
+++ b/tests/compute/test_base_node.py
@@ -133,15 +133,27 @@ def test_update_ubridge_udp_connection(node, async_run):
     snio = NIOUDP(1245, "localhost", 1246, [])
     dnio = NIOUDP(1245, "localhost", 1244, filters)
     with asyncio_patch("gns3server.compute.base_node.BaseNode._ubridge_apply_filters") as mock:
-        async_run(node._update_ubridge_udp_connection('VPCS-10', snio, dnio))
+        async_run(node.update_ubridge_udp_connection('VPCS-10', snio, dnio))
         mock.assert_called_with("VPCS-10", filters)
 
 
 def test_ubridge_apply_filters(node, async_run):
     filters = {
-        "latency": [10]
+        "latency": [10],
+        "bpf": ["icmp[icmptype] == 8\ntcp src port 53"]
     }
     node._ubridge_send = AsyncioMagicMock()
     async_run(node._ubridge_apply_filters("VPCS-10", filters))
     node._ubridge_send.assert_any_call("bridge reset_packet_filters VPCS-10")
     node._ubridge_send.assert_any_call("bridge add_packet_filter VPCS-10 filter0 latency 10")
+
+
+def test_ubridge_apply_bpf_filters(node, async_run):
+    filters = {
+        "bpf": ["icmp[icmptype] == 8\ntcp src port 53"]
+    }
+    node._ubridge_send = AsyncioMagicMock()
+    async_run(node._ubridge_apply_filters("VPCS-10", filters))
+    node._ubridge_send.assert_any_call("bridge reset_packet_filters VPCS-10")
+    node._ubridge_send.assert_any_call("bridge add_packet_filter VPCS-10 filter0 bpf \"icmp[icmptype] == 8\"")
+    node._ubridge_send.assert_any_call("bridge add_packet_filter VPCS-10 filter1 bpf \"tcp src port 53\"")
diff --git a/tests/controller/test_link.py b/tests/controller/test_link.py
index 24638fdb..42015b08 100644
--- a/tests/controller/test_link.py
+++ b/tests/controller/test_link.py
@@ -369,9 +369,10 @@ def test_update_filters(async_run, project, compute):
     link.update = AsyncioMagicMock()
     assert link._created
     async_run(link.update_filters({
-        "packet_loss": ["10"],
-        "delay": ["50", "10"],
-        "frequency_drop": ["0"]
+        "packet_loss": [10],
+        "delay": [50, 10],
+        "frequency_drop": [0],
+        "bpf": [" \n "]
     }))
     assert link.filters == {
         "packet_loss": [10],
diff --git a/tests/controller/test_udp_link.py b/tests/controller/test_udp_link.py
index 3ab6581f..5c738b4b 100644
--- a/tests/controller/test_udp_link.py
+++ b/tests/controller/test_udp_link.py
@@ -373,11 +373,14 @@ def test_update(async_run, project):
     }, timeout=120)
     assert link.created
 
-    async_run(link.update_filters({"drop": [5]}))
+    async_run(link.update_filters({"drop": [5], "bpf": ["icmp[icmptype] == 8"]}))
     compute1.put.assert_any_call("/projects/{}/vpcs/nodes/{}/adapters/0/ports/4/nio".format(project.id, node1.id), data={
         "lport": 1024,
         "rhost": "192.168.1.2",
         "rport": 2048,
         "type": "nio_udp",
-        "filters": {"drop": [5]}
+        "filters": {
+            "drop": [5],
+            "bpf": ["icmp[icmptype] == 8"]
+        }
     }, timeout=120)
diff --git a/tests/handlers/api/compute/test_project.py b/tests/handlers/api/compute/test_project.py
index 25d3d8c2..a1e1cd42 100644
--- a/tests/handlers/api/compute/test_project.py
+++ b/tests/handlers/api/compute/test_project.py
@@ -136,7 +136,7 @@ def test_get_file(http_compute, tmpdir):
     assert response.status == 404
 
     response = http_compute.get("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
-    assert response.status == 403
+    assert response.status == 404
 
 
 def test_write_file(http_compute, tmpdir):
@@ -151,7 +151,7 @@ def test_write_file(http_compute, tmpdir):
         assert f.read() == "world"
 
     response = http_compute.post("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
-    assert response.status == 403
+    assert response.status == 404
 
 
 def test_stream_file(http_compute, tmpdir):
@@ -170,4 +170,4 @@ def test_stream_file(http_compute, tmpdir):
     assert response.status == 404
 
     response = http_compute.get("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
-    assert response.status == 403
+    assert response.status == 404
diff --git a/tests/handlers/api/compute/test_qemu.py b/tests/handlers/api/compute/test_qemu.py
index aa1f2c98..2a4348e8 100644
--- a/tests/handlers/api/compute/test_qemu.py
+++ b/tests/handlers/api/compute/test_qemu.py
@@ -251,7 +251,7 @@ def test_upload_image_ova(http_compute, tmpdir):
 def test_upload_image_forbiden_location(http_compute, tmpdir):
     with patch("gns3server.compute.Qemu.get_images_directory", return_value=str(tmpdir),):
         response = http_compute.post("/qemu/images/../../test2", body="TEST", raw=True)
-        assert response.status == 403
+        assert response.status == 404
 
 
 def test_upload_image_permission_denied(http_compute, tmpdir):
diff --git a/tests/handlers/api/controller/test_node.py b/tests/handlers/api/controller/test_node.py
index eb795c7c..635c1e93 100644
--- a/tests/handlers/api/controller/test_node.py
+++ b/tests/handlers/api/controller/test_node.py
@@ -234,7 +234,7 @@ def test_get_file(http_controller, tmpdir, project, node, compute):
     compute.http_query.assert_called_with("GET", "/projects/{project_id}/files/project-files/vpcs/{node_id}/hello".format(project_id=project.id, node_id=node.id), timeout=None, raw=True)
 
     response = http_controller.get("/projects/{project_id}/nodes/{node_id}/files/../hello".format(project_id=project.id, node_id=node.id), raw=True)
-    assert response.status == 403
+    assert response.status == 404
 
 
 def test_post_file(http_controller, tmpdir, project, node, compute):
@@ -245,4 +245,4 @@ def test_post_file(http_controller, tmpdir, project, node, compute):
     compute.http_query.assert_called_with("POST", "/projects/{project_id}/files/project-files/vpcs/{node_id}/hello".format(project_id=project.id, node_id=node.id), data=b'hello', timeout=None, raw=True)
 
     response = http_controller.get("/projects/{project_id}/nodes/{node_id}/files/../hello".format(project_id=project.id, node_id=node.id), raw=True)
-    assert response.status == 403
+    assert response.status == 404
diff --git a/tests/handlers/api/controller/test_project.py b/tests/handlers/api/controller/test_project.py
index 6290ed2c..7381bd5d 100644
--- a/tests/handlers/api/controller/test_project.py
+++ b/tests/handlers/api/controller/test_project.py
@@ -203,7 +203,7 @@ def test_get_file(http_controller, tmpdir, loop, project):
     assert response.status == 404
 
     response = http_controller.get("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
-    assert response.status == 403
+    assert response.status == 404
 
 
 def test_write_file(http_controller, tmpdir, project):
@@ -214,7 +214,7 @@ def test_write_file(http_controller, tmpdir, project):
         assert f.read() == "world"
 
     response = http_controller.post("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
-    assert response.status == 403
+    assert response.status == 404
 
 
 def test_write_and_get_file_with_leading_slashes_in_filename(http_controller, tmpdir, loop, project):