
Merge pull request #379 from GNS3/docker_cleanup

Docker cleanup and improvements
This commit is contained in:
Jeremy Grossmann 2015-12-24 14:56:24 -08:00
commit 8cf55166cb
17 changed files with 1446 additions and 277 deletions

.gitignore vendored
View File

@ -23,6 +23,7 @@ pip-log.txt
# Unit test / coverage reports
.coverage
.coverage*
.tox
.cache
nosetests.xml

View File

@ -1,6 +1,5 @@
language: python
python:
- '3.4'
- '3.5'
sudo: false
cache: pip

View File

@ -205,4 +205,4 @@ If you want test coverage:
.. code:: bash
py.test --cov=gns3server
py.test --cov-report term-missing --cov=gns3server

View File

@ -21,8 +21,10 @@ from ...web.route import Route
from ...modules.docker import Docker
from ...schemas.docker import (
DOCKER_CREATE_SCHEMA, DOCKER_UPDATE_SCHEMA, DOCKER_CAPTURE_SCHEMA,
DOCKER_OBJECT_SCHEMA
DOCKER_CREATE_SCHEMA,
DOCKER_OBJECT_SCHEMA,
DOCKER_UPDATE_SCHEMA,
DOCKER_LIST_IMAGES_SCHEMA
)
from ...schemas.nio import NIO_SCHEMA
@ -36,6 +38,7 @@ class DockerHandler:
status_codes={
200: "Success",
},
output=DOCKER_LIST_IMAGES_SCHEMA,
description="Get all available Docker images")
def show(request, response):
docker_manager = Docker.instance()
@ -44,7 +47,7 @@ class DockerHandler:
@classmethod
@Route.post(
r"/projects/{project_id}/docker/images",
r"/projects/{project_id}/docker/vms",
parameters={
"project_id": "UUID for the project"
},
@ -61,11 +64,12 @@ class DockerHandler:
container = yield from docker_manager.create_vm(
request.json.pop("name"),
request.match_info["project_id"],
request.json.get("id"),
image=request.json.pop("imagename"),
startcmd=request.json.get("startcmd")
request.json.get("vm_id"),
image=request.json.pop("image"),
start_command=request.json.get("start_command"),
environment=request.json.get("environment"),
adapters=request.json.get("adapters")
)
# FIXME: DO WE NEED THIS?
for name, value in request.json.items():
if name != "_vm_id":
if hasattr(container, name) and getattr(container, name) != value:
@ -76,7 +80,7 @@ class DockerHandler:
@classmethod
@Route.post(
r"/projects/{project_id}/docker/images/{id}/start",
r"/projects/{project_id}/docker/vms/{id}/start",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
@ -91,7 +95,7 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def start(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_container(
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.start()
@ -99,7 +103,7 @@ class DockerHandler:
@classmethod
@Route.post(
r"/projects/{project_id}/docker/images/{id}/stop",
r"/projects/{project_id}/docker/vms/{id}/stop",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
@ -114,7 +118,7 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def stop(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_container(
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.stop()
@ -122,7 +126,7 @@ class DockerHandler:
@classmethod
@Route.post(
r"/projects/{project_id}/docker/images/{id}/reload",
r"/projects/{project_id}/docker/vms/{id}/reload",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
@ -137,7 +141,7 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def reload(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_container(
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.restart()
@ -145,7 +149,7 @@ class DockerHandler:
@classmethod
@Route.delete(
r"/projects/{project_id}/docker/images/{id}",
r"/projects/{project_id}/docker/vms/{id}",
parameters={
"id": "ID for the container",
"project_id": "UUID for the project"
@ -158,7 +162,7 @@ class DockerHandler:
description="Delete a Docker container")
def delete(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_container(
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.remove()
@ -166,7 +170,7 @@ class DockerHandler:
@classmethod
@Route.post(
r"/projects/{project_id}/docker/images/{id}/suspend",
r"/projects/{project_id}/docker/vms/{id}/suspend",
parameters={
"project_id": "UUID of the project",
"id": "ID of the container"
@ -181,14 +185,14 @@ class DockerHandler:
output=DOCKER_OBJECT_SCHEMA)
def suspend(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_container(
container = docker_manager.get_vm(
request.match_info["id"],
project_id=request.match_info["project_id"])
yield from container.pause()
response.set_status(204)
@Route.post(
r"/projects/{project_id}/docker/images/{id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"id": "ID of the container",
@ -205,8 +209,8 @@ class DockerHandler:
output=NIO_SCHEMA)
def create_nio(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_container(
request.match_info["id"],
container = docker_manager.get_vm(
request.match_info["vm_id"],
project_id=request.match_info["project_id"])
nio_type = request.json["type"]
if nio_type not in ("nio_udp",):
@ -217,14 +221,14 @@ class DockerHandler:
adapter = container._ethernet_adapters[
int(request.match_info["adapter_number"])
]
container.adapter_add_nio_binding(
yield from container.adapter_add_nio_binding(
int(request.match_info["adapter_number"]), nio)
response.set_status(201)
response.json(nio)
@classmethod
@Route.delete(
r"/projects/{project_id}/docker/images/{id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
r"/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio",
parameters={
"project_id": "UUID for the project",
"id": "ID of the container",
@ -239,9 +243,36 @@ class DockerHandler:
description="Remove a NIO from a Docker container")
def delete_nio(request, response):
docker_manager = Docker.instance()
container = docker_manager.get_container(
request.match_info["id"],
container = docker_manager.get_vm(
request.match_info["vm_id"],
project_id=request.match_info["project_id"])
yield from container.adapter_remove_nio_binding(
int(request.match_info["adapter_number"]))
response.set_status(204)
@classmethod
@Route.put(
r"/projects/{project_id}/docker/vms/{vm_id}",
parameters={
"project_id": "UUID for the project",
"vm_id": "UUID for the instance"
},
status_codes={
200: "Instance updated",
400: "Invalid request",
404: "Instance doesn't exist",
409: "Conflict"
},
description="Update a Docker instance",
input=DOCKER_UPDATE_SCHEMA,
output=DOCKER_OBJECT_SCHEMA)
def update(request, response):
docker_manager = Docker.instance()
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm.name = request.json.get("name", vm.name)
vm.console = request.json.get("console", vm.console)
vm.start_command = request.json.get("start_command", vm.start_command)
vm.environment = request.json.get("environment", vm.environment)
yield from vm.update()
response.json(vm)
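The handler above maps the Docker endpoints onto /projects/{project_id}/docker/vms and validates payloads against the schemas added later in this commit. As a rough illustration of the create call, here is a minimal client sketch using only the standard library; the host/port (127.0.0.1:8000) and the project UUID are assumptions, and the payload fields are borrowed from the test fixtures in this pull request.

.. code:: python

    # Minimal sketch of creating a Docker VM through the REST handler above.
    # Assumptions: the server is reachable at http://127.0.0.1:8000, the route
    # is mounted exactly as declared, and the project already exists.
    import json
    import urllib.request

    project_id = "00000000-0000-0000-0000-000000000000"  # hypothetical project UUID
    payload = {
        "name": "PC TEST 1",
        "image": "nginx",
        "start_command": "nginx-daemon",
        "adapters": 2,
        "environment": "YES=1\nNO=0",
    }
    request = urllib.request.Request(
        "http://127.0.0.1:8000/projects/{}/docker/vms".format(project_id),
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urllib.request.urlopen(request) as response:
        print(json.loads(response.read().decode("utf-8")))  # matches DOCKER_OBJECT_SCHEMA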

View File

@ -27,6 +27,9 @@ MODULES = [VPCS, VirtualBox, Dynamips, Qemu, VMware]
if sys.platform.startswith("linux") or hasattr(sys, "_called_from_test") or os.environ.get("PYTEST_BUILD_DOCUMENTATION") == "1":
from .docker import Docker
MODULES.append(Docker)
# IOU runs only on Linux but the test suite works on UNIX platforms
if not sys.platform.startswith("win"):
from .iou import IOU

View File

@ -27,6 +27,7 @@ class EthernetAdapter(Adapter):
def __init__(self, interfaces=1):
super().__init__(interfaces)
self.host_ifc = None
def __str__(self):

View File

@ -22,51 +22,92 @@ Docker server module.
import asyncio
import logging
import aiohttp
import docker
from requests.exceptions import ConnectionError
import urllib
import json
log = logging.getLogger(__name__)
from ..base_manager import BaseManager
from ..project_manager import ProjectManager
from .docker_vm import Container
from .docker_vm import DockerVM
from .docker_error import DockerError
class Docker(BaseManager):
_VM_CLASS = Container
_VM_CLASS = DockerVM
def __init__(self):
super().__init__()
# FIXME: make configurable and start docker before trying
self._server_url = 'unix://var/run/docker.sock'
self._client = docker.Client(base_url=self._server_url)
self._execute_lock = asyncio.Lock()
@property
def server_url(self):
"""Returns the Docker server url.
:returns: url
:rtype: string
"""
return self._server_url
@server_url.setter
def server_url(self, value):
self._server_url = value
self._client = docker.Client(base_url=value)
self._server_url = '/var/run/docker.sock'
self._connector = aiohttp.connector.UnixConnector(self._server_url)
# Allow locking during ubridge operations
self.ubridge_lock = asyncio.Lock()
@asyncio.coroutine
def execute(self, command, kwargs, timeout=60):
command = getattr(self._client, command)
log.debug("Executing Docker with command: {}".format(command))
try:
result = command(**kwargs)
except Exception as error:
raise DockerError("Docker has returned an error: {}".format(error))
return result
def query(self, method, path, data={}, params={}):
"""
Make a query to the Docker daemon and decode the response
:param method: HTTP method
:param path: Endpoint in API
:param data: Dictionary with the body; it will be serialized to JSON
:param params: Parameters added as a query arg
"""
response = yield from self.http_query(method, path, data=data, params=params)
body = yield from response.read()
if len(body):
body = json.loads(body.decode("utf-8"))
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
return body
@asyncio.coroutine
def http_query(self, method, path, data={}, params={}):
"""
Make a query to the docker daemon
:param method: HTTP method
:param path: Endpoint in API
:param data: Dictionary with the body; it will be serialized to JSON
:param params: Parameters added as a query arg
:returns: HTTP response
"""
data = json.dumps(data)
url = "http://docker/" + path
response = yield from aiohttp.request(
method,
url,
connector=self._connector,
params=params,
data=data,
headers={"content-type": "application/json", },
)
if response.status >= 300:
body = yield from response.read()
try:
body = json.loads(body.decode("utf-8"))["message"]
except ValueError:
pass
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
raise DockerError("Docker has returned an error: {}".format(body))
return response
@asyncio.coroutine
def websocket_query(self, path, params={}):
"""
Open a websocket connection
:param path: Endpoint in API
:param params: Parameters added as a query arg
:returns: Websocket
"""
url = "http://docker/" + path
connection = yield from aiohttp.ws_connect(url,
connector=self._connector,
origin="http://docker",
autoping=True)
return connection
@asyncio.coroutine
def list_images(self):
@ -76,44 +117,8 @@ class Docker(BaseManager):
:rtype: list
"""
images = []
try:
for image in self._client.images():
for tag in image['RepoTags']:
images.append({'imagename': tag})
return images
except ConnectionError as error:
raise DockerError(
"""Docker couldn't list images and returned an error: {}
Is the Docker service running?""".format(error))
@asyncio.coroutine
def list_containers(self):
"""Gets Docker container list.
:returns: list of dicts
:rtype: list
"""
return self._client.containers()
def get_container(self, cid, project_id=None):
"""Returns a Docker container.
:param id: Docker container identifier
:param project_id: Project identifier
:returns: Docker container
"""
if project_id:
project = ProjectManager.instance().get_project(project_id)
if cid not in self._vms:
raise aiohttp.web.HTTPNotFound(
text="Docker container with ID {} doesn't exist".format(cid))
container = self._vms[cid]
if project_id:
if container.project.id != project.id:
raise aiohttp.web.HTTPNotFound(
text="Project ID {} doesn't belong to container {}".format(
project_id, container.name))
return container
for image in (yield from self.query("GET", "images/json", params={"all": 0})):
for tag in image['RepoTags']:
if tag != "<none>:<none>":
images.append({'image': tag})
return sorted(images, key=lambda i: i['image'])
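The manager now talks to the Docker daemon directly over /var/run/docker.sock through aiohttp's UnixConnector instead of going through docker-py, so list_images() is a thin wrapper around GET images/json. A minimal sketch of driving it from a plain event loop, assuming a running Docker daemon and the Python 3.4/3.5 + aiohttp 0.19 environment this commit targets:

.. code:: python

    # Sketch: list local Docker images through the manager defined above.
    # Assumes the Docker daemon is listening on /var/run/docker.sock and that
    # gns3server is importable; requests travel over the unix socket.
    import asyncio
    from gns3server.modules.docker import Docker

    loop = asyncio.get_event_loop()
    docker_manager = Docker.instance()
    images = loop.run_until_complete(docker_manager.list_images())
    print(images)  # e.g. [{'image': 'nginx:latest'}, {'image': 'ubuntu:14.04'}]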

View File

@ -22,20 +22,22 @@ Docker container instance.
import asyncio
import shutil
import psutil
import shlex
import aiohttp
import json
from docker.utils import create_host_config
from gns3server.ubridge.hypervisor import Hypervisor
from pkg_resources import parse_version
from ...ubridge.hypervisor import Hypervisor
from .docker_error import DockerError
from ..base_vm import BaseVM
from ..adapters.ethernet_adapter import EthernetAdapter
from ..nios.nio_udp import NIOUDP
from ...utils.asyncio.telnet_server import AsyncioTelnetServer
import logging
log = logging.getLogger(__name__)
class Container(BaseVM):
class DockerVM(BaseVM):
"""Docker container implementation.
:param name: Docker container name
@ -45,18 +47,23 @@ class Container(BaseVM):
:param image: Docker image
"""
def __init__(self, name, vm_id, project, manager, image, startcmd=None):
self._name = name
self._id = vm_id
self._project = project
self._manager = manager
def __init__(self, name, vm_id, project, manager, image, console=None, start_command=None, adapters=None, environment=None):
super().__init__(name, vm_id, project, manager, console=console)
self._image = image
self._startcmd = startcmd
self._veths = []
self._start_command = start_command
self._environment = environment
self._cid = None
self._ethernet_adapters = []
self._ubridge_hypervisor = None
self._temporary_directory = None
self._hw_virtualization = False
self._telnet_server = None
self._closed = False
if adapters is None:
self.adapters = 1
else:
self.adapters = adapters
log.debug(
"{module}: {name} [{image}] initialized.".format(
@ -68,15 +75,30 @@ class Container(BaseVM):
return {
"name": self._name,
"vm_id": self._id,
"cid": self._cid,
"container_id": self._cid,
"project_id": self._project.id,
"image": self._image,
"adapters": self.adapters,
"console": self.console,
"start_command": self.start_command,
"environment": self.environment
}
@property
def veths(self):
"""Returns Docker host veth interfaces."""
return self._veths
def start_command(self):
return self._start_command
@start_command.setter
def start_command(self, command):
self._start_command = command
@property
def environment(self):
return self._environment
@environment.setter
def environment(self, command):
self._environment = command
@asyncio.coroutine
def _get_container_state(self):
@ -85,74 +107,53 @@ class Container(BaseVM):
:returns: state
:rtype: str
"""
try:
result = yield from self.manager.execute(
"inspect_container", {"container": self._cid})
result_dict = {state.lower(): value for state, value in result["State"].items()}
for state, value in result_dict.items():
if value is True:
# a container can be both paused and running
if state == "paused":
return "paused"
if state == "running":
if "paused" in result_dict and result_dict["paused"] is True:
return "paused"
return state.lower()
return 'exited'
except Exception as err:
raise DockerError("Could not get container state for {0}: ".format(
self._name), str(err))
result = yield from self.manager.query("GET", "containers/{}/json".format(self._cid))
if result["State"]["Paused"]:
return "paused"
if result["State"]["Running"]:
return "running"
return "exited"
@asyncio.coroutine
def create(self):
"""Creates the Docker container."""
params = {
"name": self._name,
"image": self._image,
"network_disabled": True,
"host_config": create_host_config(
privileged=True, cap_add=['ALL'])
"Name": self._name,
"Image": self._image,
"NetworkDisabled": True,
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig": {
"CapAdd": ["ALL"],
"Privileged": True
}
}
if self._startcmd:
params.update({'command': self._startcmd})
if self._start_command:
params.update({"Cmd": shlex.split(self._start_command)})
result = yield from self.manager.execute("create_container", params)
if self._environment:
params.update({"Env": [e.strip() for e in self._environment.split("\n")]})
images = [i["image"] for i in (yield from self.manager.list_images())]
if self._image not in images:
log.info("Image %s is missing pulling it from docker hub", self._image)
yield from self.pull_image(self._image)
result = yield from self.manager.query("POST", "containers/create", data=params)
self._cid = result['Id']
log.info("Docker container '{name}' [{id}] created".format(
name=self._name, id=self._id))
return True
@property
def ubridge_path(self):
"""Returns the uBridge executable path.
:returns: path to uBridge
"""
path = self._manager.config.get_section_config("Server").get(
"ubridge_path", "ubridge")
if path == "ubridge":
path = shutil.which("ubridge")
return path
@asyncio.coroutine
def _start_ubridge(self):
"""Starts uBridge (handles connections to and from this Docker VM)."""
server_config = self._manager.config.get_section_config("Server")
server_host = server_config.get("host")
self._ubridge_hypervisor = Hypervisor(
self._project, self.ubridge_path, self.working_dir, server_host)
log.info("Starting new uBridge hypervisor {}:{}".format(
self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
yield from self._ubridge_hypervisor.start()
log.info("Hypervisor {}:{} has successfully started".format(
self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
yield from self._ubridge_hypervisor.connect()
if parse_version(
self._ubridge_hypervisor.version) < parse_version('0.9.1'):
raise DockerError(
"uBridge version must be >= 0.9.1, detected version is {}".format(
self._ubridge_hypervisor.version))
def update(self):
"""
Destroy and recreate the container with the new settings
"""
yield from self.remove()
yield from self.create()
@asyncio.coroutine
def start(self):
@ -162,17 +163,68 @@ class Container(BaseVM):
if state == "paused":
yield from self.unpause()
else:
result = yield from self.manager.execute(
"start", {"container": self._cid})
result = yield from self.manager.query("POST", "containers/{}/start".format(self._cid))
yield from self._start_ubridge()
for adapter_number in range(0, self.adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
yield from self._add_ubridge_connection(nio, adapter_number)
yield from self._start_ubridge()
for adapter_number in range(0, self.adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
with (yield from self.manager.ubridge_lock):
yield from self._add_ubridge_connection(nio, adapter_number)
log.info("Docker container '{name}' [{image}] started".format(
name=self._name, image=self._image))
yield from self._start_console()
self.status = "started"
log.info("Docker container '{name}' [{image}] started listen for telnet on {console}".format(name=self._name, image=self._image, console=self._console))
@asyncio.coroutine
def _start_console(self):
"""
Start streaming the console via telnet
"""
class InputStream:
def __init__(self):
self._data = b""
def write(self, data):
self._data += data
@asyncio.coroutine
def drain(self):
if not self.ws.closed:
self.ws.send_bytes(self._data)
self._data = b""
output_stream = asyncio.StreamReader()
input_stream = InputStream()
telnet = AsyncioTelnetServer(reader=output_stream, writer=input_stream)
self._telnet_server = yield from asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self._console)
ws = yield from self.manager.websocket_query("containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid))
input_stream.ws = ws
output_stream.feed_data(self.name.encode() + b" console is now available... Press RETURN to get started.\r\n")
asyncio.async(self._read_console_output(ws, output_stream))
@asyncio.coroutine
def _read_console_output(self, ws, out):
"""
Read from the websocket and forward the output to the telnet server
:param ws: Websocket connection
:param out: Output stream
"""
while True:
msg = yield from ws.receive()
if msg.tp == aiohttp.MsgType.text:
out.feed_data(msg.data.encode())
else:
out.feed_eof()
ws.close()
break
def is_running(self):
"""Checks if the container is running.
@ -187,9 +239,8 @@ class Container(BaseVM):
@asyncio.coroutine
def restart(self):
"""Restarts this Docker container."""
result = yield from self.manager.execute(
"restart", {"container": self._cid})
"""Restart this Docker container."""
yield from self.manager.query("POST", "containers/{}/restart".format(self._cid))
log.info("Docker container '{name}' [{image}] restarted".format(
name=self._name, image=self._image))
@ -203,27 +254,30 @@ class Container(BaseVM):
state = yield from self._get_container_state()
if state == "paused":
yield from self.unpause()
result = yield from self.manager.execute(
"kill", {"container": self._cid})
if self._telnet_server:
self._telnet_server.close()
self._telnet_server = None
# t=5 number of seconds to wait before killing the container
yield from self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5})
log.info("Docker container '{name}' [{image}] stopped".format(
name=self._name, image=self._image))
@asyncio.coroutine
def pause(self):
"""Pauses this Docker container."""
result = yield from self.manager.execute(
"pause", {"container": self._cid})
yield from self.manager.query("POST", "containers/{}/pause".format(self._cid))
log.info("Docker container '{name}' [{image}] paused".format(
name=self._name, image=self._image))
self.status = "paused"
@asyncio.coroutine
def unpause(self):
"""Unpauses this Docker container."""
result = yield from self.manager.execute(
"unpause", {"container": self._cid})
state = yield from self._get_container_state()
yield from self.manager.query("POST", "containers/{}/unpause".format(self._cid))
log.info("Docker container '{name}' [{image}] unpaused".format(
name=self._name, image=self._image))
self.status = "started"
@asyncio.coroutine
def remove(self):
@ -233,17 +287,30 @@ class Container(BaseVM):
yield from self.unpause()
if state == "running":
yield from self.stop()
result = yield from self.manager.execute(
"remove_container", {"container": self._cid, "force": True})
yield from self.manager.query("DELETE", "containers/{}".format(self._cid), params={"force": 1})
log.info("Docker container '{name}' [{image}] removed".format(
name=self._name, image=self._image))
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None
for adapter in self._ethernet_adapters:
if adapter is not None:
for nio in adapter.ports.values():
if nio and isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
@asyncio.coroutine
def close(self):
"""Closes this Docker container."""
if self._closed:
return
log.debug("Docker container '{name}' [{id}] is closing".format(
name=self.name, id=self._cid))
for adapter in self._ethernet_adapters.values():
for adapter in self._ethernet_adapters:
if adapter is not None:
for nio in adapter.ports.values():
if nio and isinstance(nio, NIOUDP):
@ -256,6 +323,7 @@ class Container(BaseVM):
name=self.name, id=self._cid))
self._closed = True
@asyncio.coroutine
def _add_ubridge_connection(self, nio, adapter_number):
"""
Creates a connection in uBridge.
@ -267,8 +335,7 @@ class Container(BaseVM):
adapter = self._ethernet_adapters[adapter_number]
except IndexError:
raise DockerError(
"Adapter {adapter_number} doesn't exist on Docker container '{name}'".format(
name=self.name, adapter_number=adapter_number))
"Adapter {adapter_number} doesn't exist on Docker container '{name}'".format(name=self.name, adapter_number=adapter_number))
if nio and isinstance(nio, NIOUDP):
for index in range(128):
@ -279,18 +346,20 @@ class Container(BaseVM):
break
if not hasattr(adapter, "ifc"):
raise DockerError(
"Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'".format(
"Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(
name=self.name, adapter_number=adapter_number))
else:
raise ValueError("Invalid NIO")
yield from self._ubridge_hypervisor.send(
'docker create_veth {hostif} {guestif}'.format(
guestif=adapter.guest_ifc, hostif=adapter.host_ifc))
self._veths.append(adapter.host_ifc)
namespace = yield from self.get_namespace()
namespace = yield from self._get_namespace()
log.debug("Move container %s adapter %s to namespace %s", self.name, adapter.guest_ifc, namespace)
yield from self._ubridge_hypervisor.send(
'docker move_to_ns {ifc} {ns}'.format(
ifc=adapter.guest_ifc, ns=namespace))
'docker move_to_ns {ifc} {ns} eth{adapter}'.format(
ifc=adapter.guest_ifc, ns=namespace, adapter=adapter_number))
yield from self._ubridge_hypervisor.send(
'bridge create bridge{}'.format(adapter_number))
@ -321,9 +390,14 @@ class Container(BaseVM):
name=adapter_number))
adapter = self._ethernet_adapters[adapter_number]
yield from self._ubridge_hypervisor.send("docker delete_veth {name}".format(
name=adapter.host_ifc))
yield from self._ubridge_hypervisor.send('docker delete_veth {hostif} {guestif}'.format(guestif=adapter.guest_ifc, hostif=adapter.host_ifc))
@asyncio.coroutine
def _get_namespace(self):
result = yield from self.manager.query("GET", "containers/{}/json".format(self._cid))
return int(result['State']['Pid'])
@asyncio.coroutine
def adapter_add_nio_binding(self, adapter_number, nio):
"""Adds an adapter NIO binding.
@ -345,6 +419,7 @@ class Container(BaseVM):
nio=nio,
adapter_number=adapter_number))
@asyncio.coroutine
def adapter_remove_nio_binding(self, adapter_number):
"""
Removes an adapter NIO binding.
@ -361,10 +436,7 @@ class Container(BaseVM):
name=self.name, adapter_number=adapter_number))
adapter.remove_nio(0)
try:
yield from self._delete_ubridge_connection(adapter_number)
except:
pass
yield from self._delete_ubridge_connection(adapter_number)
log.info(
"Docker VM '{name}' [{id}]: {nio} removed from adapter {adapter_number}".format(
@ -397,7 +469,28 @@ class Container(BaseVM):
id=self._id,
adapters=adapters))
def get_namespace(self):
result = yield from self.manager.execute(
"inspect_container", {"container": self._cid})
return int(result['State']['Pid'])
@asyncio.coroutine
def pull_image(self, image):
"""
Pull an image from the Docker repository
"""
log.info("Pulling %s from Docker Hub", image)
response = yield from self.manager.http_query("POST", "images/create", params={"fromImage": image})
# The pull API streams its status as an HTTP JSON stream
content = ""
while True:
chunk = yield from response.content.read(1024)
if not chunk:
break
content += chunk.decode("utf-8")
try:
while True:
content = content.lstrip(" \r\n\t")
answer, index = json.JSONDecoder().raw_decode(content)
if "progress" in answer:
self.project.emit("log.info", {"message": "Pulling image {}:{}: {}".format(self._image, answer["id"], answer["progress"])})
content = content[index:]
except ValueError: # Partial JSON
pass
self.project.emit("log.info", {"message": "Success pulling image {}".format(self._image)})

View File

@ -23,58 +23,26 @@ DOCKER_CREATE_SCHEMA = {
"properties": {
"vm_id": {
"description": "Docker VM instance identifier",
"oneOf": [
{"type": "string",
"minLength": 36,
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"},
{"type": "integer"} # for legacy projects
]
"type": "string",
"minLength": 36,
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"name": {
"description": "Docker container name",
"type": "string",
"minLength": 1,
},
"startcmd": {
"description": "Docker CMD entry",
"type": "string",
"minLength": 1,
},
"imagename": {
"description": "Docker image name",
"type": "string",
"minLength": 1,
},
"adapters": {
"description": "number of adapters",
"type": "integer",
"minimum": 0,
"maximum": 64,
},
"adapter_type": {
"description": "Docker adapter type",
"type": "string",
"minLength": 1,
},
"console": {
"description": "console name",
"type": "string",
"minLength": 1,
"description": "console TCP port",
"minimum": 1,
"maximum": 65535,
"type": ["integer", "null"]
},
},
"additionalProperties": False,
}
DOCKER_UPDATE_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Request validation to update a Docker container",
"type": "object",
"properties": {
"name": {
"description": "Docker container name",
"type": "string",
"minLength": 1,
"start_command": {
"description": "Docker CMD entry",
"type": ["string", "null"],
"minLength": 0,
},
"image": {
"description": "Docker image name",
@ -83,32 +51,50 @@ DOCKER_UPDATE_SCHEMA = {
},
"adapters": {
"description": "number of adapters",
"type": "integer",
"type": ["integer", "null"],
"minimum": 0,
"maximum": 64,
},
"adapter_type": {
"description": "Docker adapter type",
"type": "string",
"minLength": 1,
"maximum": 99,
},
"environment": {
"description": "Docker environment",
"type": ["string", "null"],
"minLength": 0,
}
},
"additionalProperties": False,
}
DOCKER_CAPTURE_SCHEMA = {
DOCKER_UPDATE_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Request validation to start a packet capture on a Docker container port",
"description": "Request validation to create a new Docker container",
"type": "object",
"properties": {
"capture_file_name": {
"description": "Capture file name",
"name": {
"description": "Docker container name",
"type": "string",
"minLength": 1,
},
"console": {
"description": "console TCP port",
"minimum": 1,
"maximum": 65535,
"type": ["integer", "null"]
},
"start_command": {
"description": "Docker CMD entry",
"type": ["string", "null"],
"minLength": 0,
},
"environment": {
"description": "Docker environment",
"type": ["string", "null"],
"minLength": 0,
}
},
"additionalProperties": False,
"required": ["capture_file_name"]
}
DOCKER_OBJECT_SCHEMA = {
@ -128,12 +114,18 @@ DOCKER_OBJECT_SCHEMA = {
"maxLength": 36,
"pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$"
},
"cid": {
"console": {
"description": "console TCP port",
"minimum": 1,
"maximum": 65535,
"type": ["integer", "null"]
},
"container_id": {
"description": "Docker container ID",
"type": "string",
"minLength": 64,
"minLength": 12,
"maxLength": 64,
"pattern": "^[a-zA-Z0-9_.-]{64}$"
"pattern": "^[a-f0-9]+$"
},
"project_id": {
"description": "Project UUID",
@ -149,16 +141,40 @@ DOCKER_OBJECT_SCHEMA = {
},
"adapters": {
"description": "number of adapters",
"type": "integer",
"type": ["integer", "null"],
"minimum": 0,
"maximum": 64,
"maximum": 99,
},
"adapter_type": {
"description": "Docker adapter type",
"type": "string",
"minLength": 1,
"start_command": {
"description": "Docker CMD entry",
"type": ["string", "null"],
"minLength": 0,
},
"environment": {
"description": "Docker environment",
"type": ["string", "null"],
"minLength": 0,
}
},
"additionalProperties": False,
"required": ["vm_id", "project_id"]
"required": ["vm_id", "project_id", "image", "container_id", "adapters", "console", "start_command", "environment"]
}
DOCKER_LIST_IMAGES_SCHEMA = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": "Docker list of images",
"type": "array",
"items": [
{
"type": "object",
"properties": {
"image": {
"description": "Docker image name",
"type": "string",
"minLength": 1
}
}
}
]
}
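Since jsonschema (>=2.4.0 in requirements.txt) already validates request bodies on the routes, the schemas above can also be exercised directly; a minimal sketch validating a create payload (values borrowed from the test fixtures in this pull request) against DOCKER_CREATE_SCHEMA:

.. code:: python

    # Sketch: validate a payload against the create schema defined above.
    # Assumes gns3server is importable; jsonschema is a declared dependency.
    import jsonschema
    from gns3server.schemas.docker import DOCKER_CREATE_SCHEMA

    payload = {
        "name": "PC TEST 1",
        "image": "nginx",
        "start_command": "nginx-daemon",
        "adapters": 2,
        "environment": "YES=1\nNO=0",
    }
    jsonschema.validate(payload, DOCKER_CREATE_SCHEMA)   # passes silently

    payload["bogus"] = True                              # rejected: additionalProperties is False
    try:
        jsonschema.validate(payload, DOCKER_CREATE_SCHEMA)
    except jsonschema.ValidationError as error:
        print("rejected:", error.message)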

View File

@ -200,7 +200,7 @@ class Server:
"""
logger = logging.getLogger("asyncio")
logger.setLevel(logging.WARNING)
logger.setLevel(logging.ERROR)
server_config = Config.instance().get_section_config("Server")
if sys.platform.startswith("win"):
@ -249,6 +249,9 @@ class Server:
if server_config.getboolean("shell"):
asyncio.async(self.start_shell())
from gns3server.modules.docker import Docker
asyncio.async(Docker.instance().query("GET", "info"))
try:
self._loop.run_forever()
except TypeError as e:

View File

@ -225,4 +225,6 @@ class Hypervisor(UBridgeHypervisor):
command = [self._path]
command.extend(["-H", "{}:{}".format(self._host, self._port)])
if log.getEffectiveLevel() == logging.DEBUG:
command.extend(["-d", "2"])
return command

View File

@ -0,0 +1,227 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import asyncio.subprocess
import logging
log = logging.getLogger(__name__)
# Mostly from https://code.google.com/p/miniboa/source/browse/trunk/miniboa/telnet.py
# Telnet Commands
SE = 240 # End of sub-negotiation parameters
NOP = 241 # No operation
DATMK = 242 # Data stream portion of a sync.
BREAK = 243 # NVT Character BRK
IP = 244 # Interrupt Process
AO = 245 # Abort Output
AYT = 246 # Are you there
EC = 247 # Erase Character
EL = 248 # Erase Line
GA = 249 # The Go Ahead Signal
SB = 250 # Sub-option to follow
WILL = 251 # Will; request or confirm option begin
WONT = 252 # Wont; deny option request
DO = 253 # Do = Request or confirm remote option
DONT = 254 # Don't = Demand or confirm option halt
IAC = 255 # Interpret as Command
SEND = 1 # Sub-process negotiation SEND command
IS = 0 # Sub-process negotiation IS command
# Telnet Options
BINARY = 0 # Transmit Binary
ECHO = 1 # Echo characters back to sender
RECON = 2 # Reconnection
SGA = 3 # Suppress Go-Ahead
TMARK = 6 # Timing Mark
TTYPE = 24 # Terminal Type
NAWS = 31 # Negotiate About Window Size
LINEMO = 34 # Line Mode
READ_SIZE = 1024
class AsyncioTelnetServer:
def __init__(self, reader=None, writer=None):
self._reader = reader
self._writer = writer
self._clients = set()
self._lock = asyncio.Lock()
self._reader_process = None
self._current_read = None
@asyncio.coroutine
def run(self, network_reader, network_writer):
# Keep track of connected clients
self._clients.add(network_writer)
try:
# Send initial telnet session opening
network_writer.write(bytes([IAC, WILL, ECHO,
IAC, WILL, SGA,
IAC, WILL, BINARY,
IAC, DO, BINARY]))
yield from network_writer.drain()
yield from self._process(network_reader, network_writer)
except ConnectionResetError:
with (yield from self._lock):
if self._reader_process == network_reader:
self._reader_process = None
# Cancel current read from this reader
self._current_read.cancel()
self._clients.remove(network_writer)
@asyncio.coroutine
def _get_reader(self, network_reader):
"""
Get a reader or None if another reader is already reading.
"""
with (yield from self._lock):
if self._reader_process is None:
self._reader_process = network_reader
if self._reader_process == network_reader:
self._current_read = asyncio.async(self._reader.read(READ_SIZE))
return self._current_read
return None
@asyncio.coroutine
def _process(self, network_reader, network_writer):
network_read = asyncio.async(network_reader.read(READ_SIZE))
reader_read = yield from self._get_reader(network_reader)
while True:
if reader_read is None:
reader_read = yield from self._get_reader(network_reader)
if reader_read is None:
done, pending = yield from asyncio.wait(
[
network_read,
],
timeout=1,
return_when=asyncio.FIRST_COMPLETED)
else:
done, pending = yield from asyncio.wait(
[
network_read,
reader_read
],
return_when=asyncio.FIRST_COMPLETED)
for coro in done:
data = coro.result()
if coro == network_read:
network_read = asyncio.async(network_reader.read(READ_SIZE))
if IAC in data:
data = yield from self._IAC_parser(data, network_reader, network_writer)
if self._writer:
self._writer.write(data)
yield from self._writer.drain()
elif coro == reader_read:
reader_read = yield from self._get_reader(network_reader)
# Replicate the output on all clients
for writer in self._clients:
writer.write(data)
yield from writer.drain()
def _IAC_parser(self, buf, network_reader, network_writer):
"""
Processes and removes any Telnet commands from the buffer.
:param buf: buffer
:returns: buffer minus Telnet commands
"""
skip_to = 0
while True:
# Locate an IAC to process
iac_loc = buf.find(IAC, skip_to)
if iac_loc < 0:
break
# Get the TELNET command
iac_cmd = bytearray([IAC])
try:
iac_cmd.append(buf[iac_loc + 1])
except IndexError:
d = yield from network_reader.read(1)
buf.extend(d)
iac_cmd.append(buf[iac_loc + 1])
# Is this just a 2-byte TELNET command?
if iac_cmd[1] not in [WILL, WONT, DO, DONT]:
if iac_cmd[1] == AYT:
log.debug("Telnet server received Are-You-There (AYT)")
network_writer.write(b'\r\nYour Are-You-There received. I am here.\r\n')
elif iac_cmd[1] == IAC:
# It's data, not an IAC
iac_cmd.pop()
# This prevents the 0xff from being
# interpreted as yet another IAC
skip_to = iac_loc + 1
log.debug("Received IAC IAC")
elif iac_cmd[1] == NOP:
pass
else:
log.debug("Unhandled telnet command: "
"{0:#x} {1:#x}".format(*iac_cmd))
# This must be a 3-byte TELNET command
else:
try:
iac_cmd.append(buf[iac_loc + 2])
except IndexError:
d = yield from network_reader.read(1)
buf.extend(d)
iac_cmd.append(buf[iac_loc + 2])
# We do ECHO, SGA, and BINARY. Period.
if iac_cmd[1] == DO and iac_cmd[2] not in [ECHO, SGA, BINARY]:
network_writer.write(bytes([IAC, WONT, iac_cmd[2]]))
log.debug("Telnet WON'T {:#x}".format(iac_cmd[2]))
else:
log.debug("Unhandled telnet command: "
"{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
# Remove the entire TELNET command from the buffer
buf = buf.replace(iac_cmd, b'', 1)
yield from network_writer.drain()
# Return the new copy of the buffer, minus telnet commands
return buf
if __name__ == '__main__':
loop = asyncio.get_event_loop()
process = loop.run_until_complete(asyncio.async(asyncio.subprocess.create_subprocess_exec("bash",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)))
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin)
coro = asyncio.start_server(server.run, '127.0.0.1', 2222, loop=loop)
s = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
s.close()
loop.run_until_complete(s.wait_closed())
loop.close()
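When the module is run directly, the __main__ block above serves an interactive bash session over telnet on 127.0.0.1:2222, which is handy for poking at the option negotiation. A tiny stdlib-only client sketch, assuming that demo server has been started in another terminal:

.. code:: python

    # Sketch: raw-socket client for the demo server started by the __main__
    # block above (it listens on 127.0.0.1:2222).
    import socket

    IAC, WILL, DO, ECHO, SGA, BINARY = 255, 251, 253, 1, 3, 0

    with socket.create_connection(("127.0.0.1", 2222), timeout=5) as sock:
        greeting = sock.recv(1024)
        # The stream begins with the negotiation sent by run():
        # IAC WILL ECHO, IAC WILL SGA, IAC WILL BINARY, IAC DO BINARY
        print(greeting[:12] == bytes([IAC, WILL, ECHO, IAC, WILL, SGA,
                                      IAC, WILL, BINARY, IAC, DO, BINARY]))
        sock.sendall(b"echo hello\r\n")
        print(sock.recv(1024).decode(errors="replace"))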

View File

@ -1,6 +1,4 @@
jsonschema>=2.4.0
aiohttp==0.17.4
aiohttp==0.19.0
Jinja2>=2.7.3
raven>=5.2.0
docker-py==1.4.0
psutil>=3.0.0

View File

@ -0,0 +1,125 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import os
import stat
import sys
import uuid
import aiohttp
from tests.utils import asyncio_patch
from unittest.mock import patch, MagicMock, PropertyMock
@pytest.fixture
def base_params():
"""Return standard parameters"""
return {"name": "PC TEST 1", "image": "nginx", "start_command": "nginx-daemon", "adapters": 2, "environment": "YES=1\nNO=0"}
@pytest.fixture
def vm(server, project, base_params):
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
response = server.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
if response.status != 201:
print(response.body)
assert response.status == 201
return response.json
def test_docker_create(server, project, base_params):
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "nginx"}]) as mock_list:
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value={"Id": "8bd8153ea8f5"}) as mock:
response = server.post("/projects/{project_id}/docker/vms".format(project_id=project.id), base_params)
assert response.status == 201
assert response.route == "/projects/{project_id}/docker/vms"
assert response.json["name"] == "PC TEST 1"
assert response.json["project_id"] == project.id
assert response.json["container_id"] == "8bd8153ea8f5"
assert response.json["image"] == "nginx"
assert response.json["adapters"] == 2
assert response.json["environment"] == "YES=1\nNO=0"
def test_docker_start(server, vm):
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.start", return_value=True) as mock:
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/start".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
assert mock.called
assert response.status == 204
def test_docker_stop(server, vm):
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.stop", return_value=True) as mock:
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/stop".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
assert mock.called
assert response.status == 204
def test_docker_reload(server, vm):
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.restart", return_value=True) as mock:
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/reload".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
assert mock.called
assert response.status == 204
def test_docker_delete(server, vm):
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.remove", return_value=True) as mock:
response = server.delete("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
assert mock.called
assert response.status == 204
def test_docker_suspend(server, vm):
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.pause", return_value=True) as mock:
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/suspend".format(project_id=vm["project_id"], vm_id=vm["vm_id"]))
assert mock.called
assert response.status == 204
def test_docker_nio_create_udp(server, vm):
response = server.post("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"},
example=True)
assert response.status == 201
assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
assert response.json["type"] == "nio_udp"
def test_docker_delete_nio(server, vm):
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.adapter_remove_nio_binding") as mock:
response = server.delete("/projects/{project_id}/docker/vms/{vm_id}/adapters/0/ports/0/nio".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), example=True)
assert response.status == 204
assert response.route == "/projects/{project_id}/docker/vms/{vm_id}/adapters/{adapter_number:\d+}/ports/{port_number:\d+}/nio"
def test_docker_update(server, vm, tmpdir, free_console_port):
with asyncio_patch("gns3server.modules.docker.docker_vm.DockerVM.update") as mock:
response = server.put("/projects/{project_id}/docker/vms/{vm_id}".format(project_id=vm["project_id"], vm_id=vm["vm_id"]), {"name": "test",
"console": free_console_port,
"start_command": "yes",
"environment": "GNS3=1\nGNS4=0"},
example=True)
assert mock.called
assert response.status == 200
assert response.json["name"] == "test"
assert response.json["console"] == free_console_port
assert response.json["start_command"] == "yes"
assert response.json["environment"] == "GNS3=1\nGNS4=0"

View File

@ -0,0 +1,130 @@
#!/usr/bin/env python
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import asyncio
from unittest.mock import MagicMock
from tests.utils import asyncio_patch
from gns3server.modules.docker import Docker
from gns3server.modules.docker.docker_error import DockerError
def test_query_success(loop):
vm = Docker()
response = MagicMock()
response.status = 200
@asyncio.coroutine
def read():
return b'{"c": false}'
response.read.side_effect = read
with asyncio_patch("aiohttp.request", return_value=response) as mock:
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
mock.assert_called_with('POST',
'http://docker/test',
connector=vm._connector,
data='{"a": true}',
headers={'content-type': 'application/json'},
params={'b': 1})
assert data == {"c": False}
def test_query_error(loop):
vm = Docker()
response = MagicMock()
response.status = 404
@asyncio.coroutine
def read():
return b"NOT FOUND"
response.read.side_effect = read
with asyncio_patch("aiohttp.request", return_value=response) as mock:
with pytest.raises(DockerError):
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
mock.assert_called_with('POST',
'http://docker/test',
connector=vm._connector,
data='{"a": true}',
headers={'content-type': 'application/json'},
params={'b': 1})
def test_query_error_json(loop):
vm = Docker()
response = MagicMock()
response.status = 404
@asyncio.coroutine
def read():
return b'{"message": "Error"}'
response.read.side_effect = read
with asyncio_patch("aiohttp.request", return_value=response) as mock:
with pytest.raises(DockerError):
data = loop.run_until_complete(asyncio.async(vm.query("POST", "test", data={"a": True}, params={"b": 1})))
mock.assert_called_with('POST',
'http://docker/test',
connector=vm._connector,
data='{"a": true}',
headers={'content-type': 'application/json'},
params={'b': 1})
def test_list_images(loop):
response = [
{
"RepoTags": [
"ubuntu:12.04",
"ubuntu:precise",
"ubuntu:latest"
],
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Created": 1365714795,
"Size": 131506275,
"VirtualSize": 131506275
},
{
"RepoTags": [
"ubuntu:12.10",
"ubuntu:quantal",
"<none>:<none>"
],
"ParentId": "27cf784147099545",
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Created": 1364102658,
"Size": 24653,
"VirtualSize": 180116135
}
]
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
images = loop.run_until_complete(asyncio.async(Docker.instance().list_images()))
mock.assert_called_with("GET", "images/json", params={"all": 0})
assert images == [
{"image": "ubuntu:12.04"},
{"image": "ubuntu:precise"},
{"image": "ubuntu:latest"},
{"image": "ubuntu:12.10"},
{"image": "ubuntu:quantal"}
]

View File

@ -0,0 +1,535 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import uuid
import asyncio
from tests.utils import asyncio_patch
from gns3server.modules.docker.docker_vm import DockerVM
from gns3server.modules.docker.docker_error import DockerError
from gns3server.modules.docker import Docker
from unittest.mock import patch, MagicMock, PropertyMock, call
from gns3server.config import Config
@pytest.fixture(scope="module")
def manager(port_manager):
m = Docker.instance()
m.port_manager = port_manager
return m
@pytest.fixture(scope="function")
def vm(project, manager):
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu")
vm._cid = "e90e34656842"
return vm
def test_json(vm, project):
assert vm.__json__() == {
'container_id': 'e90e34656842',
'image': 'ubuntu',
'name': 'test',
'project_id': project.id,
'vm_id': vm.id,
'adapters': 1,
'console': vm.console,
'start_command': vm.start_command,
'environment': vm.environment
}
def test_create(loop, project, manager):
response = {
"Id": "e90e34656806",
"Warnings": []
}
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu")
loop.run_until_complete(asyncio.async(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Privileged": True
},
"NetworkDisabled": True,
"Name": "test",
"Image": "ubuntu"
})
assert vm._cid == "e90e34656806"
def test_create_start_cmd(loop, project, manager):
response = {
"Id": "e90e34656806",
"Warnings": []
}
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu")
vm._start_command = "/bin/ls"
loop.run_until_complete(asyncio.async(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Privileged": True
},
"Cmd": ["/bin/ls"],
"NetworkDisabled": True,
"Name": "test",
"Image": "ubuntu"
})
assert vm._cid == "e90e34656806"
def test_create_environment(loop, project, manager):
response = {
"Id": "e90e34656806",
"Warnings": []
}
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu")
vm.environment = "YES=1\nNO=0"
loop.run_until_complete(asyncio.async(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Privileged": True
},
"Env": [
"YES=1",
"NO=0"
],
"NetworkDisabled": True,
"Name": "test",
"Image": "ubuntu"
})
assert vm._cid == "e90e34656806"
def test_create_image_not_available(loop, project, manager):
response = {
"Id": "e90e34656806",
"Warnings": []
}
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[]) as mock_list_images:
with asyncio_patch("gns3server.modules.docker.DockerVM.pull_image", return_value=True) as mock_pull:
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu")
loop.run_until_complete(asyncio.async(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Privileged": True
},
"NetworkDisabled": True,
"Name": "test",
"Image": "ubuntu"
})
assert vm._cid == "e90e34656806"
mock_pull.assert_called_with("ubuntu")
def test_get_container_state(loop, vm):
response = {
"State": {
"Error": "",
"ExitCode": 9,
"FinishedAt": "2015-01-06T15:47:32.080254511Z",
"OOMKilled": False,
"Paused": False,
"Pid": 0,
"Restarting": False,
"Running": True,
"StartedAt": "2015-01-06T15:47:32.072697474Z"
}
}
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "running"
response["State"]["Running"] = False
response["State"]["Paused"] = True
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "paused"
response["State"]["Running"] = False
response["State"]["Paused"] = False
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
assert loop.run_until_complete(asyncio.async(vm._get_container_state())) == "exited"
def test_is_running(loop, vm):
response = {
"State": {
"Running": False,
"Paused": False
}
}
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
assert loop.run_until_complete(asyncio.async(vm.is_running())) is False
response["State"]["Running"] = True
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock:
assert loop.run_until_complete(asyncio.async(vm.is_running())) is True
def test_pause(loop, vm):
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
loop.run_until_complete(asyncio.async(vm.pause()))
mock.assert_called_with("POST", "containers/e90e34656842/pause")
assert vm.status == "paused"
def test_unpause(loop, vm):
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
loop.run_until_complete(asyncio.async(vm.unpause()))
mock.assert_called_with("POST", "containers/e90e34656842/unpause")
def test_start(loop, vm, manager, free_console_port):
assert vm.status != "started"
vm.adapters = 1
nio = manager.create_nio(0, {"type": "nio_udp", "lport": free_console_port, "rport": free_console_port, "rhost": "127.0.0.1"})
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
with asyncio_patch("gns3server.modules.docker.DockerVM._start_ubridge") as mock_start_ubridge:
with asyncio_patch("gns3server.modules.docker.DockerVM._add_ubridge_connection") as mock_add_ubridge_connection:
with asyncio_patch("gns3server.modules.docker.DockerVM._start_console") as mock_start_console:
loop.run_until_complete(asyncio.async(vm.start()))
mock_query.assert_called_with("POST", "containers/e90e34656842/start")
mock_add_ubridge_connection.assert_called_once_with(nio, 0)
assert mock_start_ubridge.called
assert mock_start_console.called
assert vm.status == "started"
def test_start_unpause(loop, vm, manager, free_console_port):
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="paused"):
with asyncio_patch("gns3server.modules.docker.DockerVM.unpause", return_value="paused") as mock:
loop.run_until_complete(asyncio.async(vm.start()))
assert mock.called
assert vm.status == "started"
def test_restart(loop, vm):
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock:
loop.run_until_complete(asyncio.async(vm.restart()))
mock.assert_called_with("POST", "containers/e90e34656842/restart")
def test_stop(loop, vm):
vm._ubridge_hypervisor = MagicMock()
vm._ubridge_hypervisor.is_running.return_value = True
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="running"):
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
loop.run_until_complete(asyncio.async(vm.stop()))
mock_query.assert_called_with("POST", "containers/e90e34656842/stop", params={"t": 5})
assert vm._ubridge_hypervisor.stop.called
def test_stop_paused_container(loop, vm):
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="paused"):
with asyncio_patch("gns3server.modules.docker.DockerVM.unpause") as mock_unpause:
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
loop.run_until_complete(asyncio.async(vm.stop()))
mock_query.assert_called_with("POST", "containers/e90e34656842/stop", params={"t": 5})
assert mock_unpause.called
def test_update(loop, vm):
response = {
"Id": "e90e34656806",
"Warnings": []
}
with asyncio_patch("gns3server.modules.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
loop.run_until_complete(asyncio.async(vm.update()))
mock_query.assert_any_call("DELETE", "containers/e90e34656842", params={"force": 1})
mock_query.assert_any_call("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Privileged": True
},
"NetworkDisabled": True,
"Name": "test",
"Image": "ubuntu"
})


def test_remove(loop, vm):
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="stopped"):
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
loop.run_until_complete(asyncio.async(vm.remove()))
mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1})


def test_remove_paused(loop, vm):
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="paused"):
with asyncio_patch("gns3server.modules.docker.DockerVM.unpause") as mock_unpause:
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
loop.run_until_complete(asyncio.async(vm.remove()))
mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1})
assert mock_unpause.called


def test_remove_running(loop, vm):
with asyncio_patch("gns3server.modules.docker.DockerVM._get_container_state", return_value="running"):
with asyncio_patch("gns3server.modules.docker.DockerVM.stop") as mock_stop:
with asyncio_patch("gns3server.modules.docker.Docker.query") as mock_query:
loop.run_until_complete(asyncio.async(vm.remove()))
mock_query.assert_called_with("DELETE", "containers/e90e34656842", params={"force": 1})
assert mock_stop.called


def test_close(loop, vm, port_manager):
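    # close() removes the container and must release the UDP port reserved for the NIO.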
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
with asyncio_patch("gns3server.modules.docker.DockerVM.remove") as mock_remove:
loop.run_until_complete(asyncio.async(vm.close()))
assert mock_remove.called
assert vm._closed is True
assert "4242" not in port_manager.udp_ports


def test_get_namespace(loop, vm):
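    # The network namespace is identified by the container PID ("State" -> "Pid")
    # reported by the Docker API.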
response = {
"State": {
"Pid": 42
}
}
with asyncio_patch("gns3server.modules.docker.Docker.query", return_value=response) as mock_query:
assert loop.run_until_complete(asyncio.async(vm._get_namespace())) == 42
mock_query.assert_called_with("GET", "containers/e90e34656842/json")


def test_add_ubridge_connection(loop, vm):
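    # Checks the uBridge commands that create the veth pair, move one end into the
    # container namespace and bridge it to the UDP NIO.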
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
vm._ubridge_hypervisor = MagicMock()
with asyncio_patch("gns3server.modules.docker.DockerVM._get_namespace", return_value=42):
loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 0)))
calls = [
call.send("docker create_veth gns3-veth0ext gns3-veth0int"),
call.send('docker move_to_ns gns3-veth0int 42'),
call.send('bridge create bridge0'),
call.send('bridge add_nio_linux_raw bridge0 gns3-veth0ext'),
call.send('bridge add_nio_udp bridge0 4242 127.0.0.1 4343'),
call.send('bridge start bridge0')
]
    # We need to check with any_order=True otherwise the mock is confused by asyncio
vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)


def test_add_ubridge_connection_with_capture(loop, vm):
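    # Same as test_add_ubridge_connection, but with packet capture enabled on the NIO,
    # which adds a "bridge start_capture" command.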
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
nio.startPacketCapture("/tmp/capture.pcap")
vm._ubridge_hypervisor = MagicMock()
with asyncio_patch("gns3server.modules.docker.DockerVM._get_namespace", return_value=42):
loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 0)))
calls = [
call.send("docker create_veth gns3-veth0ext gns3-veth0int"),
        call.send('docker move_to_ns gns3-veth0int 42'),
call.send('bridge create bridge0'),
call.send('bridge add_nio_linux_raw bridge0 gns3-veth0ext'),
call.send('bridge add_nio_udp bridge0 4242 127.0.0.1 4343'),
call.send('bridge start_capture bridge0 "/tmp/capture.pcap"'),
call.send('bridge start bridge0')
]
    # We need to check with any_order=True otherwise the mock is confused by asyncio
vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)


def test_add_ubridge_connection_invalid_nio(loop, vm):
with pytest.raises(ValueError):
loop.run_until_complete(asyncio.async(vm._add_ubridge_connection({}, 0)))


def test_add_ubridge_connection_invalid_adapter_number(loop, vm):
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
with pytest.raises(DockerError):
loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 12)))


def test_add_ubridge_connection_no_free_interface(loop, vm):
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
with pytest.raises(DockerError):
        # Pretend all gns3-veth*ext interface names are already taken so no free interface is available
interfaces = ["gns3-veth{}ext".format(index) for index in range(128)]
with patch("psutil.net_if_addrs", return_value=interfaces):
loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 0)))


def test_delete_ubridge_connection(loop, vm):
vm._ubridge_hypervisor = MagicMock()
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
with asyncio_patch("gns3server.modules.docker.DockerVM._get_namespace", return_value=42):
loop.run_until_complete(asyncio.async(vm._add_ubridge_connection(nio, 0)))
loop.run_until_complete(asyncio.async(vm._delete_ubridge_connection(0)))
calls = [
call.send("bridge delete bridge0"),
call.send('docker delete_veth gns3-veth0ext gns3-veth0int')
]
vm._ubridge_hypervisor.assert_has_calls(calls, any_order=True)


def test_adapter_add_nio_binding(vm, loop):
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
assert vm._ethernet_adapters[0].get_nio(0) == nio


def test_adapter_add_nio_binding_invalid_adapter(vm, loop):
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
with pytest.raises(DockerError):
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(12, nio)))


def test_adapter_remove_nio_binding(vm, loop):
nio = {"type": "nio_udp",
"lport": 4242,
"rport": 4343,
"rhost": "127.0.0.1"}
nio = vm.manager.create_nio(0, nio)
loop.run_until_complete(asyncio.async(vm.adapter_add_nio_binding(0, nio)))
with asyncio_patch("gns3server.modules.docker.DockerVM._delete_ubridge_connection") as delete_ubridge_mock:
loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(0)))
assert vm._ethernet_adapters[0].get_nio(0) is None
delete_ubridge_mock.assert_called_with(0)


def test_adapter_remove_nio_binding_invalid_adapter(vm, loop):
with pytest.raises(DockerError):
loop.run_until_complete(asyncio.async(vm.adapter_remove_nio_binding(12)))


def test_pull_image(loop, vm):
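    # pull_image() reads a chunked JSON progress stream from the Docker API; the fake
    # Response below hands the payload back in two pieces.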
class Response:
"""
        Simulate a response split across multiple packets
"""

        def __init__(self):
self._read = -1

        @asyncio.coroutine
def read(self, size):
self._read += 1
if self._read == 0:
return b'{"progress": "0/100",'
elif self._read == 1:
return '"id": 42}'
else:
                return None

    mock_query = MagicMock()
mock_query.content.return_value = Response()
with asyncio_patch("gns3server.modules.docker.Docker.http_query", return_value=mock_query) as mock:
images = loop.run_until_complete(asyncio.async(vm.pull_image("ubuntu")))
mock.assert_called_with("POST", "images/create", params={"fromImage": "ubuntu"})