mirror of https://github.com/GNS3/gns3-server synced 2024-11-28 03:08:14 +00:00

Merge branch '1.5' into 2.0

This commit is contained in:
Julien Duponchelle 2016-04-08 17:40:27 +02:00
commit 9e8fcab65c
No known key found for this signature in database
GPG Key ID: F1E2485547D4595D
34 changed files with 895 additions and 188 deletions

View File

@@ -29,7 +29,7 @@ You can check the server version with a simple curl command:
.. code-block:: shell-session
-# curl "http://localhost:8000/v1/version"
+# curl "http://localhost:3080/v1/version"
{
"version": "1.3.dev1"
}
@@ -39,7 +39,7 @@ The next step is to create a project.
.. code-block:: shell-session
-# curl -X POST "http://localhost:8000/v1/projects" -d '{"name": "test"}'
+# curl -X POST "http://localhost:3080/v1/projects" -d '{"name": "test"}'
{
"project_id": "42f9feee-3217-4104-981e-85d5f0a806ec",
"temporary": false,
@@ -50,7 +50,7 @@ With this project id we can now create two VPCS VM.
.. code-block:: shell-session
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 1"}'
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 1"}'
{
"console": 2000,
"name": "VPCS 1",
@@ -58,7 +58,7 @@ With this project id we can now create two VPCS VM.
"vm_id": "24d2e16b-fbef-4259-ae34-7bc21a41ee28"
}%
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 2"}'
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms" -d '{"name": "VPCS 2"}'
{
"console": 2001,
"name": "VPCS 2",
@@ -70,12 +70,12 @@ two UDP ports.
.. code-block:: shell-session
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
{
"udp_port": 10000
}
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/ports/udp" -d '{}'
{
"udp_port": 10001
}
@@ -86,7 +86,7 @@ communication is made by creating two UDP tunnels.
.. code-block:: shell-session
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/adapters/0/ports/0/nio" -d '{"lport": 10000, "rhost": "127.0.0.1", "rport": 10001, "type": "nio_udp"}'
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/adapters/0/ports/0/nio" -d '{"lport": 10000, "rhost": "127.0.0.1", "rport": 10001, "type": "nio_udp"}'
{
"lport": 10000,
"rhost": "127.0.0.1",
@@ -94,7 +94,7 @@ communication is made by creating two UDP tunnels.
"type": "nio_udp"
}
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/adapters/0/ports/0/nio" -d '{"lport": 10001, "rhost": "127.0.0.1", "rport": 10000, "type": "nio_udp"}'
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/adapters/0/ports/0/nio" -d '{"lport": 10001, "rhost": "127.0.0.1", "rport": 10000, "type": "nio_udp"}'
{
"lport": 10001,
"rhost": "127.0.0.1",
@@ -106,8 +106,8 @@ Now we can start the two VM
.. code-block:: shell-session
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/start" -d "{}"
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/24d2e16b-fbef-4259-ae34-7bc21a41ee28/start" -d "{}"
-# curl -X POST "http://localhost:8000/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/start" -d '{}'
+# curl -X POST "http://localhost:3080/v1/projects/42f9feee-3217-4104-981e-85d5f0a806ec/vpcs/vms/daefc24a-103c-4717-8e01-6517d931c1ae/start" -d '{}'
Everything should be started now. You can connect via telnet to the different VM.
The port is the field console in the create VM request.
@@ -190,7 +190,7 @@ complexity for the client due to the fact only some command on some VM can be
concurrent.
-Authentification
+Authentication
-----------------
In this version of the API you have no authentification system. If you
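Taken together, the commands above boil down to the following client-side sketch, assuming the `requests` package and a local server on the new default port 3080 (endpoints and payloads are exactly the ones shown in the curl examples):

    # Sketch of the documented workflow: create a project, two VPCS VMs,
    # two UDP ports, two UDP NIOs, then start both VMs.
    import requests

    BASE = "http://localhost:3080/v1"

    project = requests.post(BASE + "/projects", json={"name": "test"}).json()
    pid = project["project_id"]

    vm1 = requests.post(BASE + "/projects/{}/vpcs/vms".format(pid), json={"name": "VPCS 1"}).json()
    vm2 = requests.post(BASE + "/projects/{}/vpcs/vms".format(pid), json={"name": "VPCS 2"}).json()

    port1 = requests.post(BASE + "/projects/{}/ports/udp".format(pid), json={}).json()["udp_port"]
    port2 = requests.post(BASE + "/projects/{}/ports/udp".format(pid), json={}).json()["udp_port"]

    # One UDP tunnel endpoint (NIO) per VM, pointing at the other VM's port
    requests.post(BASE + "/projects/{}/vpcs/vms/{}/adapters/0/ports/0/nio".format(pid, vm1["vm_id"]),
                  json={"lport": port1, "rhost": "127.0.0.1", "rport": port2, "type": "nio_udp"})
    requests.post(BASE + "/projects/{}/vpcs/vms/{}/adapters/0/ports/0/nio".format(pid, vm2["vm_id"]),
                  json={"lport": port2, "rhost": "127.0.0.1", "rport": port1, "type": "nio_udp"})

    # Start both VMs; each VM's "console" field is the telnet port to connect to
    for vm in (vm1, vm2):
        requests.post(BASE + "/projects/{}/vpcs/vms/{}/start".format(pid, vm["vm_id"]), json={})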

View File

@@ -73,6 +73,7 @@ class DockerHandler:
adapters=request.json.get("adapters"),
console=request.json.get("console"),
console_type=request.json.get("console_type"),
+console_resolution=request.json.get("console_resolution", "1024x768"),
aux=request.json.get("aux")
)
for name, value in request.json.items():
@@ -277,8 +278,11 @@ class DockerHandler:
vm = docker_manager.get_vm(request.match_info["vm_id"], project_id=request.match_info["project_id"])
vm.name = request.json.get("name", vm.name)
vm.console = request.json.get("console", vm.console)
+vm.aux = request.json.get("aux", vm.aux)
+vm.console_resolution = request.json.get("console_resolution", vm.console_resolution)
vm.start_command = request.json.get("start_command", vm.start_command)
vm.environment = request.json.get("environment", vm.environment)
+vm.adapters = request.json.get("adapters", vm.adapters)
yield from vm.update()
response.json(vm)
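The update handler above follows a simple merge pattern: every field falls back to the VM's current value, so a request body only changes the keys it actually contains. A minimal stand-alone illustration (plain dicts stand in for the request body and the VM object):

    def apply_update(vm_state, request_json):
        # Mirrors the request.json.get(key, current_value) calls in the handler
        for key in ("name", "console", "aux", "console_resolution",
                    "start_command", "environment", "adapters"):
            vm_state[key] = request_json.get(key, vm_state[key])
        return vm_state

    vm = {"name": "c1", "console": 5900, "aux": 5001, "console_resolution": "1024x768",
          "start_command": None, "environment": None, "adapters": 1}
    apply_update(vm, {"console_resolution": "1280x1024", "adapters": 2})
    # Only console_resolution and adapters change; every other field keeps its value.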

View File

@@ -20,6 +20,7 @@ import asyncio
import json
import os
import psutil
+import tempfile
from ....web.route import Route
from ....schemas.project import PROJECT_OBJECT_SCHEMA, PROJECT_CREATE_SCHEMA, PROJECT_UPDATE_SCHEMA, PROJECT_FILE_LIST_SCHEMA, PROJECT_LIST_SCHEMA
@@ -56,6 +57,7 @@ class ProjectHandler:
description="Create a new project on the server",
status_codes={
201: "Project created",
+403: "You are not allowed to modify this property",
409: "Project already created"
},
output=PROJECT_OBJECT_SCHEMA,
@@ -301,4 +303,111 @@ class ProjectHandler:
except FileNotFoundError:
raise aiohttp.web.HTTPNotFound()
except PermissionError:
raise aiohttp.web.HTTPForbidden()
@classmethod
@Route.post(
r"/projects/{project_id}/files/{path:.+}",
description="Get a file of a project",
parameters={
"project_id": "The UUID of the project",
},
raw=True,
status_codes={
200: "Return the file",
403: "Permission denied",
404: "The path doesn't exist"
})
def write_file(request, response):
pm = ProjectManager.instance()
project = pm.get_project(request.match_info["project_id"])
path = request.match_info["path"]
path = os.path.normpath(path)
# Raise error if user try to escape
if path[0] == ".":
raise aiohttp.web.HTTPForbidden
path = os.path.join(project.path, path)
response.set_status(200)
try:
with open(path, 'wb+') as f:
while True:
packet = yield from request.content.read(512)
if not packet:
break
f.write(packet)
except FileNotFoundError:
raise aiohttp.web.HTTPNotFound()
except PermissionError:
raise aiohttp.web.HTTPForbidden()
@classmethod
@Route.get(
r"/projects/{project_id}/export",
description="Export a project as a portable archive",
parameters={
"project_id": "The UUID of the project",
},
raw=True,
status_codes={
200: "Return the file",
404: "The project doesn't exist"
})
def export_project(request, response):
pm = ProjectManager.instance()
project = pm.get_project(request.match_info["project_id"])
response.content_type = 'application/gns3z'
response.headers['CONTENT-DISPOSITION'] = 'attachment; filename="{}.gns3z"'.format(project.name)
response.enable_chunked_encoding()
# Very important: do not send a content length otherwise QT close the connection but curl can consume the Feed
response.content_length = None
response.start(request)
for data in project.export():
response.write(data)
yield from response.drain()
yield from response.write_eof()
@classmethod
@Route.post(
r"/projects/{project_id}/import",
description="Import a project from a portable archive",
parameters={
"project_id": "The UUID of the project",
},
raw=True,
output=PROJECT_OBJECT_SCHEMA,
status_codes={
200: "Project imported",
403: "You are not allowed to modify this property"
})
def import_project(request, response):
pm = ProjectManager.instance()
project_id = request.match_info["project_id"]
project = pm.create_project(project_id=project_id)
# We write the content to a temporary location
# and after extract all. It could be more optimal to stream
# this but it's not implemented in Python.
# 
# Spooled mean the file is temporary keep in ram until max_size
try:
with tempfile.SpooledTemporaryFile(max_size=10000) as temp:
while True:
packet = yield from request.content.read(512)
if not packet:
break
temp.write(packet)
project.import_zip(temp, gns3vm=bool(request.GET.get("gns3vm", "1")))
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not import the project: {}".format(e))
response.json(project)
response.set_status(201)
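A hedged sketch of how a client could drive the two new routes, assuming the `requests` package, a server on localhost:3080 and the v1 prefix used elsewhere in this commit:

    import requests

    BASE = "http://localhost:3080/v1"

    def download_project(project_id, dest="project.gns3z"):
        # The server streams the archive with chunked encoding, so read it chunk by chunk
        r = requests.get(BASE + "/projects/{}/export".format(project_id), stream=True)
        r.raise_for_status()
        with open(dest, "wb") as f:
            for chunk in r.iter_content(chunk_size=512):
                f.write(chunk)

    def upload_project(project_id, archive="project.gns3z", use_gns3vm=True):
        # The import handler buffers the raw body in a SpooledTemporaryFile and extracts it;
        # an empty gns3vm query parameter disables moving IOU/Qemu/Docker data to the GNS3 VM
        with open(archive, "rb") as f:
            r = requests.post(BASE + "/projects/{}/import".format(project_id),
                              params={"gns3vm": "1" if use_gns3vm else ""},
                              data=f)
        r.raise_for_status()
        return r.json()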

View File

@@ -340,13 +340,17 @@ class BaseVM:
return
if self._console_type == "vnc" and console is not None and console < 5900:
-raise VMError("VNC console require a port superior or equal to 5900")
+raise VMError("VNC console require a port superior or equal to 5900 currently it's {}".format(console))
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None
if console is not None:
-self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
+if self.console_type == "vnc":
+self._console = self._manager.port_manager.reserve_tcp_port(console, self._project, port_range_start=5900, port_range_end=6000)
+else:
+self._console = self._manager.port_manager.reserve_tcp_port(console, self._project)
log.info("{module}: '{name}' [{id}]: console port set to {port}".format(module=self.manager.module_name,
name=self.name,
id=self.id,
@@ -401,7 +405,7 @@ class BaseVM:
if path == "ubridge":
path = shutil.which("ubridge")
-if path is None:
+if path is None or len(path) == 0:
raise VMError("uBridge is not installed")
return path

View File

@@ -53,11 +53,13 @@ class DockerVM(BaseVM):
:param console: TCP console port
:param console_type: Console type
:param aux: TCP aux console port
+:param console_resolution: Resolution of the VNC display
"""
def __init__(self, name, vm_id, project, manager, image,
console=None, aux=None, start_command=None,
-adapters=None, environment=None, console_type="telnet"):
+adapters=None, environment=None, console_type="telnet",
+console_resolution="1024x768"):
super().__init__(name, vm_id, project, manager, console=console, aux=aux, allocate_aux=True, console_type=console_type)
self._image = image
@@ -68,6 +70,8 @@ class DockerVM(BaseVM):
self._ubridge_hypervisor = None
self._temporary_directory = None
self._telnet_servers = []
+self._x11vnc_process = None
+self._console_resolution = console_resolution
if adapters is None:
self.adapters = 1
@@ -90,6 +94,7 @@ class DockerVM(BaseVM):
"adapters": self.adapters,
"console": self.console,
"console_type": self.console_type,
+"console_resolution": self.console_resolution,
"aux": self.aux,
"start_command": self.start_command,
"environment": self.environment,
@@ -120,6 +125,14 @@ class DockerVM(BaseVM):
else:
self._start_command = command
+@property
+def console_resolution(self):
+return self._console_resolution
+@console_resolution.setter
+def console_resolution(self, resolution):
+self._console_resolution = resolution
@property
def environment(self):
return self._environment
@@ -159,6 +172,10 @@ class DockerVM(BaseVM):
binds.append("{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")))
+# We mount our own etc/network
+network_config = self._create_network_config()
+binds.append("{}:/etc/network:rw".format(network_config))
volumes = image_infos.get("ContainerConfig", {}).get("Volumes")
if volumes is None:
return binds
def _create_network_config(self):
"""
If network config is empty we create a sample config
"""
path = os.path.join(self.working_dir, "etc", "network")
os.makedirs(path, exist_ok=True)
os.makedirs(os.path.join(path, "if-up.d"), exist_ok=True)
os.makedirs(os.path.join(path, "if-down.d"), exist_ok=True)
os.makedirs(os.path.join(path, "if-pre-up.d"), exist_ok=True)
os.makedirs(os.path.join(path, "if-post-down.d"), exist_ok=True)
if not os.path.exists(os.path.join(path, "interfaces")):
with open(os.path.join(path, "interfaces"), "w+") as f:
f.write("""#
# This is a sample network config uncomment lines to configure the network
#
""")
for adapter in range(0, self.adapters):
f.write("""
# Static config for eth{adapter}
#auto eth{adapter}
#iface eth{adapter} inet static
#\taddress 192.168.{adapter}.2
#\tnetmask 255.255.255.0
#\tgateway 192.168.{adapter}.1
#\tup echo nameserver 192.168.{adapter}.1 > /etc/resolv.conf
# DHCP config for eth{adapter}
# auto eth{adapter}
# iface eth{adapter} inet dhcp""".format(adapter=adapter))
return path
@asyncio.coroutine
def create(self):
"""Creates the Docker container."""
@@ -199,7 +249,6 @@ class DockerVM(BaseVM):
"Entrypoint": image_infos.get("Config", {"Entrypoint": []})["Entrypoint"]
}
if params["Entrypoint"] is None:
params["Entrypoint"] = []
if self._start_command:
@@ -233,11 +282,13 @@ class DockerVM(BaseVM):
"""
# We need to save the console and state and restore it
console = self.console
+aux = self.aux
state = yield from self._get_container_state()
yield from self.close()
yield from self.create()
self.console = console
+self.aux = aux
if state == "running":
yield from self.start()
@@ -287,7 +338,7 @@ class DockerVM(BaseVM):
# We can not use the API because docker doesn't expose a websocket api for exec
# https://github.com/GNS3/gns3-gui/issues/1039
process = yield from asyncio.subprocess.create_subprocess_exec(
-"docker", "exec", "-i", self._cid, "/bin/sh", "-i",
+"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "sh", "-i",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)
@@ -304,8 +355,8 @@ class DockerVM(BaseVM):
self._display = self._get_free_display_port()
if shutil.which("Xvfb") is None or shutil.which("x11vnc") is None:
raise DockerError("Please install Xvfb and x11vnc before using the VNC support")
-self._xvfb_process = yield from asyncio.create_subprocess_exec("Xvfb", "-nolisten", "tcp", ":{}".format(self._display), "-screen", "0", "1024x768x16")
+self._xvfb_process = yield from asyncio.create_subprocess_exec("Xvfb", "-nolisten", "tcp", ":{}".format(self._display), "-screen", "0", self._console_resolution + "x16")
-self._x11vnc_process = yield from asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host)
+self._x11vnc_process = yield from asyncio.create_subprocess_exec("x11vnc", "-forever", "-nopw", "-shared", "-geometry", self._console_resolution, "-display", "WAIT:{}".format(self._display), "-rfbport", str(self.console), "-noncache", "-listen", self._manager.port_manager.console_host)
x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
yield from wait_for_file_creation(x11_socket)
@@ -433,10 +484,11 @@ class DockerVM(BaseVM):
try:
if self.console_type == "vnc":
-self._x11vnc_process.terminate()
-self._xvfb_process.terminate()
-yield from self._x11vnc_process.wait()
-yield from self._xvfb_process.wait()
+if self._x11vnc_process:
+self._x11vnc_process.terminate()
+self._xvfb_process.terminate()
+yield from self._x11vnc_process.wait()
+yield from self._xvfb_process.wait()
state = yield from self._get_container_state()
if state == "paused" or state == "running":
@@ -521,11 +573,17 @@ class DockerVM(BaseVM):
if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running():
return
-yield from self._ubridge_hypervisor.send("bridge delete bridge{name}".format(
-name=adapter_number))
adapter = self._ethernet_adapters[adapter_number]
-yield from self._ubridge_hypervisor.send('docker delete_veth {hostif}'.format(hostif=adapter.host_ifc))
+try:
+yield from self._ubridge_hypervisor.send("bridge delete bridge{name}".format(
+name=adapter_number))
+except UbridgeError as e:
+log.debug(str(e))
+try:
+yield from self._ubridge_hypervisor.send('docker delete_veth {hostif}'.format(hostif=adapter.host_ifc))
+except UbridgeError as e:
+log.debug(str(e))
@asyncio.coroutine
def _get_namespace(self):
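For an assumed display :42, console port 5900 and a 1280x1024 resolution, the two processes `_start_vnc` spawns above come out to the following argument lists (a sketch; the listen address is a placeholder):

    display = 42
    console = 5900
    console_resolution = "1280x1024"
    console_host = "127.0.0.1"

    xvfb_cmd = ["Xvfb", "-nolisten", "tcp", ":{}".format(display),
                "-screen", "0", console_resolution + "x16"]
    x11vnc_cmd = ["x11vnc", "-forever", "-nopw", "-shared",
                  "-geometry", console_resolution,
                  "-display", "WAIT:{}".format(display),
                  "-rfbport", str(console), "-noncache",
                  "-listen", console_host]
    # -> Xvfb -nolisten tcp :42 -screen 0 1280x1024x16
    # -> x11vnc -forever -nopw -shared -geometry 1280x1024 -display WAIT:42 -rfbport 5900 ...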

View File

@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/gns3/bin/busybox sh
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -19,6 +19,14 @@
# This script is injected into the container and launch before
# the start command of the container
#
+OLD_PATH="$PATH"
+PATH=/gns3/bin:/tmp/gns3/bin
+# bootstrap busybox commands
+if [ ! -d /tmp/gns3/bin ]; then
+busybox mkdir -p /tmp/gns3/bin
+/gns3/bin/busybox --install -s /tmp/gns3/bin
+fi
# Wait 2 seconds to settle the network interfaces
sleep 2
@@ -37,10 +45,14 @@ __EOF__
# configure loopback interface
ip link set dev lo up
-# configure eth interfaces
+# activate eth interfaces
sed -n 's/^ *\(eth[0-9]*\):.*/\1/p' < /proc/net/dev | while read dev; do
ip link set dev $dev up
done
+# configure network interfaces
+ifup -a -f
# continue normal docker startup
+PATH="$OLD_PATH"
exec "$@"

View File

@@ -32,7 +32,7 @@ import glob
log = logging.getLogger(__name__)
-from gns3server.utils.interfaces import get_windows_interfaces, is_interface_up
+from gns3server.utils.interfaces import interfaces, is_interface_up
from gns3server.utils.asyncio import wait_run_in_executor
from pkg_resources import parse_version
from uuid import UUID, uuid4
@@ -439,9 +439,9 @@ class Dynamips(BaseManager):
ethernet_device = nio_settings["ethernet_device"]
if sys.platform.startswith("win"):
# replace the interface name by the GUID on Windows
-interfaces = get_windows_interfaces()
+windows_interfaces = interfaces()
npf_interface = None
-for interface in interfaces:
+for interface in windows_interfaces:
if interface["name"] == ethernet_device:
npf_interface = interface["id"]
if not npf_interface:

View File

@@ -895,7 +895,7 @@ class Router(BaseVM):
"""
self.console = console
-yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=console))
+yield from self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self.console))
@asyncio.coroutine
def set_aux(self, aux):

View File

@@ -42,8 +42,8 @@ class PortManager:
server_config = Config.instance().get_section_config("Server")
remote_console_connections = server_config.getboolean("allow_remote_console")
-console_start_port_range = server_config.getint("console_start_port_range", 2001)
+console_start_port_range = server_config.getint("console_start_port_range", 5000)
-console_end_port_range = server_config.getint("console_end_port_range", 7000)
+console_end_port_range = server_config.getint("console_end_port_range", 10000)
self._console_port_range = (console_start_port_range, console_end_port_range)
log.debug("Console port range is {}-{}".format(console_start_port_range, console_end_port_range))
@@ -225,15 +225,15 @@ class PortManager:
old_port = port
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
-log.warning(msg)
+log.debug(msg)
-project.emit("log.warning", {"message": msg})
+#project.emit("log.warning", {"message": msg})
return port
-if port < self._console_port_range[0] or port > self._console_port_range[1]:
+if port < port_range_start or port > port_range_end:
old_port = port
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
msg = "TCP port {} is outside the range {}-{} on host {}. Port has been replaced by {}".format(old_port, port_range_start, port_range_end, self._console_host, port)
-log.warning(msg)
+log.debug(msg)
-project.emit("log.warning", {"message": msg})
+#project.emit("log.warning", {"message": msg})
return port
try:
PortManager._check_port(self._console_host, port, "TCP")
@@ -241,8 +241,8 @@ class PortManager:
old_port = port
port = self.get_free_tcp_port(project, port_range_start=port_range_start, port_range_end=port_range_end)
msg = "TCP port {} already in use on host {}. Port has been replaced by {}".format(old_port, self._console_host, port)
-log.warning(msg)
+log.debug(msg)
-project.emit("log.warning", {"message": msg})
+#project.emit("log.warning", {"message": msg})
return port
self._used_tcp_ports.add(port)

View File

@@ -15,11 +15,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import aiohttp
import os
-import aiohttp
import shutil
import asyncio
import hashlib
+import zipstream
+import zipfile
+import json
from uuid import UUID, uuid4
from .port_manager import PortManager
@@ -143,6 +146,8 @@ class Project:
@name.setter
def name(self, name):
+if "/" in name or "\\" in name:
+raise aiohttp.web.HTTPForbidden(text="Name can not contain path separator")
self._name = name
@property
@@ -460,3 +465,108 @@ class Project:
break
m.update(buf)
return m.hexdigest()
def export(self):
"""
Export the project as zip. It's a ZipStream object.
The file will be read chunk by chunk when you iterate on
the zip.
It will ignore some files like snapshots and
:returns: ZipStream object
"""
z = zipstream.ZipFile()
# topdown allo to modify the list of directory in order to ignore
# directory
for root, dirs, files in os.walk(self._path, topdown=True):
# Remove snapshots
if os.path.split(root)[-1:][0] == "project-files":
dirs[:] = [d for d in dirs if d != "snapshots"]
# Ignore log files and OS noise
files = [f for f in files if not f.endswith('_log.txt') and not f.endswith('.log') and f != '.DS_Store']
for file in files:
path = os.path.join(root, file)
# We rename the .gns3 project.gns3 to avoid the task to the client to guess the file name
if file.endswith(".gns3"):
z.write(path, "project.gns3")
else:
# We merge the data from all server in the same project-files directory
vm_directory = os.path.join(self._path, "servers", "vm")
if os.path.commonprefix([root, vm_directory]) == vm_directory:
z.write(path, os.path.relpath(path, vm_directory))
else:
z.write(path, os.path.relpath(path, self._path))
return z
def import_zip(self, stream, gns3vm=True):
"""
Import a project contain in a zip file
:param stream: A io.BytesIO of the zipfile
:param gns3vm: True move docker, iou and qemu to the GNS3 VM
"""
with zipfile.ZipFile(stream) as myzip:
myzip.extractall(self.path)
project_file = os.path.join(self.path, "project.gns3")
if os.path.exists(project_file):
with open(project_file) as f:
topology = json.load(f)
topology["project_id"] = self.id
topology["name"] = self.name
topology.setdefault("topology", {})
topology["topology"].setdefault("nodes", [])
topology["topology"]["servers"] = [
{
"id": 1,
"local": True,
"vm": False
}
]
# By default all node run on local server
for node in topology["topology"]["nodes"]:
node["server_id"] = 1
if gns3vm:
# Move to servers/vm directory the data that should be import on remote server
modules_to_vm = {
"qemu": "QemuVM",
"iou": "IOUDevice",
"docker": "DockerVM"
}
vm_directory = os.path.join(self.path, "servers", "vm", "project-files")
vm_server_use = False
for module, device_type in modules_to_vm.items():
module_directory = os.path.join(self.path, "project-files", module)
if os.path.exists(module_directory):
os.makedirs(vm_directory, exist_ok=True)
shutil.move(module_directory, os.path.join(vm_directory, module))
# Patch node to use the GNS3 VM
for node in topology["topology"]["nodes"]:
if node["type"] == device_type:
node["server_id"] = 2
vm_server_use = True
# We use the GNS3 VM. We need to add the server to the list
if vm_server_use:
topology["topology"]["servers"].append({
"id": 2,
"vm": True,
"local": False
})
# Write the modified topology
with open(project_file, "w") as f:
json.dump(topology, f, indent=4)
# Rename to a human distinctive name
shutil.move(project_file, os.path.join(self.path, self.name + ".gns3"))
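A short sketch of how the new methods are meant to be consumed (essentially what the export and import handlers above do; `project` is assumed to be an existing Project instance):

    # export() returns a zipstream.ZipFile: iterating it yields the archive chunk by chunk
    with open("/tmp/{}.gns3z".format(project.name), "wb") as f:
        for chunk in project.export():
            f.write(chunk)

    # import_zip() extracts the archive and rewrites project.gns3 so nodes point at the
    # local server, or at the GNS3 VM for Docker/IOU/Qemu nodes when gns3vm=True
    with open("/tmp/backup.gns3z", "rb") as f:
        project.import_zip(f, gns3vm=False)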

View File

@@ -40,6 +40,7 @@ from ..base_vm import BaseVM
from ...schemas.qemu import QEMU_OBJECT_SCHEMA, QEMU_PLATFORMS
from ...utils.asyncio import monitor_process
from ...utils.images import md5sum
+from .qcow2 import Qcow2, Qcow2Error
import logging
log = logging.getLogger(__name__)
@@ -1233,90 +1234,45 @@ class QemuVM(BaseVM):
options = []
qemu_img_path = self._get_qemu_img()
-if self._hda_disk_image:
-if not os.path.isfile(self._hda_disk_image) or not os.path.exists(self._hda_disk_image):
-if os.path.islink(self._hda_disk_image):
-raise QemuError("hda disk image '{}' linked to '{}' is not accessible".format(self._hda_disk_image, os.path.realpath(self._hda_disk_image)))
-else:
-raise QemuError("hda disk image '{}' is not accessible".format(self._hda_disk_image))
-if self._linked_clone:
-hda_disk = os.path.join(self.working_dir, "hda_disk.qcow2")
-if not os.path.exists(hda_disk):
-# create the disk
-try:
-process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
-"backing_file={}".format(self._hda_disk_image),
-"-f", "qcow2", hda_disk)
-retcode = yield from process.wait()
-log.info("{} returned with {}".format(qemu_img_path, retcode))
-except (OSError, subprocess.SubprocessError) as e:
-raise QemuError("Could not create hda disk image {}".format(e))
-else:
-hda_disk = self._hda_disk_image
-options.extend(["-drive", 'file={},if={},index=0,media=disk'.format(hda_disk, self.hda_disk_interface)])
-if self._hdb_disk_image:
-if not os.path.isfile(self._hdb_disk_image) or not os.path.exists(self._hdb_disk_image):
-if os.path.islink(self._hdb_disk_image):
-raise QemuError("hdb disk image '{}' linked to '{}' is not accessible".format(self._hdb_disk_image, os.path.realpath(self._hdb_disk_image)))
-else:
-raise QemuError("hdb disk image '{}' is not accessible".format(self._hdb_disk_image))
-if self._linked_clone:
-hdb_disk = os.path.join(self.working_dir, "hdb_disk.qcow2")
-if not os.path.exists(hdb_disk):
-try:
-process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
-"backing_file={}".format(self._hdb_disk_image),
-"-f", "qcow2", hdb_disk)
-retcode = yield from process.wait()
-log.info("{} returned with {}".format(qemu_img_path, retcode))
-except (OSError, subprocess.SubprocessError) as e:
-raise QemuError("Could not create hdb disk image {}".format(e))
-else:
-hdb_disk = self._hdb_disk_image
-options.extend(["-drive", 'file={},if={},index=1,media=disk'.format(hdb_disk, self.hdb_disk_interface)])
-if self._hdc_disk_image:
-if not os.path.isfile(self._hdc_disk_image) or not os.path.exists(self._hdc_disk_image):
-if os.path.islink(self._hdc_disk_image):
-raise QemuError("hdc disk image '{}' linked to '{}' is not accessible".format(self._hdc_disk_image, os.path.realpath(self._hdc_disk_image)))
-else:
-raise QemuError("hdc disk image '{}' is not accessible".format(self._hdc_disk_image))
-if self._linked_clone:
-hdc_disk = os.path.join(self.working_dir, "hdc_disk.qcow2")
-if not os.path.exists(hdc_disk):
-try:
-process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
-"backing_file={}".format(self._hdc_disk_image),
-"-f", "qcow2", hdc_disk)
-retcode = yield from process.wait()
-log.info("{} returned with {}".format(qemu_img_path, retcode))
-except (OSError, subprocess.SubprocessError) as e:
-raise QemuError("Could not create hdc disk image {}".format(e))
-else:
-hdc_disk = self._hdc_disk_image
-options.extend(["-drive", 'file={},if={},index=2,media=disk'.format(hdc_disk, self.hdc_disk_interface)])
-if self._hdd_disk_image:
-if not os.path.isfile(self._hdd_disk_image) or not os.path.exists(self._hdd_disk_image):
-if os.path.islink(self._hdd_disk_image):
-raise QemuError("hdd disk image '{}' linked to '{}' is not accessible".format(self._hdd_disk_image, os.path.realpath(self._hdd_disk_image)))
-else:
-raise QemuError("hdd disk image '{}' is not accessible".format(self._hdd_disk_image))
-if self._linked_clone:
-hdd_disk = os.path.join(self.working_dir, "hdd_disk.qcow2")
-if not os.path.exists(hdd_disk):
-try:
-process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
-"backing_file={}".format(self._hdd_disk_image),
-"-f", "qcow2", hdd_disk)
-retcode = yield from process.wait()
-log.info("{} returned with {}".format(qemu_img_path, retcode))
-except (OSError, subprocess.SubprocessError) as e:
-raise QemuError("Could not create hdd disk image {}".format(e))
-else:
-hdd_disk = self._hdd_disk_image
-options.extend(["-drive", 'file={},if={},index=3,media=disk'.format(hdd_disk, self.hdd_disk_interface)])
+drives = ["a", "b", "c", "d"]
+for disk_index, drive in enumerate(drives):
+disk_image = getattr(self, "_hd{}_disk_image".format(drive))
+interface = getattr(self, "hd{}_disk_interface".format(drive))
+if not disk_image:
+continue
+disk_name = "hd" + drive
+if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
+if os.path.islink(disk_image):
+raise QemuError("{} disk image '{}' linked to '{}' is not accessible".format(disk_name, disk_image, os.path.realpath(disk_image)))
+else:
+raise QemuError("{} disk image '{}' is not accessible".format(disk_name, disk_image))
+if self._linked_clone:
+disk = os.path.join(self.working_dir, "{}_disk.qcow2".format(disk_name))
+if not os.path.exists(disk):
+# create the disk
+try:
+process = yield from asyncio.create_subprocess_exec(qemu_img_path, "create", "-o",
+"backing_file={}".format(disk_image),
+"-f", "qcow2", disk)
+retcode = yield from process.wait()
+log.info("{} returned with {}".format(qemu_img_path, retcode))
+except (OSError, subprocess.SubprocessError) as e:
+raise QemuError("Could not create {} disk image {}".format(disk_name, e))
+else:
+# The disk exists we check if the clone work
+try:
+qcow2 = Qcow2(disk)
+yield from qcow2.rebase(qemu_img_path, disk_image)
+except (Qcow2Error, OSError) as e:
+raise QemuError("Could not use qcow2 disk image {} for {} {}".format(disk_image, disk_name, e))
+else:
+disk = disk_image
+options.extend(["-drive", 'file={},if={},index={},media=disk'.format(disk, interface, disk_index)])
return options
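A worked example of what the refactored loop yields for a VM that only has hda and hdb images with linked clones enabled (paths and the ide interface are placeholders): each image is first cloned with qemu-img into a <name>_disk.qcow2 backed by the original file, then attached with its index taken from its position in "abcd".

    qemu_img_cmd = ["qemu-img", "create", "-o",
                    "backing_file=/images/ubuntu.qcow2",
                    "-f", "qcow2", "/projects/vm-1/hda_disk.qcow2"]

    options = [
        "-drive", "file=/projects/vm-1/hda_disk.qcow2,if=ide,index=0,media=disk",
        "-drive", "file=/projects/vm-1/hdb_disk.qcow2,if=ide,index=1,media=disk",
    ]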

View File

@@ -594,8 +594,10 @@ class VMware(BaseManager):
"""
if sys.platform.startswith("win"):
-from win32com.shell import shell, shellcon
-documents_folder = shell.SHGetSpecialFolderPath(None, shellcon.CSIDL_PERSONAL)
+import ctypes
+path = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
+ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, path)
+documents_folder = path.value
windows_type = sys.getwindowsversion().product_type
if windows_type == 2 or windows_type == 3:
return '{}\My Virtual Machines'.format(documents_folder)

View File

@@ -26,7 +26,7 @@ import asyncio
import tempfile
from gns3server.utils.telnet_server import TelnetServer
-from gns3server.utils.interfaces import interfaces, get_windows_interfaces
+from gns3server.utils.interfaces import interfaces
from gns3server.utils.asyncio import wait_for_file_creation, wait_for_named_pipe_creation
from collections import OrderedDict
from .vmware_error import VMwareError
@@ -144,6 +144,8 @@ class VMwareVM(BaseVM):
yield from self.manager.check_vmrun_version()
if self._linked_clone and not os.path.exists(os.path.join(self.working_dir, os.path.basename(self._vmx_path))):
+if self.manager.host_type == "player":
+raise VMwareError("Linked clones are not supported by VMware Player")
# create the base snapshot for linked clones
base_snapshot_name = "GNS3 Linked Base for clones"
vmsd_path = os.path.splitext(self._vmx_path)[0] + ".vmsd"
@@ -320,7 +322,7 @@ class VMwareVM(BaseVM):
yield from self._ubridge_hypervisor.send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=vnet,
interface=vmnet_interface))
elif sys.platform.startswith("win"):
-windows_interfaces = get_windows_interfaces()
+windows_interfaces = interfaces()
npf = None
source_mac = None
for interface in windows_interfaces:

Binary file not shown.

View File

@@ -0,0 +1,138 @@
#!/tmp/gns3/bin/sh
# script for udhcpc
# Copyright (c) 2008 Natanael Copa <natanael.copa@gmail.com>
UDHCPC="/gns3/etc/udhcpc"
UDHCPC_CONF="$UDHCPC/udhcpc.conf"
RESOLV_CONF="/etc/resolv.conf"
[ -f $UDHCPC_CONF ] && . $UDHCPC_CONF
export broadcast
export dns
export domain
export interface
export ip
export mask
export metric
export router
export subnet
#export PATH=/usr/bin:/bin:/usr/sbin:/sbin
run_scripts() {
local dir=$1
if [ -d $dir ]; then
for i in $dir/*; do
[ -f $i ] && $i
done
fi
}
deconfig() {
ip addr flush dev $interface
}
is_wifi() {
test -e /sys/class/net/$interface/phy80211
}
if_index() {
if [ -e /sys/class/net/$interface/ifindex ]; then
cat /sys/class/net/$interface/ifindex
else
ip link show dev $interface | head -n1 | cut -d: -f1
fi
}
calc_metric() {
local base=
if is_wifi; then
base=300
else
base=200
fi
echo $(( $base + $(if_index) ))
}
routes() {
[ -z "$router" ] && return
local gw= num=
while ip route del default via dev $interface 2>/dev/null; do
:
done
num=0
for gw in $router; do
ip route add 0.0.0.0/0 via $gw dev $interface \
metric $(( $num + ${IF_METRIC:-$(calc_metric)} ))
num=$(( $num + 1 ))
done
}
resolvconf() {
local i
[ -n "$IF_PEER_DNS" ] && [ "$IF_PEER_DNS" != "yes" ] && return
if [ "$RESOLV_CONF" = "no" ] || [ "$RESOLV_CONF" = "NO" ] \
|| [ -z "$RESOLV_CONF" ]; then
return
fi
echo -n > "$RESOLV_CONF"
[ -n "$domain" ] && echo "search $domain" >> "$RESOLV_CONF"
for i in $dns; do
echo "nameserver $i" >> "$RESOLV_CONF"
done
}
bound() {
ip addr add $ip/$mask ${broadcast:+broadcast $broadcast} dev $interface
ip link set dev $interface up
routes
resolvconf
}
renew() {
if ! ip addr show dev $interface | grep $ip/$mask; then
ip addr flush dev $interface
ip addr add $ip/$mask ${broadcast:+broadcast $broadcast} dev $interface
fi
local i
for i in $router; do
if ! ip route show | grep ^default | grep $i; then
routes
break
fi
done
if ! grep "^search $domain"; then
resolvconf
return
fi
for i in $dns; do
if ! grep "^nameserver $i"; then
resolvconf
return
fi
done
}
case "$1" in
deconfig|renew|bound)
run_scripts $UDHCPC/pre-$1
$1
run_scripts $UDHCPC/post-$1
;;
leasefail)
echo "udhcpc failed to get a DHCP lease" >&2
;;
nak)
echo "udhcpc received DHCP NAK" >&2
;;
*)
echo "Error: this script should be called from udhcpc" >&2
exit 1
;;
esac
exit 0

View File

@@ -112,7 +112,7 @@ def parse_arguments(argv):
config = Config.instance().get_section_config("Server")
defaults = {
"host": config.get("host", "0.0.0.0"),
-"port": config.get("port", 8000),
+"port": config.get("port", 3080),
"ssl": config.getboolean("ssl", False),
"certfile": config.get("certfile", ""),
"certkey": config.get("certkey", ""),

View File

@@ -43,6 +43,11 @@ DOCKER_CREATE_SCHEMA = {
"description": "console type",
"enum": ["telnet", "vnc"]
},
+"console_resolution": {
+"description": "console resolution for VNC",
+"type": ["string", "null"],
+"pattern": "^[0-9]+x[0-9]+$"
+},
"aux": {
"description": "auxilary TCP port",
"minimum": 1,
@@ -92,6 +97,11 @@ DOCKER_UPDATE_SCHEMA = {
"maximum": 65535,
"type": ["integer", "null"]
},
+"console_resolution": {
+"description": "console resolution for VNC",
+"type": ["string", "null"],
+"pattern": "^[0-9]+x[0-9]+$"
+},
"console_type": {
"description": "console type",
"enum": ["telnet", "vnc"]
@@ -143,13 +153,18 @@ DOCKER_OBJECT_SCHEMA = {
"description": "auxilary TCP port",
"minimum": 1,
"maximum": 65535,
-"type": ["integer", "null"]
+"type": "integer"
},
"console": {
"description": "console TCP port",
"minimum": 1,
"maximum": 65535,
-"type": ["integer", "null"]
+"type": "integer"
+},
+"console_resolution": {
+"description": "console resolution for VNC",
+"type": "string",
+"pattern": "^[0-9]+x[0-9]+$"
},
"console_type": {
"description": "console type",
@@ -196,7 +211,7 @@ DOCKER_OBJECT_SCHEMA = {
}
},
"additionalProperties": False,
-"required": ["vm_id", "project_id", "image", "container_id", "adapters", "aux", "console", "console_type", "start_command", "environment", "vm_directory"]
+"required": ["vm_id", "project_id", "image", "container_id", "adapters", "aux", "console", "console_type", "console_resolution", "start_command", "environment", "vm_directory"]
}
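The new `console_resolution` constraint can be exercised directly with the jsonschema library the server already depends on; a reduced schema fragment containing only the added property:

    import jsonschema

    fragment = {
        "type": "object",
        "properties": {
            "console_resolution": {
                "type": ["string", "null"],
                "pattern": "^[0-9]+x[0-9]+$"
            }
        }
    }

    jsonschema.validate({"console_resolution": "1280x1024"}, fragment)      # accepted
    try:
        jsonschema.validate({"console_resolution": "1280 x 1024"}, fragment)
    except jsonschema.ValidationError as e:
        print("rejected:", e.message)                                       # spaces don't match the pattern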

View File

@@ -148,11 +148,11 @@ class AsyncioTelnetServer:
return_when=asyncio.FIRST_COMPLETED)
for coro in done:
data = coro.result()
-# Console is closed
-if len(data) == 0:
-raise ConnectionResetError()
if coro == network_read:
+if network_reader.at_eof():
+raise ConnectionResetError()
network_read = asyncio.async(network_reader.read(READ_SIZE))
if IAC in data:
@@ -167,6 +167,9 @@ class AsyncioTelnetServer:
self._writer.write(data)
yield from self._writer.drain()
elif coro == reader_read:
+if self._reader.at_eof():
+raise ConnectionResetError()
reader_read = yield from self._get_reader(network_reader)
# Replicate the output on all clients

View File

@@ -163,6 +163,19 @@ def interfaces():
"mac_address": mac_address})
else:
try:
+import pywintypes
+import win32service
+import win32serviceutil
+try:
+if win32serviceutil.QueryServiceStatus("npf", None)[1] != win32service.SERVICE_RUNNING:
+raise aiohttp.web.HTTPInternalServerError(text="The NPF service is not running")
+except pywintypes.error as e:
+if e[0] == 1060:
+raise aiohttp.web.HTTPInternalServerError(text="The NPF service is not installed")
+else:
+raise aiohttp.web.HTTPInternalServerError(text="Could not check if the NPF service is running: {}".format(e[2]))
results = get_windows_interfaces()
except ImportError:
message = "pywin32 module is not installed, please install it on the server to get the available interface names"

View File

@@ -19,6 +19,7 @@ import json
import jsonschema
import asyncio
import aiohttp.web
+import asyncio
import logging
import sys
import jinja2

View File

@@ -1,5 +1,6 @@
jsonschema>=2.4.0
-aiohttp==0.21.2
+aiohttp>=0.21.5
Jinja2>=2.7.3
raven>=5.2.0
psutil>=3.0.0
+zipstream>=1.1.3

View File

@@ -24,6 +24,8 @@
function help {
echo "Usage:" >&2
echo "--with-openvpn: Install Open VPN" >&2
+echo "--with-iou: Install IOU" >&2
+echo "--with-i386-repository: Add i386 repositories require by IOU if they are not available on the system. Warning this will replace your source.list in order to use official ubuntu mirror" >&2
echo "--help: This help" >&2
}
@@ -42,8 +44,10 @@ fi
# Read the options
USE_VPN=0
+USE_IOU=0
+I386_REPO=0
-TEMP=`getopt -o h --long with-openvpn,help -n 'gns3-remote-install.sh' -- "$@"`
+TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,help -n 'gns3-remote-install.sh' -- "$@"`
if [ $? != 0 ]
then
help
@@ -58,6 +62,14 @@ while true ; do
USE_VPN=1
shift
;;
--with-iou)
USE_IOU=1
shift
;;
--with-i386-repository)
I386_REPO=1
shift
;;
-h|--help)
help
exit 1
@@ -73,17 +85,31 @@ set -e
export DEBIAN_FRONTEND="noninteractive"
log "Add GNS3 repository"
-cat > /etc/apt/sources.list.d/gns3.list << EOF
+cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
deb http://ppa.launchpad.net/gns3/ppa/ubuntu trusty main
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu trusty main
deb http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu trusty main
-EOF
+EOFLIST
if [ $I386_REPO == 1 ]
then
cat <<EOFLIST2 >> /etc/apt/sources.list
###### Ubuntu Main Repos
deb http://archive.ubuntu.com/ubuntu/ trusty main universe multiverse
deb-src http://archive.ubuntu.com/ubuntu/ trusty main universe multiverse
###### Ubuntu Update Repos
deb http://archive.ubuntu.com/ubuntu/ trusty-security main universe multiverse
deb http://archive.ubuntu.com/ubuntu/ trusty-updates main universe multiverse
deb-src http://archive.ubuntu.com/ubuntu/ trusty-security main universe multiverse
deb-src http://archive.ubuntu.com/ubuntu/ trusty-updates main universe multiverse
EOFLIST2
fi
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A2E3EF7B
log "Update system packages"
-dpkg --add-architecture i386
apt-get update
log "Upgrade packages"
@@ -107,53 +133,46 @@ fi
log "Add GNS3 to the docker group"
usermod -aG docker gns3
-log "IOU setup"
-#apt-get install -y gns3-iou
-# Force the host name to gns3vm
-hostnamectl set-hostname gns3vm
-# Force hostid for IOU
-dd if=/dev/zero bs=4 count=1 of=/etc/hostid
-# Block iou call. The server is down
-echo "127.0.0.254 xml.cisco.com" | tee --append /etc/hosts
+if [ $USE_IOU == 1 ]
+then
+log "IOU setup"
+dpkg --add-architecture i386
+apt-get update
+apt-get install -y gns3-iou
+# Force the host name to gns3vm
+hostnamectl set-hostname gns3vm
+# Force hostid for IOU
+dd if=/dev/zero bs=4 count=1 of=/etc/hostid
+# Block iou call. The server is down
+echo "127.0.0.254 xml.cisco.com" | tee --append /etc/hosts
+fi
log "Add gns3 to the kvm group"
usermod -aG kvm gns3
-log "Setup VDE network"
-apt-get install -y vde2 uml-utilities
-usermod -a -G vde2-net gns3
-cat <<EOF > /etc/network/interfaces.d/qemu0.conf
-# A vde network
-auto qemu0
-iface qemu0 inet static
-address 172.16.0.1
-netmask 255.255.255.0
-vde2-switch -t qemu0
-EOF
log "Setup GNS3 server"
+mkdir -p /etc/gns3
-#TODO: 1.4.5 allow /etc/gns3/gns3_server.conf it's cleaner
-cat <<EOF > /opt/gns3/gns3_server.conf
+cat <<EOFC > /etc/gns3/gns3_server.conf
[Server]
host = 0.0.0.0
-port = 8000
+port = 3080
images_path = /opt/gns3/images
projects_path = /opt/gns3/projects
report_errors = True
[Qemu]
enable_kvm = True
-EOF
+EOFC
+chown -R gns3:gns3 /etc/gns3
+chmod -R 700 /etc/gns3
-cat <<EOF > /etc/init/gns3.conf
+cat <<EOFI > /etc/init/gns3.conf
description "GNS3 server"
author "GNS3 Team"
@@ -175,7 +194,7 @@ end script
pre-stop script
echo "[`date`] GNS3 Stopping"
end script
-EOF
+EOFI
chown root:root /etc/init/gns3.conf
chmod 644 /etc/init/gns3.conf
@@ -193,17 +212,17 @@ if [ $USE_VPN == 1 ]
then
log "Setup VPN"
-cat <<EOF > /opt/gns3/gns3_server.conf
+cat <<EOFSERVER > /etc/gns3/gns3_server.conf
[Server]
host = 172.16.253.1
-port = 8000
+port = 3080
images_path = /opt/gns3/images
projects_path = /opt/gns3/projects
report_errors = True
[Qemu]
enable_kvm = True
-EOF
+EOFSERVER
log "Install packages for Open VPN"
@@ -221,7 +240,7 @@ UUID=$(uuid)
log "Update motd"
-cat <<EOF > /etc/update-motd.d/70-openvpn
+cat <<EOFMOTD > /etc/update-motd.d/70-openvpn
#!/bin/sh
echo ""
echo "_______________________________________________________________________________________________"
@@ -232,7 +251,7 @@ echo "And add it to your openvpn client."
echo ""
echo "apt-get remove nginx-light to disable the HTTP server."
echo "And remove this file with rm /etc/update-motd.d/70-openvpn"
-EOF
+EOFMOTD
chmod 755 /etc/update-motd.d/70-openvpn
@@ -250,7 +269,7 @@ chmod 600 /etc/openvpn/key.pem
[ -f /etc/openvpn/cert.pem ] || openssl x509 -req -in /etc/openvpn/csr.pem -out /etc/openvpn/cert.pem -signkey /etc/openvpn/key.pem -days 24855
log "Create client configuration"
-cat <<EOF > /root/client.ovpn
+cat <<EOFCLIENT > /root/client.ovpn
client
nobind
comp-lzo
@@ -302,7 +321,7 @@ server {
listen 8003;
root /usr/share/nginx/openvpn;
}
-EOF
+EOFCLIENT
[ -f /etc/nginx/sites-enabled/openvpn ] || ln -s /etc/nginx/sites-available/openvpn /etc/nginx/sites-enabled/
service nginx stop
service nginx start

View File

@@ -133,7 +133,7 @@ class Query:
if path is None:
return
with open(self._example_file_path(method, route), 'w+') as f:
-f.write("curl -i -X {} 'http://localhost:8000/v{}{}{}'".format(method, self._api_version, self._prefix, path))
+f.write("curl -i -X {} 'http://localhost:3080/v{}{}{}'".format(method, self._api_version, self._prefix, path))
if body:
f.write(" -d '{}'".format(re.sub(r"\n", "", json.dumps(json.loads(body), sort_keys=True))))
f.write("\n\n")

View File

@@ -30,7 +30,7 @@ from gns3server.hypervisor.docker import Docker
@pytest.fixture
def base_params():
"""Return standard parameters"""
-return {"name": "PC TEST 1", "image": "nginx", "start_command": "nginx-daemon", "adapters": 2, "environment": "YES=1\nNO=0", "console_type": "telnet"}
+return {"name": "PC TEST 1", "image": "nginx", "start_command": "nginx-daemon", "adapters": 2, "environment": "YES=1\nNO=0", "console_type": "telnet", "console_resolution": "1280x1024"}
@pytest.yield_fixture(autouse=True)
@@ -65,6 +65,7 @@ def test_docker_create(http_hypervisor, project, base_params):
assert response.json["image"] == "nginx"
assert response.json["adapters"] == 2
assert response.json["environment"] == "YES=1\nNO=0"
+assert response.json["console_resolution"] == "1280x1024"
def test_docker_start(http_hypervisor, vm):

View File

@@ -23,6 +23,7 @@ import uuid
import os
import asyncio
import aiohttp
+import zipfile
from unittest.mock import patch
from tests.utils import asyncio_patch
@@ -216,3 +217,40 @@ def test_get_file(http_hypervisor, tmpdir):
response = http_hypervisor.get("/projects/{project_id}/files/../hello".format(project_id=project.id), raw=True)
assert response.status == 403
def test_export(http_hypervisor, tmpdir, loop, project):
os.makedirs(project.path, exist_ok=True)
with open(os.path.join(project.path, 'a'), 'w+') as f:
f.write('hello')
response = http_hypervisor.get("/projects/{project_id}/export".format(project_id=project.id), raw=True)
assert response.status == 200
assert response.headers['CONTENT-TYPE'] == 'application/gns3z'
assert response.headers['CONTENT-DISPOSITION'] == 'attachment; filename="{}.gns3z"'.format(project.name)
with open(str(tmpdir / 'project.zip'), 'wb+') as f:
f.write(response.body)
with zipfile.ZipFile(str(tmpdir / 'project.zip')) as myzip:
with myzip.open("a") as myfile:
content = myfile.read()
assert content == b"hello"
def test_import(http_hypervisor, tmpdir, loop, project):
with zipfile.ZipFile(str(tmpdir / "test.zip"), 'w') as myzip:
myzip.writestr("demo", b"hello")
project_id = project.id
with open(str(tmpdir / "test.zip"), "rb") as f:
response = http_hypervisor.post("/projects/{project_id}/import".format(project_id=project_id), body=f.read(), raw=True)
assert response.status == 201
project = ProjectManager.instance().get_project(project_id=project_id)
with open(os.path.join(project.path, "demo")) as f:
content = f.read()
assert content == "hello"
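
The export and import handlers exercised above move a whole project as a zip archive in the HTTP body (served as application/gns3z on export, posted back verbatim on import). A minimal, standalone sketch of the same zip round-trip, using only the standard library; the function names are illustrative and not part of the server API:

.. code-block:: python

    import io
    import zipfile

    def pack_project(files):
        """Pack a mapping of relative path -> bytes into an in-memory zip archive."""
        buffer = io.BytesIO()
        with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as archive:
            for name, data in files.items():
                archive.writestr(name, data)
        return buffer.getvalue()

    def unpack_project(payload):
        """Read an exported archive back into a mapping of path -> bytes."""
        with zipfile.ZipFile(io.BytesIO(payload)) as archive:
            return {name: archive.read(name) for name in archive.namelist()}

    exported = pack_project({"demo": b"hello"})
    assert unpack_project(exported)["demo"] == b"hello"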


@ -219,7 +219,6 @@ def test_backup_projects(http_root, tmpdir, loop):
assert response.headers['CONTENT-TYPE'] == 'application/x-gtar' assert response.headers['CONTENT-TYPE'] == 'application/x-gtar'
with open(str(tmpdir / 'projects.tar'), 'wb+') as f: with open(str(tmpdir / 'projects.tar'), 'wb+') as f:
print(len(response.body))
f.write(response.body) f.write(response.body)
tar = tarfile.open(str(tmpdir / 'projects.tar'), 'r') tar = tarfile.open(str(tmpdir / 'projects.tar'), 'r')


@ -57,6 +57,7 @@ def test_json(vm, project):
'adapters': 1, 'adapters': 1,
'console': vm.console, 'console': vm.console,
'console_type': 'telnet', 'console_type': 'telnet',
'console_resolution': '1024x768',
'aux': vm.aux, 'aux': vm.aux,
'start_command': vm.start_command, 'start_command': vm.start_command,
'environment': vm.environment, 'environment': vm.environment,
@ -89,7 +90,10 @@ def test_create(loop, project, manager):
"HostConfig": "HostConfig":
{ {
"CapAdd": ["ALL"], "CapAdd": ["ALL"],
"Binds": ["{}:/gns3:ro".format(get_resource("hypervisor/docker/resources"))], "Binds": [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True "Privileged": True
}, },
"Volumes": {}, "Volumes": {},
@ -113,7 +117,7 @@ def test_create_vnc(loop, project, manager):
with asyncio_patch("gns3server.hypervisor.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images: with asyncio_patch("gns3server.hypervisor.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.hypervisor.docker.Docker.query", return_value=response) as mock: with asyncio_patch("gns3server.hypervisor.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu", console_type="vnc") vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu", console_type="vnc", console=5900)
vm._start_vnc = MagicMock() vm._start_vnc = MagicMock()
vm._display = 42 vm._display = 42
loop.run_until_complete(asyncio.async(vm.create())) loop.run_until_complete(asyncio.async(vm.create()))
@ -126,6 +130,7 @@ def test_create_vnc(loop, project, manager):
"CapAdd": ["ALL"], "CapAdd": ["ALL"],
"Binds": [ "Binds": [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")), "{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
'/tmp/.X11-unix/:/tmp/.X11-unix/' '/tmp/.X11-unix/:/tmp/.X11-unix/'
], ],
"Privileged": True "Privileged": True
@ -141,6 +146,7 @@ def test_create_vnc(loop, project, manager):
}) })
assert vm._start_vnc.called assert vm._start_vnc.called
assert vm._cid == "e90e34656806" assert vm._cid == "e90e34656806"
assert vm._console_type == "vnc"
def test_create_start_cmd(loop, project, manager): def test_create_start_cmd(loop, project, manager):
@ -161,7 +167,10 @@ def test_create_start_cmd(loop, project, manager):
"HostConfig": "HostConfig":
{ {
"CapAdd": ["ALL"], "CapAdd": ["ALL"],
"Binds": ["{}:/gns3:ro".format(get_resource("hypervisor/docker/resources"))], "Binds": [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True "Privileged": True
}, },
"Volumes": {}, "Volumes": {},
@ -194,7 +203,10 @@ def test_create_environment(loop, project, manager):
"HostConfig": "HostConfig":
{ {
"CapAdd": ["ALL"], "CapAdd": ["ALL"],
"Binds": ["{}:/gns3:ro".format(get_resource("hypervisor/docker/resources"))], "Binds": [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True "Privileged": True
}, },
"Env": ["YES=1", "NO=0"], "Env": ["YES=1", "NO=0"],
@ -241,7 +253,10 @@ def test_create_image_not_available(loop, project, manager):
"HostConfig": "HostConfig":
{ {
"CapAdd": ["ALL"], "CapAdd": ["ALL"],
"Binds": ["{}:/gns3:ro".format(get_resource("hypervisor/docker/resources"))], "Binds": [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True "Privileged": True
}, },
"Volumes": {}, "Volumes": {},
@ -438,6 +453,7 @@ def test_update(loop, vm):
} }
original_console = vm.console original_console = vm.console
original_aux = vm.aux
with asyncio_patch("gns3server.hypervisor.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images: with asyncio_patch("gns3server.hypervisor.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.hypervisor.docker.DockerVM._get_container_state", return_value="stopped"): with asyncio_patch("gns3server.hypervisor.docker.DockerVM._get_container_state", return_value="stopped"):
@ -452,7 +468,10 @@ def test_update(loop, vm):
"HostConfig": "HostConfig":
{ {
"CapAdd": ["ALL"], "CapAdd": ["ALL"],
"Binds": ["{}:/gns3:ro".format(get_resource("hypervisor/docker/resources"))], "Binds": [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True "Privileged": True
}, },
"Volumes": {}, "Volumes": {},
@ -465,6 +484,30 @@ def test_update(loop, vm):
"Cmd": ["/bin/sh"] "Cmd": ["/bin/sh"]
}) })
assert vm.console == original_console assert vm.console == original_console
assert vm.aux == original_aux
def test_update_vnc(loop, vm):
response = {
"Id": "e90e34656806",
"Warnings": []
}
vm.console_type = "vnc"
vm.console = 5900
vm._display = "display"
original_console = vm.console
original_aux = vm.aux
with asyncio_patch("gns3server.hypervisor.docker.DockerVM._start_vnc"):
with asyncio_patch("gns3server.hypervisor.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.hypervisor.docker.DockerVM._get_container_state", return_value="stopped"):
with asyncio_patch("gns3server.hypervisor.docker.Docker.query", return_value=response) as mock_query:
loop.run_until_complete(asyncio.async(vm.update()))
assert vm.console == original_console
assert vm.aux == original_aux
def test_update_running(loop, vm): def test_update_running(loop, vm):
@ -490,7 +533,10 @@ def test_update_running(loop, vm):
"HostConfig": "HostConfig":
{ {
"CapAdd": ["ALL"], "CapAdd": ["ALL"],
"Binds": ["{}:/gns3:ro".format(get_resource("hypervisor/docker/resources"))], "Binds": [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True "Privileged": True
}, },
"Volumes": {}, "Volumes": {},
@ -763,6 +809,7 @@ def test_mount_binds(vm, tmpdir):
dst = os.path.join(vm.working_dir, "test/experimental") dst = os.path.join(vm.working_dir, "test/experimental")
assert vm._mount_binds(image_infos) == [ assert vm._mount_binds(image_infos) == [
"{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")), "{}:/gns3:ro".format(get_resource("hypervisor/docker/resources")),
"{}:/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:{}".format(dst, "/test/experimental") "{}:{}".format(dst, "/test/experimental")
] ]
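
The recurring change to the container "Binds" in these tests adds a per-VM etc/network directory, mounted read-write at /etc/network, next to the read-only /gns3 resources mount and any volumes declared by the image. A sketch of how such a list could be assembled; directory names are illustrative, not the server's real paths:

.. code-block:: python

    import os

    def mount_binds(resources_dir, working_dir, image_volumes=()):
        # Read-only GNS3 helper scripts, read-write network configuration,
        # then one bind per volume declared by the Docker image.
        binds = [
            "{}:/gns3:ro".format(resources_dir),
            "{}:/etc/network:rw".format(os.path.join(working_dir, "etc", "network")),
        ]
        for volume in image_volumes:
            host_dir = os.path.join(working_dir, volume.strip("/"))
            binds.append("{}:{}".format(host_dir, volume))
        return binds

    print(mount_binds("/usr/share/gns3/docker/resources", "/tmp/vm-1", ["/test/experimental"]))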
@ -770,13 +817,14 @@ def test_mount_binds(vm, tmpdir):
def test_start_vnc(vm, loop): def test_start_vnc(vm, loop):
vm.console_resolution = "1280x1024"
with patch("shutil.which", return_value="/bin/x"): with patch("shutil.which", return_value="/bin/x"):
with asyncio_patch("gns3server.hypervisor.docker.docker_vm.wait_for_file_creation") as mock_wait: with asyncio_patch("gns3server.hypervisor.docker.docker_vm.wait_for_file_creation") as mock_wait:
with asyncio_patch("asyncio.create_subprocess_exec") as mock_exec: with asyncio_patch("asyncio.create_subprocess_exec") as mock_exec:
loop.run_until_complete(asyncio.async(vm._start_vnc())) loop.run_until_complete(asyncio.async(vm._start_vnc()))
assert vm._display is not None assert vm._display is not None
mock_exec.assert_any_call("Xvfb", "-nolisten", "tcp", ":{}".format(vm._display), "-screen", "0", "1024x768x16") mock_exec.assert_any_call("Xvfb", "-nolisten", "tcp", ":{}".format(vm._display), "-screen", "0", "1280x1024x16")
mock_exec.assert_any_call("x11vnc", "-forever", "-nopw", "-display", "WAIT:{}".format(vm._display), "-rfbport", str(vm.console), "-noncache", "-listen", "127.0.0.1") mock_exec.assert_any_call("x11vnc", "-forever", "-nopw", "-shared", "-geometry", "1280x1024", "-display", "WAIT:{}".format(vm._display), "-rfbport", str(vm.console), "-noncache", "-listen", "127.0.0.1")
mock_wait.assert_called_with("/tmp/.X11-unix/X{}".format(vm._display)) mock_wait.assert_called_with("/tmp/.X11-unix/X{}".format(vm._display))
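
The updated assertions pin down the Xvfb and x11vnc command lines for the configurable console resolution. A sketch that rebuilds those argument lists from a display number, a resolution string and a console port; nothing is executed and the values are examples:

.. code-block:: python

    def vnc_commands(display, resolution, console_port, depth=16):
        # Xvfb provides the virtual framebuffer, x11vnc exposes it on the console port.
        xvfb = ["Xvfb", "-nolisten", "tcp", ":{}".format(display),
                "-screen", "0", "{}x{}".format(resolution, depth)]
        x11vnc = ["x11vnc", "-forever", "-nopw", "-shared",
                  "-geometry", resolution,
                  "-display", "WAIT:{}".format(display),
                  "-rfbport", str(console_port),
                  "-noncache", "-listen", "127.0.0.1"]
        return xvfb, x11vnc

    xvfb_cmd, x11vnc_cmd = vnc_commands(42, "1280x1024", 5900)
    assert xvfb_cmd[-1] == "1280x1024x16"
    assert "-rfbport" in x11vnc_cmd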
@ -789,3 +837,17 @@ def test_start_aux(vm, loop):
with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=MagicMock()) as mock_exec: with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=MagicMock()) as mock_exec:
loop.run_until_complete(asyncio.async(vm._start_aux())) loop.run_until_complete(asyncio.async(vm._start_aux()))
def test_create_network_interfaces(vm):
vm.adapters = 5
network_config = vm._create_network_config()
assert os.path.exists(os.path.join(network_config, "interfaces"))
assert os.path.exists(os.path.join(network_config, "if-up.d"))
with open(os.path.join(network_config, "interfaces")) as f:
content = f.read()
assert "eth0" in content
assert "eth4" in content
assert "eth5" not in content


@ -323,11 +323,35 @@ def test_disk_options(vm, tmpdir, loop, fake_qemu_img_binary):
open(vm._hda_disk_image, "w+").close() open(vm._hda_disk_image, "w+").close()
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process: with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
loop.run_until_complete(asyncio.async(vm._disk_options())) options = loop.run_until_complete(asyncio.async(vm._disk_options()))
assert process.called assert process.called
args, kwargs = process.call_args args, kwargs = process.call_args
assert args == (fake_qemu_img_binary, "create", "-o", "backing_file={}".format(vm._hda_disk_image), "-f", "qcow2", os.path.join(vm.working_dir, "hda_disk.qcow2")) assert args == (fake_qemu_img_binary, "create", "-o", "backing_file={}".format(vm._hda_disk_image), "-f", "qcow2", os.path.join(vm.working_dir, "hda_disk.qcow2"))
assert options == ['-drive', 'file=' + os.path.join(vm.working_dir, "hda_disk.qcow2") + ',if=ide,index=0,media=disk']
def test_disk_options_multiple_disk(vm, tmpdir, loop, fake_qemu_img_binary):
vm._hda_disk_image = str(tmpdir / "test0.qcow2")
vm._hdb_disk_image = str(tmpdir / "test1.qcow2")
vm._hdc_disk_image = str(tmpdir / "test2.qcow2")
vm._hdd_disk_image = str(tmpdir / "test3.qcow2")
open(vm._hda_disk_image, "w+").close()
open(vm._hdb_disk_image, "w+").close()
open(vm._hdc_disk_image, "w+").close()
open(vm._hdd_disk_image, "w+").close()
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
options = loop.run_until_complete(asyncio.async(vm._disk_options()))
assert options == [
'-drive', 'file=' + os.path.join(vm.working_dir, "hda_disk.qcow2") + ',if=ide,index=0,media=disk',
'-drive', 'file=' + os.path.join(vm.working_dir, "hdb_disk.qcow2") + ',if=ide,index=1,media=disk',
'-drive', 'file=' + os.path.join(vm.working_dir, "hdc_disk.qcow2") + ',if=ide,index=2,media=disk',
'-drive', 'file=' + os.path.join(vm.working_dir, "hdd_disk.qcow2") + ',if=ide,index=3,media=disk'
]
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows") @pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
def test_set_process_priority(vm, loop, fake_qemu_img_binary): def test_set_process_priority(vm, loop, fake_qemu_img_binary):
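
The multi-disk test above fixes the mapping between the hda..hdd images and the generated -drive options: one IDE drive per configured image, indexed in order, backed by a qcow2 overlay in the working directory. A simplified sketch of that assembly; it skips the qemu-img create step the real coroutine also performs:

.. code-block:: python

    import os

    def disk_options(working_dir, disk_images):
        # disk_images maps "hda".."hdd" to a base image path (or None/missing).
        options = []
        for index, name in enumerate(("hda", "hdb", "hdc", "hdd")):
            if not disk_images.get(name):
                continue
            overlay = os.path.join(working_dir, "{}_disk.qcow2".format(name))
            options.extend(["-drive", "file={},if=ide,index={},media=disk".format(overlay, index)])
        return options

    print(disk_options("/tmp/vm", {"hda": "base0.qcow2", "hdb": "base1.qcow2"}))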


@ -49,8 +49,8 @@ def test_temporary_directory(project, manager):
def test_console(project, manager): def test_console(project, manager):
vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager) vm = VPCSVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", project, manager)
vm.console = 2111 vm.console = 5011
assert vm.console == 2111 assert vm.console == 5011
vm.console = None vm.console = None
assert vm.console is None assert vm.console is None


@ -32,7 +32,6 @@ def test_reserve_tcp_port():
with patch("gns3server.hypervisor.project.Project.emit") as mock_emit: with patch("gns3server.hypervisor.project.Project.emit") as mock_emit:
port = pm.reserve_tcp_port(2001, project) port = pm.reserve_tcp_port(2001, project)
assert port != 2001 assert port != 2001
assert mock_emit.call_args[0][0] == "log.warning"
def test_reserve_tcp_port_outside_range(): def test_reserve_tcp_port_outside_range():
@ -41,7 +40,6 @@ def test_reserve_tcp_port_outside_range():
with patch("gns3server.hypervisor.project.Project.emit") as mock_emit: with patch("gns3server.hypervisor.project.Project.emit") as mock_emit:
port = pm.reserve_tcp_port(80, project) port = pm.reserve_tcp_port(80, project)
assert port != 80 assert port != 80
assert mock_emit.call_args[0][0] == "log.warning"
def test_reserve_tcp_port_already_used_by_another_program(): def test_reserve_tcp_port_already_used_by_another_program():
@ -65,7 +63,6 @@ def test_reserve_tcp_port_already_used_by_another_program():
with patch("gns3server.hypervisor.project.Project.emit") as mock_emit: with patch("gns3server.hypervisor.project.Project.emit") as mock_emit:
port = pm.reserve_tcp_port(2001, project) port = pm.reserve_tcp_port(2001, project)
assert port != 2001 assert port != 2001
assert mock_emit.call_args[0][0] == "log.warning"
def test_reserve_tcp_port_already_used(): def test_reserve_tcp_port_already_used():
@ -89,7 +86,6 @@ def test_reserve_tcp_port_already_used():
with patch("gns3server.hypervisor.project.Project.emit") as mock_emit: with patch("gns3server.hypervisor.project.Project.emit") as mock_emit:
port = pm.reserve_tcp_port(2001, project) port = pm.reserve_tcp_port(2001, project)
assert port != 2001 assert port != 2001
assert mock_emit.call_args[0][0] == "log.warning"
def test_reserve_udp_port(): def test_reserve_udp_port():
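
The TCP reservation tests above now only check the fallback behaviour: asking for a busy or out-of-range port silently yields a different one, without the old log.warning emission. A standalone sketch of that behaviour using a plain socket probe; the real manager additionally remembers ports already handed out to other VMs in the project:

.. code-block:: python

    import socket

    def reserve_tcp_port(preferred, start=2001, end=2100, host="127.0.0.1"):
        # Try the requested port first, but only if it falls inside the allowed range,
        # then fall back to the first port in the range that can actually be bound.
        candidates = list(range(start, end + 1))
        if start <= preferred <= end:
            candidates = [preferred] + [p for p in candidates if p != preferred]
        for port in candidates:
            try:
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as probe:
                    probe.bind((host, port))
                return port
            except OSError:
                continue
        raise RuntimeError("no free TCP port in range {}-{}".format(start, end))

    print(reserve_tcp_port(80))    # out of range: falls back to a port in 2001-2100
    print(reserve_tcp_port(2001))  # returned as-is when nothing is bound to it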


@ -17,9 +17,12 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>. # along with this program. If not, see <http://www.gnu.org/licenses/>.
import os import os
import uuid
import json
import asyncio import asyncio
import pytest import pytest
import aiohttp import aiohttp
import zipfile
from uuid import uuid4 from uuid import uuid4
from unittest.mock import patch from unittest.mock import patch
@ -269,3 +272,140 @@ def test_emit(async_run):
(action, event, context) = async_run(queue.get(0.5)) (action, event, context) = async_run(queue.get(0.5))
assert action == "test" assert action == "test"
assert context["project_id"] == project.id assert context["project_id"] == project.id
def test_export(tmpdir):
project = Project()
path = project.path
os.makedirs(os.path.join(path, "vm-1", "dynamips"))
# The .gns3 file should be renamed to project.gns3 in order to simplify the import
with open(os.path.join(path, "test.gns3"), 'w+') as f:
f.write("{}")
with open(os.path.join(path, "vm-1", "dynamips", "test"), 'w+') as f:
f.write("HELLO")
with open(os.path.join(path, "vm-1", "dynamips", "test_log.txt"), 'w+') as f:
f.write("LOG")
os.makedirs(os.path.join(path, "project-files", "snapshots"))
with open(os.path.join(path, "project-files", "snapshots", "test"), 'w+') as f:
f.write("WORLD")
z = project.export()
with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
for data in z:
f.write(data)
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
with myzip.open("vm-1/dynamips/test") as myfile:
content = myfile.read()
assert content == b"HELLO"
assert 'test.gns3' not in myzip.namelist()
assert 'project.gns3' in myzip.namelist()
assert 'project-files/snapshots/test' not in myzip.namelist()
assert 'vm-1/dynamips/test_log.txt' not in myzip.namelist()
def test_export(tmpdir):
project = Project(project_id=str(uuid.uuid4()))
path = project.path
os.makedirs(os.path.join(path, "vm-1", "dynamips"))
# The .gns3 file should be renamed to project.gns3 in order to simplify the import
with open(os.path.join(path, "test.gns3"), 'w+') as f:
f.write("{}")
with open(os.path.join(path, "vm-1", "dynamips", "test"), 'w+') as f:
f.write("HELLO")
with open(os.path.join(path, "vm-1", "dynamips", "test_log.txt"), 'w+') as f:
f.write("LOG")
os.makedirs(os.path.join(path, "project-files", "snapshots"))
with open(os.path.join(path, "project-files", "snapshots", "test"), 'w+') as f:
f.write("WORLD")
os.makedirs(os.path.join(path, "servers", "vm", "project-files", "docker"))
with open(os.path.join(path, "servers", "vm", "project-files", "docker", "busybox"), 'w+') as f:
f.write("DOCKER")
z = project.export()
with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
for data in z:
f.write(data)
with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
with myzip.open("vm-1/dynamips/test") as myfile:
content = myfile.read()
assert content == b"HELLO"
assert 'test.gns3' not in myzip.namelist()
assert 'project.gns3' in myzip.namelist()
assert 'project-files/snapshots/test' not in myzip.namelist()
assert 'vm-1/dynamips/test_log.txt' not in myzip.namelist()
assert 'servers/vm/project-files/docker/busybox' not in myzip.namelist()
assert 'project-files/docker/busybox' in myzip.namelist()
def test_import(tmpdir):
project_id = str(uuid.uuid4())
project = Project(name="test", project_id=project_id)
topology = {
"project_id": str(uuid.uuid4()),
"name": "testtest",
"topology": {
"nodes": [
{
"server_id": 3,
"type": "VPCSDevice"
},
{
"server_id": 3,
"type": "QemuVM"
}
]
}
}
with open(str(tmpdir / "project.gns3"), 'w+') as f:
json.dump(topology, f)
with open(str(tmpdir / "b.png"), 'w+') as f:
f.write("B")
zip_path = str(tmpdir / "project.zip")
with zipfile.ZipFile(zip_path, 'w') as myzip:
myzip.write(str(tmpdir / "project.gns3"), "project.gns3")
myzip.write(str(tmpdir / "b.png"), "b.png")
myzip.write(str(tmpdir / "b.png"), "project-files/dynamips/test")
myzip.write(str(tmpdir / "b.png"), "project-files/qemu/test")
with open(zip_path, "rb") as f:
project.import_zip(f)
assert os.path.exists(os.path.join(project.path, "b.png"))
assert os.path.exists(os.path.join(project.path, "test.gns3"))
assert os.path.exists(os.path.join(project.path, "project-files/dynamips/test"))
assert os.path.exists(os.path.join(project.path, "servers/vm/project-files/qemu/test"))
with open(os.path.join(project.path, "test.gns3")) as f:
content = json.load(f)
assert content["name"] == "test"
assert content["project_id"] == project_id
assert content["topology"]["servers"] == [
{
"id": 1,
"local": True,
"vm": False
},
{
"id": 2,
"local": False,
"vm": True
},
]
assert content["topology"]["nodes"][0]["server_id"] == 1
assert content["topology"]["nodes"][1]["server_id"] == 2



@ -77,7 +77,7 @@ def test_parse_arguments(capsys, tmpdir):
assert run.parse_arguments([]).host == "192.168.1.2" assert run.parse_arguments([]).host == "192.168.1.2"
assert run.parse_arguments(["--port", "8002"]).port == 8002 assert run.parse_arguments(["--port", "8002"]).port == 8002
assert run.parse_arguments([]).port == 8000 assert run.parse_arguments([]).port == 3080
server_config["port"] = "8003" server_config["port"] = "8003"
assert run.parse_arguments([]).port == 8003 assert run.parse_arguments([]).port == 8003
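
The changed default documents the precedence the test relies on: an explicit --port argument wins, then the value from the server configuration, then the new built-in default of 3080. A minimal argparse sketch of that precedence; the real parser takes its defaults from the GNS3 configuration machinery, not from a dict:

.. code-block:: python

    import argparse

    def parse_arguments(argv, server_config=None):
        server_config = server_config or {}
        parser = argparse.ArgumentParser()
        # The configuration value (if any) becomes the default, so a CLI flag still overrides it.
        parser.add_argument("--port", type=int, default=int(server_config.get("port", 3080)))
        parser.add_argument("--host", default=server_config.get("host", "0.0.0.0"))
        return parser.parse_args(argv)

    assert parse_arguments([]).port == 3080
    assert parse_arguments(["--port", "8002"]).port == 8002
    assert parse_arguments([], {"port": "8003"}).port == 8003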