mirror of https://github.com/GNS3/gns3-server
synced 2024-11-24 17:28:08 +00:00

Merge branch '2.0' into 2.1

This commit is contained in:
commit 51bf979284

CHANGELOG (35 lines)
@@ -1,5 +1,38 @@
 # Change Log

+## 2.0.0 beta 4 16/02/2017
+
+* Lock aiohttp to 1.2.0 because 1.3 causes a bug with Qt
+* Avoid a crash in some conditions when reading the serial console
+* Disallow export of projects with VirtualBox linked clones
+* Fix linked_clone property lost during topology conversion
+* Catch permission errors when restoring a snapshot
+* Fix a rare crash when closing a project
+* Fix an error when the filesystem raises errors during project conversion
+* Catch the error when a UNIX socket cannot be accessed
+* Return 0.0.0.0 if the compute name cannot be resolved
+* Raise an error if an invalid key is used in a node name
+* Greatly improve project loading speed
+* Fix a potential crash
+* Fix the server not starting when a remote compute is unavailable
+* Do not crash if {name} is passed in a node name
+* Fix import/export of Dynamips configurations
+* Simplify the conversion process from 1.3 to 2.0
+* Prevent corruption of VirtualBox VMs when using linked clones
+* Fix creation of Qemu images
+* Fix a rare race condition when stopping uBridge
+* Prevent renaming of a running VirtualBox linked VM
+* Avoid a crash when system permissions are broken
+* Do not crash when filesystem permissions break during execution
+* Fix a crash when filesystem permissions are broken
+* Fix a rare race condition when exporting debug information
+* Do not try to start the GNS3 VM if the name is none
+* Fix version check for VPCS
+* Fix PCAP capture for PPP links with IOU
+* Fix links not connected to the correct Ethernet switch port after conversion
+* Fix an error when there are no permissions on the symbols directory
+* Fix an error when converting some topologies from 1.3
+
 ## 2.0.0 beta 3 19/01/2017

 * Force the dependency on typing because otherwise it's broke on 3.4
@@ -45,7 +78,7 @@
 * Replace JSONDecodeError by ValueError (Python 3.4 compatibility)
 * Catch an error when we can't create the IOU directory

-## 1.5.3 12/01/2016
+## 1.5.3 12/01/2017

 * Fix sporadically systemd is unable to start gns3-server

Dockerfile (new file, 34 lines)
@@ -0,0 +1,34 @@
+# Dockerfile for GNS3 server development
+
+FROM ubuntu:16.04
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# Set the locale
+RUN locale-gen en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+ENV LC_ALL en_US.UTF-8
+
+RUN apt-get update && apt-get install -y software-properties-common
+RUN add-apt-repository ppa:gns3/ppa
+RUN apt-get update && apt-get install -y \
+    python3-pip \
+    python3-dev \
+    qemu-system-x86 \
+    qemu-system-arm \
+    qemu-kvm \
+    libvirt-bin \
+    x11vnc
+
+# Install uninstall to install dependencies
+RUN apt-get install -y vpcs ubridge
+
+ADD . /server
+WORKDIR /server
+
+RUN pip3 install -r /server/requirements.txt
+
+EXPOSE 3080
+
+CMD python3 -m gns3server --local
README.rst (10 lines)
@@ -71,6 +71,16 @@ To run tests use:
   py.test -v


+Docker container
+****************
+
+For development, you can run the GNS3 server in a container:
+
+.. code:: bash
+
+  bash scripts/docker_dev_server.sh
+
+
 Run as daemon (Unix only)
 **************************

@@ -19,14 +19,16 @@
 VirtualBox VM instance.
 """

-import sys
-import shlex
 import re
 import os
-import tempfile
+import sys
 import json
+import uuid
+import shlex
+import shutil
 import socket
 import asyncio
+import tempfile
 import xml.etree.ElementTree as ET

 from gns3server.utils import parse_version
@@ -209,7 +211,16 @@ class VirtualBoxVM(BaseNode):
         if os.path.exists(self._linked_vbox_file()):
             tree = ET.parse(self._linked_vbox_file())
             machine = tree.getroot().find("{http://www.virtualbox.org/}Machine")
-            if machine is not None:
+            if machine is not None and machine.get("uuid") != "{" + self.id + "}":
+
+                for image in tree.getroot().findall("{http://www.virtualbox.org/}Image"):
+                    currentSnapshot = machine.get("currentSnapshot")
+                    if currentSnapshot:
+                        newSnapshot = re.sub("\{.*\}", "{" + str(uuid.uuid4()) + "}", currentSnapshot)
+                        shutil.move(os.path.join(self.working_dir, self._vmname, "Snapshots", currentSnapshot) + ".vdi",
+                                    os.path.join(self.working_dir, self._vmname, "Snapshots", newSnapshot) + ".vdi")
+                        image.set("uuid", newSnapshot)
+
                 machine.set("uuid", "{" + self.id + "}")
                 tree.write(self._linked_vbox_file())

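
For reference, a minimal standalone sketch (hypothetical .vbox content, not taken from the commit) of the namespaced ElementTree lookups used above: every element in a .vbox file lives in the http://www.virtualbox.org/ namespace, so find()/findall() need the prefixed tag names before the Machine and snapshot Image UUIDs can be rewritten.

.. code:: python

    import uuid
    import xml.etree.ElementTree as ET

    VBOX_NS = "{http://www.virtualbox.org/}"
    sample = """<VirtualBox xmlns="http://www.virtualbox.org/">
      <Machine uuid="{11111111-1111-1111-1111-111111111111}"
               currentSnapshot="{22222222-2222-2222-2222-222222222222}"/>
      <Image uuid="{22222222-2222-2222-2222-222222222222}"/>
    </VirtualBox>"""

    root = ET.fromstring(sample)
    machine = root.find(VBOX_NS + "Machine")   # returns None without the namespace prefix
    node_id = "{" + str(uuid.uuid4()) + "}"    # stands in for the node's self.id
    if machine is not None and machine.get("uuid") != node_id:
        for image in root.findall(VBOX_NS + "Image"):
            image.set("uuid", "{" + str(uuid.uuid4()) + "}")   # fresh UUID for the snapshot disk
        machine.set("uuid", node_id)
    print(ET.tostring(root, encoding="unicode"))
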
@@ -367,6 +367,7 @@ class Controller:
         return project

     def remove_project(self, project):
-        del self._projects[project.id]
+        if project.id in self._projects:
+            del self._projects[project.id]

     @asyncio.coroutine
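
A tiny illustration (toy dict rather than the controller) of what the added membership check buys: removing a project that is already gone becomes a no-op instead of a KeyError.

.. code:: python

    projects = {"p1": "project object"}

    def remove_project(project_id):
        # Mirrors the guarded delete above: only remove entries that still exist.
        if project_id in projects:
            del projects[project_id]

    remove_project("p1")
    remove_project("p1")   # second call no longer raises KeyError
    print(projects)        # {}
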
@@ -136,6 +136,8 @@ def _export_project_file(project, path, z, include_images, keep_compute_id, allo
     if "topology" in topology:
         if "nodes" in topology["topology"]:
             for node in topology["topology"]["nodes"]:
+                if node["node_type"] == "virtualbox" and node.get("properties", {}).get("linked_clone"):
+                    raise aiohttp.web.HTTPConflict(text="Topology with a linked {} clone could not be exported. Use qemu instead.".format(node["node_type"]))
                 if not allow_all_nodes and node["node_type"] in ["virtualbox", "vmware", "cloud"]:
                     raise aiohttp.web.HTTPConflict(text="Topology with a {} could not be exported".format(node["node_type"]))

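
A quick sketch (sample node dictionaries, not the exporter itself) of how the chained .get() calls behave: a node without a "properties" key evaluates to a falsy value, so only explicit VirtualBox linked clones are refused.

.. code:: python

    nodes = [
        {"node_type": "virtualbox", "properties": {"linked_clone": True}},   # refused
        {"node_type": "virtualbox", "properties": {"linked_clone": False}},  # allowed
        {"node_type": "virtualbox"},                                         # allowed, no properties key
    ]
    for node in nodes:
        refused = node["node_type"] == "virtualbox" and node.get("properties", {}).get("linked_clone")
        print(node["node_type"], "refused" if refused else "allowed")
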
@@ -536,7 +536,7 @@ class Project:
     @asyncio.coroutine
     def close(self, ignore_notification=False):
         yield from self.stop_all()
-        for compute in self._project_created_on_compute:
+        for compute in list(self._project_created_on_compute):
             try:
                 yield from compute.post("/projects/{}/close".format(self._id), dont_connect=True)
             # We don't care if a compute is down at this step
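
A minimal sketch (toy set, not the Project class) of the crash the list(...) copy avoids: mutating a set while iterating over it raises RuntimeError, whereas iterating over a snapshot copy stays safe even if entries are removed along the way.

.. code:: python

    computes = {"compute-a", "compute-b"}
    try:
        for compute in computes:
            computes.discard(compute)      # simulates cleanup happening while closing
    except RuntimeError as error:
        print("direct iteration failed:", error)

    computes = {"compute-a", "compute-b"}
    for compute in list(computes):         # iterate over a copy instead
        computes.discard(compute)
    print("copy iteration finished, remaining:", computes)
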
@@ -20,6 +20,7 @@ import os
 import uuid
 import shutil
 import asyncio
+import aiohttp.web
 from datetime import datetime, timezone


@@ -80,10 +81,13 @@ class Snapshot:
         # We don't send close notif to clients because the close / open dance is purely internal
         yield from self._project.close(ignore_notification=True)
         self._project.controller.notification.emit("snapshot.restored", self.__json__())
-        if os.path.exists(os.path.join(self._project.path, "project-files")):
-            shutil.rmtree(os.path.join(self._project.path, "project-files"))
-        with open(self._path, "rb") as f:
-            project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path)
+        try:
+            if os.path.exists(os.path.join(self._project.path, "project-files")):
+                shutil.rmtree(os.path.join(self._project.path, "project-files"))
+            with open(self._path, "rb") as f:
+                project = yield from import_project(self._project.controller, self._project.id, f, location=self._project.path)
+        except (OSError, PermissionError) as e:
+            raise aiohttp.web.HTTPConflict(text=str(e))
         yield from project.open()
         return project

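
A simplified sketch (hypothetical helper, not the Snapshot class) of the error translation above: filesystem failures during the restore now surface to the client as an HTTP 409 Conflict instead of an unhandled exception.

.. code:: python

    import aiohttp.web

    def restore_files(path):
        try:
            # Simulate the filesystem refusing the rmtree/open performed during restore.
            raise PermissionError("cannot remove {}".format(path))
        except (OSError, PermissionError) as error:
            raise aiohttp.web.HTTPConflict(text=str(error))

    try:
        restore_files("/projects/demo/project-files")
    except aiohttp.web.HTTPConflict as conflict:
        print(conflict.status, conflict.text)   # 409 cannot remove /projects/demo/project-files
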
@@ -321,10 +321,12 @@ def _convert_1_3_later(topo, topo_path):
             node["properties"]["ram"] = PLATFORMS_DEFAULT_RAM[old_node["type"].lower()]
         elif old_node["type"] == "VMwareVM":
             node["node_type"] = "vmware"
+            node["properties"]["linked_clone"] = old_node.get("linked_clone", False)
             if node["symbol"] is None:
                 node["symbol"] = ":/symbols/vmware_guest.svg"
         elif old_node["type"] == "VirtualBoxVM":
             node["node_type"] = "virtualbox"
+            node["properties"]["linked_clone"] = old_node.get("linked_clone", False)
             if node["symbol"] is None:
                 node["symbol"] = ":/symbols/vbox_guest.svg"
         elif old_node["type"] == "IOUDevice":
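
A short sketch (hypothetical 1.3 node dictionaries) of the defaulting added above: topologies written before linked_clone existed simply fall back to False during conversion.

.. code:: python

    old_nodes = [
        {"type": "VirtualBoxVM", "linked_clone": True},
        {"type": "VMwareVM"},                            # older topology, no linked_clone key
    ]
    for old_node in old_nodes:
        node = {"properties": {}}
        node["properties"]["linked_clone"] = old_node.get("linked_clone", False)
        print(old_node["type"], node["properties"])
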
@@ -53,7 +53,7 @@ class CrashReport:
     Report crash to a third party service
     """

-    DSN = "sync+https://b7430bad849c4b88b3a928032d6cce5e:f140bfdd2ebb4bf4b929c002b45b2357@sentry.io/38482"
+    DSN = "sync+https://83564b27a6f6475488a3eb74c78f1760:ed5ac7c6d3f7428d960a84da98450b69@sentry.io/38482"
     if hasattr(sys, "frozen"):
         cacert = get_resource("cacert.pem")
         if cacert is not None and os.path.isfile(cacert):
@@ -34,6 +34,7 @@ class SerialReaderWriterProtocol(asyncio.Protocol):

     def __init__(self):
         self._output = asyncio.StreamReader()
+        self._closed = False
         self.transport = None

     def read(self, n=-1):
@@ -54,9 +55,11 @@ class SerialReaderWriterProtocol(asyncio.Protocol):
         self.transport = transport

     def data_received(self, data):
-        self._output.feed_data(data)
+        if not self._closed:
+            self._output.feed_data(data)

     def close(self):
+        self._closed = True
         self._output.feed_eof()

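
A toy demonstration (standalone, not the server's code path) of why the _closed flag matters: asyncio.StreamReader refuses feed_data() after feed_eof(), which is exactly what would happen if the serial transport delivered bytes after close().

.. code:: python

    import asyncio

    async def main():
        reader = asyncio.StreamReader()
        reader.feed_data(b"console output\n")
        reader.feed_eof()                      # what close() does to the reader
        closed = True                          # the flag set in close()
        late = b"late bytes from the device"
        if not closed:
            reader.feed_data(late)             # without the guard this raises AssertionError
        else:
            print("dropped %d late bytes after close" % len(late))

    asyncio.run(main())
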
@@ -25,3 +25,14 @@

 __version__ = "2.1.0dev1"
 __version_info__ = (2, 1, 0, -99)
+
+# If it's a git checkout try to add the commit
+if "dev" in __version__:
+    try:
+        import os
+        import subprocess
+        if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".git")):
+            r = subprocess.run(["git", "rev-parse", "--short", "HEAD"], stdout=subprocess.PIPE).stdout.decode().strip("\n")
+            __version__ += "-" + r
+    except Exception as e:
+        print(e)
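
A standalone sketch (any git checkout, simplified handling) of the version-suffix logic: a dev version string gains the short commit hash, e.g. 2.1.0dev1-<short hash>, and falls back to the plain version when git is unavailable.

.. code:: python

    import subprocess

    version = "2.1.0dev1"
    if "dev" in version:
        try:
            commit = subprocess.run(["git", "rev-parse", "--short", "HEAD"],
                                    stdout=subprocess.PIPE).stdout.decode().strip("\n")
            if commit:
                version += "-" + commit
        except Exception as error:             # any git failure keeps the plain version
            print(error)
    print(version)
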
@@ -1,7 +1,7 @@
 jsonschema>=2.4.0
-aiohttp>=1.2.0
+aiohttp==1.2.0
 aiohttp_cors>=0.4.0
-yarl>=0.8.1
+yarl>=0.9.6
 typing>=3.5.3.0 # Otherwise yarl fail with python 3.4
 Jinja2>=2.7.3
 raven>=5.23.0
scripts/docker_dev_server.sh (new file, 23 lines)
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# Copyright (C) 2016 GNS3 Technologies Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# A Docker server used to locally test a remote GNS3 server
+
+docker build -t gns3-server .
+docker run -i -h gns3vm -p 8001:8001/tcp -t gns3-server python3 -m gns3server --local --port 8001
+
+
@@ -191,7 +191,7 @@ def test_export_disallow_some_type(tmpdir, project, async_run):
         "topology": {
             "nodes": [
                 {
-                    "node_type": "virtualbox"
+                    "node_type": "cloud"
                 }
             ]
         }
@@ -204,6 +204,24 @@ def test_export_disallow_some_type(tmpdir, project, async_run):
     z = async_run(export_project(project, str(tmpdir)))
     z = async_run(export_project(project, str(tmpdir), allow_all_nodes=True))

+    # VirtualBox is always disallowed
+    topology = {
+        "topology": {
+            "nodes": [
+                {
+                    "node_type": "virtualbox",
+                    "properties": {
+                        "linked_clone": True
+                    }
+                }
+            ]
+        }
+    }
+    with open(os.path.join(path, "test.gns3"), 'w+') as f:
+        json.dump(topology, f)
+    with pytest.raises(aiohttp.web.HTTPConflict):
+        z = async_run(export_project(project, str(tmpdir), allow_all_nodes=True))
+

 def test_export_fix_path(tmpdir, project, async_run):
     """
@@ -427,7 +427,7 @@ def test_duplicate(project, async_run, controller):
     remote_vpcs = async_run(project.add_node(compute, "test", None, node_type="vpcs", properties={"startup_config": "test.cfg"}))

     # We allow node not allowed for standard import / export
-    remote_virtualbox = async_run(project.add_node(compute, "test", None, node_type="virtualbox", properties={"startup_config": "test.cfg"}))
+    remote_virtualbox = async_run(project.add_node(compute, "test", None, node_type="vmware", properties={"startup_config": "test.cfg"}))

     new_project = async_run(project.duplicate(name="Hello"))
     assert new_project.id != project.id
@@ -34,6 +34,7 @@
     "port_segment_size": 0,
     "first_port_name": null,
     "properties": {
+        "linked_clone": false,
         "acpi_shutdown": false,
         "adapter_type": "Intel PRO/1000 MT Desktop (82540EM)",
         "adapters": 1,

@@ -34,6 +34,7 @@
     "port_segment_size": 0,
     "first_port_name": null,
     "properties": {
+        "linked_clone": false,
         "acpi_shutdown": false,
         "adapter_type": "e1000",
         "adapters": 1,