Mirror of https://github.com/GNS3/gns3-server (synced 2024-11-18 06:18:08 +00:00)

Merge branch '2.0' into 2.1

Commit cd17ef6b12
@@ -9,7 +9,8 @@ install:
- python setup.py install
- pip install -rdev-requirements.txt
script:
- py.test -v -s tests
- mkdir ‡
- py.test -v -s tests --basetemp=‡
deploy:
provider: pypi
user: noplay
@@ -1,5 +1,13 @@
# Change Log

## 2.0.0 02/05/2017

* Fix connection to websocket with last docker release
* Lower docker requirements in tests also
* Docker minimum api is 1.25
* Handling server disconnect error when docker daemon die
* Handle some invalid SVG images

## 2.0.0rc4 20/04/2017

* Fix a race condition when handling error at project opening
@@ -1,6 +1,6 @@
-rrequirements.txt

sphinx==1.5.5
sphinx==1.5.6
pytest==3.0.7
pep8==1.7.0
pytest-catchlog==1.2.2
@@ -344,7 +344,7 @@ class BaseManager:
# test the 2nd byte and check if the 13th bit (CAP_NET_RAW) is set
if struct.unpack("<IIIII", caps)[1] & 1 << 13:
return True
except OSError as e:
except (AttributeError, OSError) as e:
log.error("could not determine if CAP_NET_RAW capability is set for {}: {}".format(executable, e))

return False
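The hunk above widens the exception handling around the Linux file-capability check. A minimal standalone sketch of that check (assuming a Linux host and an example executable path; this is an illustration, not the project's code):

import os
import struct

CAP_NET_RAW = 13  # capability number, i.e. bit 13 of the permitted set

def has_cap_net_raw(executable):
    try:
        caps = os.getxattr(executable, "security.capability")
        # The v2 "security.capability" xattr is five little-endian 32-bit words:
        # magic/flags, permitted[0], inheritable[0], permitted[1], inheritable[1].
        return bool(struct.unpack("<IIIII", caps)[1] & (1 << CAP_NET_RAW))
    except (AttributeError, OSError):
        # AttributeError: os.getxattr is not available on this platform;
        # OSError: the file has no capability attribute set.
        return False

print(has_cap_net_raw("/usr/bin/dumpcap"))  # example path, varies per system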
@@ -541,6 +541,7 @@ class BaseNode:
self._ubridge_hypervisor = Hypervisor(self._project, self.ubridge_path, self.working_dir, server_host)
log.info("Starting new uBridge hypervisor {}:{}".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
yield from self._ubridge_hypervisor.start()
if self._ubridge_hypervisor:
log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
yield from self._ubridge_hypervisor.connect()

@@ -393,7 +393,7 @@ class DockerVM(BaseNode):
# We can not use the API because docker doesn't expose a websocket api for exec
# https://github.com/GNS3/gns3-gui/issues/1039
process = yield from asyncio.subprocess.create_subprocess_exec(
"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do /gns3/bin/busybox sh; done", "/dev/null",
"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do TERM=vt100 /gns3/bin/busybox sh; done", "/dev/null",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)
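For readers unfamiliar with the asyncio subprocess API used above, here is a generic, self-contained illustration (modern async/await syntax and a placeholder container id; it is not the project's console code): spawn "docker exec -i", feed a command on stdin and read the combined output.

import asyncio

async def run_in_container(cid, command=b"echo $TERM\n"):
    process = await asyncio.create_subprocess_exec(
        "docker", "exec", "-i", cid, "sh",
        stdin=asyncio.subprocess.PIPE,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT)
    # communicate() writes stdin, closes it and waits for the process to exit
    output, _ = await process.communicate(command)
    return output.decode()

if __name__ == "__main__":
    print(asyncio.run(run_in_container("my_container")))  # "my_container" is a placeholder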
@@ -188,22 +188,33 @@ class ATMSwitch(Device):
raise DynamipsError("Port {} is not allocated".format(port_number))

# remove VCs mapped with the port
pvc_entry = re.compile(r"""^([0-9]*):([0-9]*):([0-9]*)$""")
for source, destination in self._active_mappings.copy().items():
match_source_pvc = pvc_entry.search(source)
match_destination_pvc = pvc_entry.search(destination)
if match_source_pvc and match_destination_pvc:
if len(source) == 3 and len(destination) == 3:
# remove the virtual channels mapped with this port/nio
source_port, source_vpi, source_vci = map(int, match_source_pvc.group(1, 2, 3))
destination_port, destination_vpi, destination_vci = map(int, match_destination_pvc.group(1, 2, 3))
source_port, source_vpi, source_vci = source
destination_port, destination_vpi, destination_vci = destination
if port_number == source_port:
log.info('ATM switch "{name}" [{id}]: unmapping VCC between port {source_port} VPI {source_vpi} VCI {source_vci} and port {destination_port} VPI {destination_vpi} VCI {destination_vci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
source_vci=source_vci,
destination_port=destination_port,
destination_vpi=destination_vpi,
destination_vci=destination_vci))
yield from self.unmap_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci)
yield from self.unmap_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci)
else:
# remove the virtual paths mapped with this port/nio
source_port, source_vpi = map(int, source.split(':'))
destination_port, destination_vpi = map(int, destination.split(':'))
source_port, source_vpi = source
destination_port, destination_vpi = destination
if port_number == source_port:
log.info('ATM switch "{name}" [{id}]: unmapping VPC between port {source_port} VPI {source_vpi} and port {destination_port} VPI {destination_vpi}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
destination_port=destination_port,
destination_vpi=destination_vpi))
yield from self.unmap_vp(source_port, source_vpi, destination_port, destination_vpi)
yield from self.unmap_vp(destination_port, destination_vpi, source_port, source_vpi)

@@ -239,6 +250,14 @@ class ATMSwitch(Device):
if self.has_port(destination_port):
if (source_port, source_vpi, source_vci) not in self._active_mappings and \
(destination_port, destination_vpi, destination_vci) not in self._active_mappings:
log.info('ATM switch "{name}" [{id}]: mapping VCC between port {source_port} VPI {source_vpi} VCI {source_vci} and port {destination_port} VPI {destination_vpi} VCI {destination_vci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
source_vci=source_vci,
destination_port=destination_port,
destination_vpi=destination_vpi,
destination_vci=destination_vci))
yield from self.map_pvc(source_port, source_vpi, source_vci, destination_port, destination_vpi, destination_vci)
yield from self.map_pvc(destination_port, destination_vpi, destination_vci, source_port, source_vpi, source_vci)
else:
@@ -247,6 +266,12 @@ class ATMSwitch(Device):
destination_port, destination_vpi = map(int, destination.split(':'))
if self.has_port(destination_port):
if (source_port, source_vpi) not in self._active_mappings and (destination_port, destination_vpi) not in self._active_mappings:
log.info('ATM switch "{name}" [{id}]: mapping VPC between port {source_port} VPI {source_vpi} and port {destination_port} VPI {destination_vpi}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_vpi=source_vpi,
destination_port=destination_port,
destination_vpi=destination_vpi))
yield from self.map_vp(source_port, source_vpi, destination_port, destination_vpi)
yield from self.map_vp(destination_port, destination_vpi, source_port, source_vpi)

@@ -262,10 +287,10 @@ class ATMSwitch(Device):
"""

if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return

if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return

nio1 = self._nios[port1]
nio2 = self._nios[port2]
@@ -283,7 +308,7 @@ class ATMSwitch(Device):
port2=port2,
vpi2=vpi2))

self._active_mappings["{}:{}".format(port1, vpi1)] = "{}:{}".format(port2, vpi2)
self._active_mappings[(port1, vpi1)] = (port2, vpi2)

@asyncio.coroutine
def unmap_vp(self, port1, vpi1, port2, vpi2):
@@ -297,10 +322,10 @@ class ATMSwitch(Device):
"""

if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return

if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return

nio1 = self._nios[port1]
nio2 = self._nios[port2]
@@ -318,7 +343,7 @@ class ATMSwitch(Device):
port2=port2,
vpi2=vpi2))

del self._active_mappings["{}:{}".format(port1, vpi1)]
del self._active_mappings[(port1, vpi1)]

@asyncio.coroutine
def map_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2):
@@ -334,10 +359,10 @@ class ATMSwitch(Device):
"""

if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return

if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return

nio1 = self._nios[port1]
nio2 = self._nios[port2]
@@ -359,7 +384,7 @@ class ATMSwitch(Device):
vpi2=vpi2,
vci2=vci2))

self._active_mappings["{}:{}:{}".format(port1, vpi1, vci1)] = "{}:{}:{}".format(port2, vpi2, vci2)
self._active_mappings[(port1, vpi1, vci1)] = (port2, vpi2, vci2)

@asyncio.coroutine
def unmap_pvc(self, port1, vpi1, vci1, port2, vpi2, vci2):
@@ -375,10 +400,10 @@ class ATMSwitch(Device):
"""

if port1 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port1))
return

if port2 not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port2))
return

nio1 = self._nios[port1]
nio2 = self._nios[port2]
@@ -399,7 +424,7 @@ class ATMSwitch(Device):
port2=port2,
vpi2=vpi2,
vci2=vci2))
del self._active_mappings["{}:{}:{}".format(port1, vpi1, vci1)]
del self._active_mappings[(port1, vpi1, vci1)]

@asyncio.coroutine
def start_capture(self, port_number, output_file, data_link_type="DLT_ATM_RFC1483"):
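The ATM switch hunks above replace string keys such as "0:10:100" in _active_mappings with plain tuples, which removes the regex and split(':') parsing. A hypothetical standalone sketch of the idea (not the project's class):

active_mappings = {}

def map_vp(port1, vpi1, port2, vpi2):
    # store the mapping under a tuple key instead of "{}:{}".format(port1, vpi1)
    active_mappings[(port1, vpi1)] = (port2, vpi2)

def unmap_vp(port1, vpi1):
    del active_mappings[(port1, vpi1)]

def mappings_for_port(port_number):
    # tuple keys can be unpacked or indexed directly; no regex or split(':') needed
    return [(src, dst) for src, dst in active_mappings.items() if src[0] == port_number]

map_vp(1, 10, 2, 20)
print(mappings_for_port(1))  # [((1, 10), (2, 20))]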
@@ -103,7 +103,7 @@ class Bridge(Device):

:param nio: NIO instance to remove
"""

if self._hypervisor:
yield from self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
self._nios.remove(nio)

@@ -246,7 +246,7 @@ class EthernetSwitch(Device):
elif settings["type"] == "dot1q":
yield from self.set_dot1q_port(port_number, settings["vlan"])
elif settings["type"] == "qinq":
yield from self.set_qinq_port(port_number, settings["vlan"], settings["ethertype"])
yield from self.set_qinq_port(port_number, settings["vlan"], settings.get("ethertype"))

@asyncio.coroutine
def set_access_port(self, port_number, vlan_id):
@@ -191,9 +191,15 @@ class FrameRelaySwitch(Device):

# remove VCs mapped with the port
for source, destination in self._active_mappings.copy().items():
source_port, source_dlci = map(int, source.split(':'))
destination_port, destination_dlci = map(int, destination.split(':'))
source_port, source_dlci = source
destination_port, destination_dlci = destination
if port_number == source_port:
log.info('Frame Relay switch "{name}" [{id}]: unmapping VC between port {source_port} DLCI {source_dlci} and port {destination_port} DLCI {destination_dlci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_dlci=source_dlci,
destination_port=destination_port,
destination_dlci=destination_dlci))
yield from self.unmap_vc(source_port, source_dlci, destination_port, destination_dlci)
yield from self.unmap_vc(destination_port, destination_dlci, source_port, source_dlci)

@@ -224,6 +230,13 @@ class FrameRelaySwitch(Device):
destination_port, destination_dlci = map(int, destination.split(':'))
if self.has_port(destination_port):
if (source_port, source_dlci) not in self._active_mappings and (destination_port, destination_dlci) not in self._active_mappings:
log.info('Frame Relay switch "{name}" [{id}]: mapping VC between port {source_port} DLCI {source_dlci} and port {destination_port} DLCI {destination_dlci}'.format(name=self._name,
id=self._id,
source_port=source_port,
source_dlci=source_dlci,
destination_port=destination_port,
destination_dlci=destination_dlci))

yield from self.map_vc(source_port, source_dlci, destination_port, destination_dlci)
yield from self.map_vc(destination_port, destination_dlci, source_port, source_dlci)

@@ -260,7 +273,7 @@ class FrameRelaySwitch(Device):
port2=port2,
dlci2=dlci2))

self._active_mappings["{}:{}".format(port1, dlci1)] = "{}:{}".format(port2, dlci2)
self._active_mappings[(port1, dlci1)] = (port2, dlci2)

@asyncio.coroutine
def unmap_vc(self, port1, dlci1, port2, dlci2):
@@ -294,7 +307,7 @@ class FrameRelaySwitch(Device):
dlci1=dlci1,
port2=port2,
dlci2=dlci2))
del self._active_mappings["{}:{}".format(port1, dlci1)]
del self._active_mappings[(port1, dlci1)]

@asyncio.coroutine
def start_capture(self, port_number, output_file, data_link_type="DLT_FRELAY"):
@@ -71,7 +71,10 @@ class Router(BaseNode):
super().__init__(name, node_id, project, manager, console=console, aux=aux, allocate_aux=aux)

self._working_directory = os.path.join(self.project.module_working_directory(self.manager.module_name.lower()), self.id)
try:
os.makedirs(os.path.join(self._working_directory, "configs"), exist_ok=True)
except OSError as e:
raise DynamipsError("Can't create the dynamips config directory: {}".format(str(e)))
if dynamips_id:
self._convert_before_2_0_0_b3(dynamips_id)

@@ -1132,9 +1132,13 @@ class QemuVM(BaseNode):
adapter_number=adapter_number))

if self.ubridge:
try:
yield from self._add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
except IndexError:
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
elif self.is_running():
raise QemuError("Sorry, adding a link to a started Qemu VM is not supported without using uBridge.")

@@ -94,7 +94,7 @@ class VirtualBox(BaseManager):
if not vboxmanage_path:
vboxmanage_path = self.find_vboxmanage()
if not vboxmanage_path:
raise VirtualBoxError("Could not found VBoxManage")
raise VirtualBoxError("Could not find VBoxManage")

command = [vboxmanage_path, "--nologo", subcommand]
command.extend(args)
@@ -968,9 +968,13 @@ class VirtualBoxVM(BaseNode):
adapter_number=adapter_number))

if self.ubridge:
try:
yield from self._add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
self._local_udp_tunnels[adapter_number][1],
nio)
except KeyError:
raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
adapter_number=adapter_number))
yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
else:
vm_state = yield from self._get_vm_state()
@@ -160,7 +160,7 @@ class Controller:
for c in computes:
try:
yield from self.add_compute(**c)
except (aiohttp.web_exceptions.HTTPConflict):
except (aiohttp.web_exceptions.HTTPConflict, KeyError):
pass # Skip not available servers at loading
yield from self.load_projects()
try:
@@ -23,6 +23,7 @@ import json
import uuid
import sys
import io
from operator import itemgetter

from ..utils import parse_version
from ..utils.images import list_images
@@ -600,6 +601,9 @@ class Compute:
for local_image in list_images(type):
if local_image['filename'] not in [i['filename'] for i in images]:
images.append(local_image)
images = sorted(images, key=itemgetter('filename'))
else:
images = sorted(images, key=itemgetter('image'))
except OSError as e:
raise ComputeError("Can't list images: {}".format(str(e)))
return images
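A rough standalone illustration of the merge done in the Compute hunk above (the names are made up): images reported by the remote compute and images found locally are combined, de-duplicated by filename and sorted with operator.itemgetter so the controller returns a stable list.

from operator import itemgetter

def merge_images(remote_images, local_images):
    images = list(remote_images)
    known = {i["filename"] for i in images}
    for local_image in local_images:
        if local_image["filename"] not in known:
            images.append(local_image)
    # sort by filename so the result does not depend on discovery order
    return sorted(images, key=itemgetter("filename"))

remote = [{"filename": "linux.qcow2"}]
local = [{"filename": "asa.qcow2"}, {"filename": "linux.qcow2"}]
print(merge_images(remote, local))  # [{'filename': 'asa.qcow2'}, {'filename': 'linux.qcow2'}]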
@@ -190,6 +190,5 @@ def _export_images(project, image, z):
if os.path.exists(path):
arcname = os.path.join("images", directory, os.path.basename(image))
z.write(path, arcname)
break
else:
raise aiohttp.web.HTTPConflict(text="Topology could not be exported because the image {} is not available. If you use multiple server, we need a copy of the image on the main server.".format(path))
return
raise aiohttp.web.HTTPConflict(text="Topology could not be exported because the image {} is not available. If you use multiple server, we need a copy of the image on the main server.".format(image))
@@ -228,7 +228,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
try:
resp = None
resp = yield from session.get('http://127.0.0.1:{}/v2/compute/network/interfaces'.format(api_port))
except (OSError, aiohttp.errors.ClientHttpProcessingError, TimeoutError):
except (OSError, aiohttp.errors.ClientHttpProcessingError, TimeoutError, asyncio.TimeoutError):
pass

if resp:
@@ -118,7 +118,10 @@ class VMwareGNS3VM(BaseGNS3VM):
if vmware_tools_state not in ("installed", "running"):
raise GNS3VMError("VMware tools are not installed in {}".format(self.vmname))

try:
running = yield from self._is_running()
except VMwareError as e:
raise GNS3VMError("Could not list VMware VMs: {}".format(str(e)))
if not running:
log.info("Update GNS3 VM settings")
# set the number of vCPUs and amount of RAM
@@ -114,7 +114,7 @@ def import_project(controller, project_id, stream, location=None, name=None, kee
# unless it's a linux host without GNS3 VM
if not sys.platform.startswith("linux") or controller.has_compute("vm"):
for node in topology["topology"]["nodes"]:
if node["node_type"] in ("docker", "qemu", "iou"):
if node["node_type"] in ("docker", "qemu", "iou", "nat"):
node["compute_id"] = "vm"
else:
for node in topology["topology"]["nodes"]:
@@ -70,6 +70,10 @@ class Port:
return self._name
return self.short_name_type + "{}/{}".format(self._interface_number, self._port_number)

@short_name.setter
def short_name(self, val):
self._short_name = val

def __json__(self):
return {
"name": self._name,
@@ -181,7 +181,9 @@ class DynamipsPortFactory:
ports = []

adapter_number = 0
wic_port_number = 16
wic_slot = 1
wic_port_number = wic_slot * 16
display_wic_port_number = 0
for name in sorted(properties.keys()):
if name.startswith("slot") and properties[name]:
port_class = cls.ADAPTER_MATRIX[properties[name]]["port"]
@@ -194,7 +196,13 @@ class DynamipsPortFactory:
port_class = cls.WIC_MATRIX[properties[name]]["port"]
if port_class:
for port_number in range(0, cls.WIC_MATRIX[properties[name]]["nb_ports"]):
name = "{}{}/{}".format(port_class.long_name_type(), 0, wic_port_number)
ports.append(port_class(name, 0, 0, wic_port_number))
name = "{}{}/{}".format(port_class.long_name_type(), 0, display_wic_port_number)
port = port_class(name, 0, 0, wic_port_number)
port.short_name = "{}{}/{}".format(port.short_name_type, 0, display_wic_port_number)
ports.append(port)
display_wic_port_number += 1
wic_port_number += 1
wic_slot += 1
wic_port_number = wic_slot * 16

return ports
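The port-factory hunks above decouple the internal Dynamips WIC port number (a block of 16 per slot) from the number shown in the port name. A small hypothetical sketch of that numbering, assuming two 2-port serial WICs:

def wic_ports(nb_ports_per_wic=(2, 2)):
    ports = []
    wic_slot = 1
    wic_port_number = wic_slot * 16   # internal (Dynamips) port number
    display_wic_port_number = 0       # index used in the visible name
    for nb_ports in nb_ports_per_wic:
        for _ in range(nb_ports):
            name = "Serial{}/{}".format(0, display_wic_port_number)
            ports.append((name, wic_port_number))
            display_wic_port_number += 1
            wic_port_number += 1
        wic_slot += 1
        wic_port_number = wic_slot * 16
    return ports

print(wic_ports())  # [('Serial0/0', 16), ('Serial0/1', 17), ('Serial0/2', 32), ('Serial0/3', 33)]

This matches the expectations updated in the dynamips port tests near the end of this diff: the names become Serial0/0 .. Serial0/3 while the internal port numbers stay 16, 17, 32, 33.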
@@ -55,6 +55,8 @@ class Symbols:
for file in os.listdir(directory):
if file.startswith('.'):
continue
if not os.path.isfile(os.path.join(directory, file)):
continue
symbol_id = file
symbols.append({
'symbol_id': symbol_id,
@@ -125,7 +125,10 @@ def load_topology(path):
if "revision" not in topo or topo["revision"] < GNS3_FILE_FORMAT_REVISION:
# If it's an old GNS3 file we need to convert it
# first we backup the file
try:
shutil.copy(path, path + ".backup{}".format(topo.get("revision", 0)))
except (OSError) as e:
raise aiohttp.web.HTTPConflict(text="Can't write backup of the topology {}: {}".format(path, str(e)))
changed = True

if "revision" not in topo or topo["revision"] < 5:
@@ -150,8 +153,11 @@ def load_topology(path):
raise e

if changed:
try:
with open(path, "w+", encoding="utf-8") as f:
json.dump(topo, f, indent=4, sort_keys=True)
except (OSError) as e:
raise aiohttp.web.HTTPConflict(text="Can't write the topology {}: {}".format(path, str(e)))
return topo

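The load_topology hunks above wrap both the backup copy and the rewrite in try/except. A minimal sketch of the same backup-then-rewrite pattern (hypothetical helper with simplified error handling, not the project's function):

import json
import shutil

def upgrade_topology(path, convert):
    with open(path, encoding="utf-8") as f:
        topo = json.load(f)
    revision = topo.get("revision", 0)
    # keep a ".backupN" copy before touching the original file
    shutil.copy(path, path + ".backup{}".format(revision))
    topo = convert(topo)
    # stable formatting (indent + sorted keys) keeps later diffs readable
    with open(path, "w", encoding="utf-8") as f:
        json.dump(topo, f, indent=4, sort_keys=True)
    return topo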
@@ -332,7 +338,7 @@ def _convert_1_3_later(topo, topo_path):
node["console_type"] = None
node["symbol"] = ":/symbols/hub.svg"
node["properties"]["ports_mapping"] = []
for port in old_node["ports"]:
for port in old_node.get("ports", []):
node["properties"]["ports_mapping"].append({
"name": "Ethernet{}".format(port["port_number"] - 1),
"port_number": port["port_number"] - 1
@@ -353,13 +359,15 @@ def _convert_1_3_later(topo, topo_path):
node["node_type"] = "frame_relay_switch"
node["symbol"] = ":/symbols/frame_relay_switch.svg"
node["console_type"] = None
elif old_node["type"] in ["C1700", "C2600", "C2691", "C3600", "C3725", "C3745", "C7200", "EtherSwitchRouter"]:
elif old_node["type"] in ["C1700", "C2600", "C2691", "C3600", "C3620", "C3640", "C3660", "C3725", "C3745", "C7200", "EtherSwitchRouter"]:
if node["symbol"] is None:
node["symbol"] = ":/symbols/router.svg"
node["node_type"] = "dynamips"
node["properties"]["dynamips_id"] = old_node.get("dynamips_id")
if "platform" not in node["properties"] and old_node["type"].startswith("C"):
node["properties"]["platform"] = old_node["type"].lower()
if node["properties"]["platform"].startswith("c36"):
node["properties"]["platform"] = "c3600"
if "ram" not in node["properties"] and old_node["type"].startswith("C"):
node["properties"]["ram"] = PLATFORMS_DEFAULT_RAM[old_node["type"].lower()]
elif old_node["type"] == "VMwareVM":
@@ -54,7 +54,7 @@ class CrashReport:
Report crash to a third party service
"""

DSN = "sync+https://19cca90b55874be5862caf9b507fbd7b:1c0897efd092467a874e89b2e4803b29@sentry.io/38482"
DSN = "sync+https://fd4397dee2e145da9227af29df24ded1:61a0e2c9b9f64204bb8ef7ac17b98e3e@sentry.io/38482"
if hasattr(sys, "frozen"):
cacert = get_resource("cacert.pem")
if cacert is not None and os.path.isfile(cacert):
@@ -112,7 +112,10 @@ class ProjectHandler:
if ProjectHandler._notifications_listening.setdefault(project.id, 0) <= 1:
yield from project.close()
pm.remove_project(project.id)
try:
del ProjectHandler._notifications_listening[project.id]
except KeyError:
pass
else:
log.warning("Skip project closing, another client is listening for project notifications")
response.set_status(204)
@@ -98,6 +98,16 @@ class WinStreamHandler(logging.StreamHandler):
self.handleError(record)


class LogFilter:
"""
This filter some noise from the logs
"""
def filter(record):
if record.name == "aiohttp.access" and "/settings" in record.msg and "200" in record.msg:
return 0
return 1


def init_logger(level, logfile=None, quiet=False):
if logfile and len(logfile) > 0:
stream_handler = logging.FileHandler(logfile)
@@ -111,5 +121,7 @@ def init_logger(level, logfile=None, quiet=False):
if quiet:
stream_handler.addFilter(logging.Filter(name="user_facing"))
logging.getLogger('user_facing').propagate = False
if level > logging.DEBUG:
stream_handler.addFilter(LogFilter)
logging.basicConfig(level=level, handlers=[stream_handler])
return logging.getLogger('user_facing')
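The logging hunks above attach a filter to the stream handler so noisy aiohttp access-log lines are dropped above DEBUG level. A self-contained sketch of the same idea using a logging.Filter subclass (illustrative names, not the project's LogFilter):

import logging

class AccessLogNoiseFilter(logging.Filter):
    def filter(self, record):
        message = record.getMessage()
        # drop successful polling requests against /settings, keep everything else
        if record.name == "aiohttp.access" and "/settings" in message and "200" in message:
            return False
        return True

handler = logging.StreamHandler()
handler.addFilter(AccessLogNoiseFilter())
logging.basicConfig(level=logging.INFO, handlers=[handler])
logging.getLogger("aiohttp.access").info('GET /settings HTTP/1.1 200')    # filtered out
logging.getLogger("aiohttp.access").info('GET /v2/projects HTTP/1.1 200') # shown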
@@ -1,5 +1,6 @@
description "GNS3 server"
author "GNS3 Team"
env LANG=en_US.UTF-8

start on filesystem or runlevel [2345]
stop on shutdown
@@ -6,3 +6,4 @@ Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
zipstream>=1.1.4
typing>=3.5.3.0 # Otherwise yarl fail with python 3.4
setup.py

@@ -40,9 +40,6 @@ class PyTest(TestCommand):

dependencies = open("requirements.txt", "r").read().splitlines()

if sys.version_info <= (3, 4):
dependencies.append('typing>=3.5.3.0 # Otherwise yarl fail with python 3.4')

setup(
name="gns3-server",
version=__import__("gns3server").__version__,
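Side note, not what this commit does: the Python-version-gated dependency that setup.py previously appended by hand can also be declared with a PEP 508 environment marker directly in requirements.txt, for example:

typing>=3.5.3.0; python_version < "3.5"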
@@ -879,7 +879,7 @@ def test_start_aux(vm, loop):

with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=MagicMock()) as mock_exec:
loop.run_until_complete(asyncio.async(vm._start_aux()))
mock_exec.assert_called_with('docker', 'exec', '-i', 'e90e34656842', '/gns3/bin/busybox', 'script', '-qfc', 'while true; do /gns3/bin/busybox sh; done', '/dev/null', stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
mock_exec.assert_called_with('docker', 'exec', '-i', 'e90e34656842', '/gns3/bin/busybox', 'script', '-qfc', 'while true; do TERM=vt100 /gns3/bin/busybox sh; done', '/dev/null', stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)


def test_create_network_interfaces(vm):
@@ -360,8 +360,10 @@ def test_images(compute, async_run, images_dir):
images = async_run(compute.images("qemu"))
mock.assert_called_with("GET", "https://example.com:84/v2/compute/qemu/images", auth=None, data=None, headers={'content-type': 'application/json'}, chunked=False, timeout=None)

assert images == [{"filename": "linux.qcow2", "path": "linux.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0},
{"filename": "asa.qcow2", "path": "asa.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0}]
assert images == [
{"filename": "asa.qcow2", "path": "asa.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0},
{"filename": "linux.qcow2", "path": "linux.qcow2", "md5sum": "d41d8cd98f00b204e9800998ecf8427e", "filesize": 0}
]


def test_list_files(project, async_run, compute):
@@ -252,6 +252,50 @@ def test_import_iou_linux_with_vm(linux_platform, async_run, tmpdir, controller)
assert topo["topology"]["nodes"][0]["compute_id"] == "vm"


def test_import_nat_non_linux(windows_platform, async_run, tmpdir, controller):
"""
On non linux host NAT should be moved to the GNS3 VM
"""
project_id = str(uuid.uuid4())
controller._computes["vm"] = AsyncioMagicMock()

topology = {
"project_id": str(uuid.uuid4()),
"name": "test",
"type": "topology",
"topology": {
"nodes": [
{
"compute_id": "local",
"node_id": "0fd3dd4d-dc93-4a04-a9b9-7396a9e22e8b",
"node_type": "nat",
"name": "test",
"properties": {}
}
],
"links": [],
"computes": [],
"drawings": []
},
"revision": 5,
"version": "2.0.0"
}

with open(str(tmpdir / "project.gns3"), 'w+') as f:
json.dump(topology, f)

zip_path = str(tmpdir / "project.zip")
with zipfile.ZipFile(zip_path, 'w') as myzip:
myzip.write(str(tmpdir / "project.gns3"), "project.gns3")

with open(zip_path, "rb") as f:
project = async_run(import_project(controller, project_id, f))

with open(os.path.join(project.path, "test.gns3")) as f:
topo = json.load(f)
assert topo["topology"]["nodes"][0]["compute_id"] == "vm"


def test_import_iou_non_linux(windows_platform, async_run, tmpdir, controller):
"""
On non linux host IOU should be moved to the GNS3 VM
@@ -548,8 +548,8 @@ def test_list_ports_dynamips(project, compute):
"link_type": "ethernet"
},
{
"name": "Serial0/16",
"short_name": "s0/16",
"name": "Serial0/0",
"short_name": "s0/0",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
@@ -559,8 +559,8 @@ def test_list_ports_dynamips(project, compute):
"link_type": "serial"
},
{
"name": "Serial0/17",
"short_name": "s0/17",
"name": "Serial0/1",
"short_name": "s0/1",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
@@ -570,24 +570,24 @@ def test_list_ports_dynamips(project, compute):
"link_type": "serial"
},
{
"name": "Serial0/18",
"short_name": "s0/18",
"name": "Serial0/2",
"short_name": "s0/2",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
"Frame Relay": "DLT_FRELAY"},
"port_number": 18,
"port_number": 32,
"adapter_number": 0,
"link_type": "serial"
},
{
"name": "Serial0/19",
"short_name": "s0/19",
"name": "Serial0/3",
"short_name": "s0/3",
"data_link_types": {
"Cisco HDLC": "DLT_C_HDLC",
"Cisco PPP": "DLT_PPP_SERIAL",
"Frame Relay": "DLT_FRELAY"},
"port_number": 19,
"port_number": 33,
"adapter_number": 0,
"link_type": "serial"
}