Merge tag 'v2.1.3'

pull/1302/head
ziajka 6 years ago
commit daf058a306

@ -1,5 +1,25 @@
# Change Log
## 2.1.3 19/01/2018
* Update appliance files.
* Suspend for Docker nodes.
* Unlock yarl version and multidict
* Fix same MAC address for duplicated Qemu nodes.
* Fix same base MAC for duplicated IOS routers. Fixes #1264.
* Fix "Creating multiple IOU nodes at once assigns the same application id". Fixes #1239.
* Fix "Transport selection via DSN is deprecated" message. Sync is configured with HTTPTransport.
* Refresh CPU/RAM info every 1 second. Ref #2262.
* Rename ethernet switch arp command to mac
* Fix error while getting appliance list. Fixes #1258.
* Fix UnboundLocalError: local variable 'node' referenced before assignment. Fixes #1256.
* Default symbol must be computer.svg
* Compatibility for old node templates (those with default_symbol and hover_symbol properties).
* Fix problem when searching for VBoxManage. Fixes #1261.
* Improve the search for VBoxManage.
* Fixing race condition when starting the GNS3 VM.
* Default VPCS name format is now PC-{0}.
## 2.1.2 08/01/2018
* Do not show log message if configuration file doesn't exist. Fixes #1206.

@ -25,6 +25,13 @@
"kvm": "require"
},
"images": [
{
"filename": "VirtualTrafficManager-174.qcow2",
"version": "17.4",
"md5sum": "3c44f385e5faf310ca8e3d46bf4e0564",
"filesize": 2036465664,
"download_url": "http://www1.brocade.com/forms/jsp/steelapp-traffic-manager-developer/index.jsp?src=WS&lsd=BRCD&lst=English&cn=PA-GDG-16Q1-EVAL-TrafficManagerDeveloper&intcmp=lp_en_vTMdeveloper_eval_bn_00001"
},
{
"filename": "VirtualTrafficManager-173.qcow2",
"version": "17.3",
@ -76,6 +83,12 @@
}
],
"versions": [
{
"name": "17.4",
"images": {
"hda_disk_image": "VirtualTrafficManager-174.qcow2"
}
},
{
"name": "17.3",
"images": {

@ -25,6 +25,13 @@
"kvm": "require"
},
"images": [
{
"filename": "nxosv-final.7.0.3.I7.2.qcow2",
"version": "7.0.3.I7.2",
"md5sum": "17295efb13e83b24a439148449bfd5ab",
"filesize": 906231808,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "nxosv-final.7.0.3.I7.1.qcow2",
"version": "7.0.3.I7.1",
@ -64,6 +71,13 @@
}
],
"versions": [
{
"name": "7.0.3.I7.2",
"images": {
"bios_image": "OVMF-20160813.fd",
"hda_disk_image": "nxosv-final.7.0.3.I7.2.qcow2"
}
},
{
"name": "7.0.3.I7.1",
"images": {

@ -21,6 +21,15 @@
"kvm": "allow"
},
"images": [
{
"filename": "coreos_production_qemu_image.1576.4.0.img",
"version": "1576.4.0",
"md5sum": "7d3c647807afe1f18fd0c76730e612b4",
"filesize": 849739776,
"download_url": "http://stable.release.core-os.net/amd64-usr/1576.4.0/",
"direct_download_url": "http://stable.release.core-os.net/amd64-usr/1576.4.0/coreos_production_qemu_image.img.bz2",
"compression": "bzip2"
},
{
"filename": "coreos_production_qemu_image.1520.8.0.img",
"version": "1520.8.0",
@ -140,6 +149,12 @@
}
],
"versions": [
{
"name": "1576.4.0",
"images": {
"hda_disk_image": "coreos_production_qemu_image.1576.4.0.img"
}
},
{
"name": "1520.8.0",
"images": {

@ -23,6 +23,14 @@
"kvm": "require"
},
"images": [
{
"filename": "cumulus-linux-3.5.0-vx-amd64.qcow2",
"version": "3.5.0",
"md5sum": "9ad1f352d0603becf4bcc749b77c99dd",
"filesize": 1044250624,
"download_url": "https://cumulusnetworks.com/cumulus-vx/download/",
"direct_download_url": "http://cumulusfiles.s3.amazonaws.com/cumulus-linux-3.5.0-vx-amd64.qcow2"
},
{
"filename": "cumulus-linux-3.4.3-vx-amd64.qcow2",
"version": "3.4.3",
@ -125,6 +133,12 @@
}
],
"versions": [
{
"name": "3.5.0",
"images": {
"hda_disk_image": "cumulus-linux-3.5.0-vx-amd64.qcow2"
}
},
{
"name": "3.4.3",
"images": {

@ -26,6 +26,14 @@
"options": "-smp 2 -cpu core2duo"
},
"images": [
{
"filename": "exosvm-22.4.1.4.iso",
"version": "22.4.1.4",
"md5sum": "2134a511084519a5f8ad00a6f7cd71a9",
"filesize": 49993728,
"download_url": "https://github.com/extremenetworks/Virtual_EXOS",
"direct_download_url": "https://github.com/extremenetworks/Virtual_EXOS/raw/master/vm-22.4.1.4.iso"
},
{
"filename": "exosvm-22.2.1.5.iso",
"version": "22.2.1.5",
@ -92,6 +100,13 @@
}
],
"versions": [
{
"name": "22.4.1.4",
"images": {
"hda_disk_image": "empty8G.qcow2",
"cdrom_image": "exosvm-22.4.1.4.iso"
}
},
{
"name": "22.2.1.5",
"images": {

@ -27,6 +27,13 @@
"options": "-smp 2 -cpu host"
},
"images": [
{
"filename": "BIGIP-13.1.0.1.0.0.8.qcow2",
"version": "13.1.0 HF1",
"md5sum": "70f92192e66a82cb8f47bdae0cb267d8",
"filesize": 4352966656,
"download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-ip/big-ip_v13.x/13.1.0/english/13.1.0.1_virtual-edition/&sw=BIG-IP&pro=big-ip_v13.x&ver=13.1.0&container=13.1.0.1_Virtual-Edition&file=BIGIP-13.1.0.1.0.0.8.ALL.qcow2.zip"
},
{
"filename": "BIGIP-13.0.0.2.0.1671.qcow2",
"version": "13.0.0 HF2",
@ -107,6 +114,13 @@
}
],
"versions": [
{
"name": "13.1.0 HF1",
"images": {
"hda_disk_image": "BIGIP-13.1.0.1.0.0.8.qcow2",
"hdb_disk_image": "empty100G.qcow2"
}
},
{
"name": "13.0.0 HF2",
"images": {

@ -29,6 +29,13 @@
"options": "-smp 2 -cpu host"
},
"images": [
{
"filename": "BIG-IQ-5.4.0.0.0.7437.qcow2",
"version": "5.4.0",
"md5sum": "068b1f4d21048b9b2a082c0c27ef4d53",
"filesize": 3300917248,
"download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-iq/big-iq_cm/5.4.0/english/v5.4.0/&sw=BIG-IQ&pro=big-iq_CM&ver=5.4.0&container=v5.4.0&file=BIG-IQ-5.4.0.0.0.7437.qcow2.zip"
},
{
"filename": "BIG-IQ-5.3.0.0.0.1119.qcow2",
"version": "5.3.0",
@ -74,6 +81,13 @@
}
],
"versions": [
{
"name": "5.4.0",
"images": {
"hda_disk_image": "BIG-IQ-5.4.0.0.0.7437.qcow2",
"hdb_disk_image": "empty100G.qcow2"
}
},
{
"name": "5.3.0",
"images": {

@ -34,6 +34,13 @@
"filesize": 30998528,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FAD_KVM-V400-build0983-FORTINET.out.kvm-boot.qcow2",
"version": "4.8.3",
"md5sum": "d4cfc3b215780b2fb4c9d8f55208e8be",
"filesize": 72876032,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FAD_KVM-V400-build0977-FORTINET.out.kvm-boot.qcow2",
"version": "4.8.2",
@ -141,6 +148,13 @@
}
],
"versions": [
{
"name": "4.8.3",
"images": {
"hda_disk_image": "FAD_KVM-V400-build0983-FORTINET.out.kvm-boot.qcow2",
"hdb_disk_image": "FAD_KVM-v400-FORTINET.out.kvm-data.qcow2"
}
},
{
"name": "4.8.2",
"images": {

@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FAZ_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
"version": "5.6.1",
"md5sum": "1bd94c920f8747de671832ef92e8dfbc",
"filesize": 105705472,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FAZ_VM64_KVM-v5-build1557-FORTINET.out.kvm.qcow2",
"version": "5.6.0",
@ -106,6 +113,13 @@
}
],
"versions": [
{
"name": "5.6.1",
"images": {
"hda_disk_image": "FAZ_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.6.0",
"images": {

@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FAC_VM_KVM-v5-build0155-FORTINET.out.kvm.qcow2",
"version": "5.2.0",
"md5sum": "69b55ce7c8094ccd736bbfe8a3262b31",
"filesize": 71782400,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FAC_VM_KVM-v500-build0091-FORTINET.out.kvm.qcow2",
"version": "5.1.2",
@ -63,6 +70,13 @@
}
],
"versions": [
{
"name": "5.2.0",
"images": {
"hda_disk_image": "FAC_VM_KVM-v5-build0155-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "FAC_VM_KVM-v500-DATADRIVE.qcow2"
}
},
{
"name": "5.1.2",
"images": {

@ -26,6 +26,13 @@
"kvm": "require"
},
"images": [
{
"filename": "FCHKVM-v400-build0216-FORTINET.out.kvm.qcow2",
"version": "4.2.6",
"md5sum": "867e0569b8466db744547422a1d6f17a",
"filesize": 27553792,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FCHKVM-v400-build0213-FORTINET.out.kvm.qcow2",
"version": "4.2.5",
@ -92,6 +99,13 @@
}
],
"versions": [
{
"name": "4.2.6",
"images": {
"hda_disk_image": "FCHKVM-v400-build0216-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty100G.qcow2"
}
},
{
"name": "4.2.5",
"images": {

@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FGT_VM64_KVM-v5-build1547-FORTINET.out.kvm.qcow2",
"version": "5.6.3",
"md5sum": "a908f8620e8bbccce8794733f3637e13",
"filesize": 40939520,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v5-build1486-FORTINET.out.kvm.qcow2",
"version": "5.6.2",
@ -47,6 +54,13 @@
"filesize": 38760448,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v5-build6446-FORTINET.out.kvm.qcow2",
"version": "5.4.7",
"md5sum": "17d3dfebd4b222569cf10cfab83e0e56",
"filesize": 38715392,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v5-build1165-FORTINET.out.kvm.qcow2",
"version": "5.4.6",
@ -96,6 +110,20 @@
"filesize": 35373056,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v5-build0762-FORTINET.out.kvm.qcow2",
"version": "5.2.13",
"md5sum": "78df232e516a863f233de88ffba5bc4b",
"filesize": 38776832,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v5-build0760-FORTINET.out.kvm.qcow2",
"version": "5.2.12",
"md5sum": "2efa0c110abed83b71927145d1e87805",
"filesize": 38363136,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v5-build0754-FORTINET.out.kvm.qcow2",
"version": "5.2.11",
@ -148,6 +176,13 @@
}
],
"versions": [
{
"name": "5.6.3",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v5-build1547-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.6.2",
"images": {
@ -169,6 +204,13 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.4.7",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v5-build6446-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.4.6",
"images": {
@ -218,6 +260,20 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.2.13",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v5-build0762-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.2.12",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v5-build0760-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.2.11",
"images": {

@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FML_VMKV-64-v54-build0712-FORTINET.out.kvm.qcow2",
"version": "5.4.3",
"md5sum": "977effe7b885ca5cedec7740a2a637aa",
"filesize": 93454336,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FML_VMKV-64-v54-build0707-FORTINET.out.kvm.qcow2",
"version": "5.4.2",
@ -127,6 +134,13 @@
}
],
"versions": [
{
"name": "5.4.3",
"images": {
"hda_disk_image": "FML_VMKV-64-v54-build0712-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.4.2",
"images": {

@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FMG_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
"version": "5.6.1",
"md5sum": "8cc553842564d232af295d6a0c784c1f",
"filesize": 106831872,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FMG_VM64_KVM-v5-build1557-FORTINET.out.kvm.qcow2",
"version": "5.6.0",
@ -106,6 +113,13 @@
}
],
"versions": [
{
"name": "5.6.1",
"images": {
"hda_disk_image": "FMG_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "5.6.0",
"images": {

@ -15,7 +15,7 @@
"qemu": {
"adapter_type": "e1000",
"adapters": 1,
"ram": 8096,
"ram": 8192,
"hda_disk_interface": "ide",
"hdb_disk_interface": "ide",
"arch": "x86_64",
@ -24,6 +24,14 @@
"kvm": "require"
},
"images": [
{
"filename": "FreeNAS-11.1-RELEASE.iso",
"version": "11.1",
"md5sum": "67bea5816bc889169e5e3054362b2053",
"filesize": 626761728,
"download_url": "http://www.freenas.org/download/",
"direct_download_url": "http://download.freenas.org/11/11.1-RELEASE/x64/FreeNAS-11.1-RELEASE.iso"
},
{
"filename": "FreeNAS-11.0-U4.iso",
"version": "11.0-U4",
@ -50,6 +58,14 @@
}
],
"versions": [
{
"name": "11.1",
"images": {
"hda_disk_image": "empty30G.qcow2",
"hdb_disk_image": "empty30G.qcow2",
"cdrom_image": "FreeNAS-11.1-RELEASE.iso"
}
},
{
"name": "11.0",
"images": {

@ -23,6 +23,13 @@
"options": "-smp 2"
},
"images": [
{
"filename": "media-vsrx-vmdisk-17.4R1.16.qcow2",
"version": "17.4R1",
"md5sum": "616c4742b09652318c73a7cc598468e7",
"filesize": 3965386752,
"download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
},
{
"filename": "media-vsrx-vmdisk-17.3R1.10.qcow2",
"version": "17.3R1",
@ -30,6 +37,13 @@
"filesize": 3782541312,
"download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
},
{
"filename": "media-vsrx-vmdisk-15.1X49-D120.3.qcow2",
"version": "15.1X49-D120",
"md5sum": "02cf4df3dc988a407ccd5ddc30ee5385",
"filesize": 3280273408,
"download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
},
{
"filename": "media-vsrx-vmdisk-15.1X49-D110.4.qcow2",
"version": "15.1X49-D110",
@ -109,12 +123,24 @@
}
],
"versions": [
{
"name": "17.4R1",
"images": {
"hda_disk_image": "media-vsrx-vmdisk-17.4R1.16.qcow2"
}
},
{
"name": "17.3R1",
"images": {
"hda_disk_image": "media-vsrx-vmdisk-17.3R1.10.qcow2"
}
},
{
"name": "15.1X49-D120",
"images": {
"hda_disk_image": "media-vsrx-vmdisk-15.1X49-D120.3.qcow2"
}
},
{
"name": "15.1X49-D110",
"images": {

@ -20,6 +20,14 @@
"kvm": "require"
},
"images": [
{
"filename": "kali-linux-2017.3-amd64.iso",
"version": "2017.3",
"md5sum": "b465580c897e94675ac1daf031fa66b9",
"filesize": 2886402048,
"download_url": "http://cdimage.kali.org/kali-2017.3/",
"direct_download_url": "http://cdimage.kali.org/kali-2017.3/kali-linux-2017.3-amd64.iso"
},
{
"filename": "kali-linux-2017.2-amd64.iso",
"version": "2017.2",
@ -62,6 +70,12 @@
}
],
"versions": [
{
"name": "2017.3",
"images": {
"cdrom_image": "kali-linux-2017.3-amd64.iso"
}
},
{
"name": "2017.2",
"images": {

@ -21,6 +21,22 @@
"kvm": "allow"
},
"images": [
{
"filename": "lede-17.01.4-x86-generic-combined-squashfs.img",
"version": "17.01.4",
"md5sum": "ae5d8d3fcab109565fe337d28e51c4b4",
"filesize": 19779546,
"download_url": "https://downloads.lede-project.org/releases/17.01.4/targets/x86/generic/",
"direct_download_url": "https://downloads.lede-project.org/releases/17.01.4/targets/x86/generic/lede-17.01.4-x86-generic-combined-squashfs.img"
},
{
"filename": "lede-17.01.3-x86-generic-combined-squashfs.img",
"version": "17.01.3",
"md5sum": "d315fc638160a9aec0966d58828bfccf",
"filesize": 19775618,
"download_url": "https://downloads.lede-project.org/releases/17.01.3/targets/x86/generic/",
"direct_download_url": "https://downloads.lede-project.org/releases/17.01.3/targets/x86/generic/lede-17.01.3-x86-generic-combined-squashfs.img"
},
{
"filename": "lede-17.01.2-x86-generic-combined-squashfs.img",
"version": "17.01.2",
@ -47,6 +63,18 @@
}
],
"versions": [
{
"name": "lede 17.01.4",
"images": {
"hda_disk_image": "lede-17.01.4-x86-generic-combined-squashfs.img"
}
},
{
"name": "lede 17.01.3",
"images": {
"hda_disk_image": "lede-17.01.3-x86-generic-combined-squashfs.img"
}
},
{
"name": "lede 17.01.2",
"images": {

@ -25,12 +25,12 @@
},
"images": [
{
"filename": "ostinato-0.8-97c7d79.qcow2",
"version": "0.8-97c7d79",
"md5sum": "5aad15c1eb7baac588a4c8c3faafa380",
"filesize": 98631680,
"filename": "ostinato-0.9-1.qcow2",
"version": "0.9",
"md5sum": "00b4856ec9fffbcbcab7a8f757355d69",
"filesize": 101646336,
"download_url": "http://www.bernhard-ehlers.de/projects/ostinato4gns3/index.html",
"direct_download_url": "http://www.bernhard-ehlers.de/projects/ostinato4gns3/ostinato-0.8-97c7d79.qcow2"
"direct_download_url": "http://www.bernhard-ehlers.de/projects/ostinato4gns3/ostinato-0.9-1.qcow2"
},
{
"filename": "ostinato-0.8-1.qcow2",
@ -43,9 +43,9 @@
],
"versions": [
{
"name": "0.8-97c7d79",
"name": "0.9",
"images": {
"hda_disk_image": "ostinato-0.8-97c7d79.qcow2"
"hda_disk_image": "ostinato-0.9-1.qcow2"
}
},
{

@ -24,6 +24,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "asg-9.506-2.1.iso",
"version": "9.506-2.1",
"md5sum": "6b4374f8c5ee66ccdf9683f7349f59cb",
"filesize": 1006057472,
"download_url": "https://www.sophos.com/en-us/support/utm-downloads.aspx"
},
{
"filename": "asg-9.500-9.1.iso",
"version": "9.500-9.1",
@ -31,6 +38,13 @@
"filesize": 981612544,
"download_url": "https://www.sophos.com/en-us/support/utm-downloads.aspx"
},
{
"filename": "asg-9.415-1.1.iso",
"version": "9.415-1.1",
"md5sum": "505004bf5a5d5f2234b2056ec7b553d8",
"filesize": 961087488,
"download_url": "https://www.sophos.com/en-us/support/utm-downloads.aspx"
},
{
"filename": "asg-9.413-4.1.iso",
"version": "9.413-4.1",
@ -125,6 +139,13 @@
}
],
"versions": [
{
"name": "9.506-2.1",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "asg-9.506-2.1.iso"
}
},
{
"name": "9.500-9.1",
"images": {
@ -132,6 +153,13 @@
"cdrom_image": "asg-9.500-9.1.iso"
}
},
{
"name": "9.415-1.1",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "asg-9.415-1.1.iso"
}
},
{
"name": "9.413-4.1",
"images": {

@ -23,6 +23,20 @@
"kvm": "require"
},
"images": [
{
"filename": "VI-SFOS_17.0.2_MR-2.KVM-116-PRIMARY.qcow2",
"version": "17.0.2 MR2",
"md5sum": "2555fa6dcdcecad02c9f02dcb1c0c5e5",
"filesize": 324599808,
"download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
},
{
"filename": "VI-SFOS_17.0.2_MR-2.KVM-116-AUXILARY.qcow2",
"version": "17.0.2 MR2",
"md5sum": "c3ef795423dbfc01771348b0daa75125",
"filesize": 59441152,
"download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
},
{
"filename": "VI-SFOS_16.05.4_MR-4.KVM-215-PRIMARY.qcow2",
"version": "16.05.4 MR4",
@ -95,6 +109,13 @@
}
],
"versions": [
{
"name": "17.0.2 MR2",
"images": {
"hda_disk_image": "VI-SFOS_17.0.2_MR-2.KVM-116-PRIMARY.qcow2",
"hdb_disk_image": "VI-SFOS_17.0.2_MR-2.KVM-116-AUXILARY.qcow2"
}
},
{
"name": "16.05.4 MR4",
"images": {

@ -31,29 +31,37 @@
"download_url": "http://dev.packages.vyos.net/iso/preview/1.2.0-beta1/",
"direct_download_url": "http://dev.packages.vyos.net/iso/preview/1.2.0-beta1/vyos-1.2.0-beta1-amd64.iso"
},
{
"filename": "vyos-1.1.8-amd64.iso",
"version": "1.1.8",
"md5sum": "95a141d4b592b81c803cdf7e9b11d8ea",
"filesize": 241172480,
"download_url": "https://downloads.vyos.io/?dir=release/1.1.8",
"direct_download_url": "https://downloads.vyos.io/release/1.1.8/vyos-1.1.8-amd64.iso"
},
{
"filename": "vyos-1.1.7-amd64.iso",
"version": "1.1.7",
"md5sum": "9a7f745a0b0db0d4f1d9eee2a437fb54",
"filesize": 245366784,
"download_url": "http://mirror.vyos.net/iso/release/1.1.7/",
"direct_download_url": "http://mirror.vyos.net/iso/release/1.1.7/vyos-1.1.7-amd64.iso"
"download_url": "https://downloads.vyos.io/?dir=release/1.1.7/",
"direct_download_url": "https://downloads.vyos.io/release/1.1.7/vyos-1.1.7-amd64.iso"
},
{
"filename": "vyos-1.1.6-amd64.iso",
"version": "1.1.6",
"md5sum": "3128954d026e567402a924c2424ce2bf",
"filesize": 245366784,
"download_url": "http://mirror.vyos.net/iso/release/1.1.6/",
"direct_download_url": "http://mirror.vyos.net/iso/release/1.1.6/vyos-1.1.6-amd64.iso"
"download_url": "https://downloads.vyos.io/?dir=release/1.1.6/",
"direct_download_url": "https://downloads.vyos.io/release/1.1.6/vyos-1.1.6-amd64.iso"
},
{
"filename": "vyos-1.1.5-amd64.iso",
"version": "1.1.5",
"md5sum": "193179532011ceaa87ee725bd8f22022",
"filesize": 247463936,
"download_url": "http://mirror.vyos.net/iso/release/1.1.5/",
"direct_download_url": "http://mirror.vyos.net/iso/release/1.1.5/vyos-1.1.5-amd64.iso"
"download_url": "https://downloads.vyos.io/?dir=release/1.1.5/",
"direct_download_url": "https://downloads.vyos.io/release/1.1.5/vyos-1.1.5-amd64.iso"
},
{
"filename": "empty8G.qcow2",

@ -20,6 +20,24 @@
"kvm": "allow"
},
"images": [
{
"filename": "ZeroShell-3.8.2-X86-USB.img",
"version": "3.8.2",
"md5sum": "bb8c7f24c86eb59e26ce36ff1979ecd4",
"filesize": 1992294400,
"download_url": "http://www.zeroshell.org/download/",
"direct_download_url": "http://www.zeroshell.net/listing/ZeroShell-3.8.2-X86-USB.img.gz",
"compression": "gzip"
},
{
"filename": "ZeroShell-3.8.1-X86-USB.img",
"version": "3.8.1",
"md5sum": "49256e396d160e88fbc3a3889e172482",
"filesize": 1992294400,
"download_url": "http://www.zeroshell.org/download/",
"direct_download_url": "http://www.zeroshell.net/listing/ZeroShell-3.8.1-X86-USB.img.gz",
"compression": "gzip"
},
{
"filename": "ZeroShell-3.8.0-X86-USB.img",
"version": "3.8.0",
@ -40,6 +58,18 @@
}
],
"versions": [
{
"name": "3.8.2",
"images": {
"hda_disk_image": "ZeroShell-3.8.2-X86-USB.img"
}
},
{
"name": "3.8.1",
"images": {
"hda_disk_image": "ZeroShell-3.8.1-X86-USB.img"
}
},
{
"name": "3.8.0",
"images": {

@ -335,11 +335,14 @@ class BaseManager:
:returns: Node instance
"""
node = None
try:
node = yield from self.close_node(node_id)
node = self.get_node(node_id)
yield from self.close_node(node_id)
finally:
node.project.emit("node.deleted", node)
yield from node.project.remove_node(node)
if node:
node.project.emit("node.deleted", node)
yield from node.project.remove_node(node)
if node.id in self._nodes:
del self._nodes[node.id]
return node

@ -17,7 +17,6 @@
import asyncio
from ...error import NodeError
from ...base_node import BaseNode
import logging

@ -17,7 +17,6 @@
import asyncio
from ...error import NodeError
from ...base_node import BaseNode
import logging

@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import asyncio
from .cloud import Cloud
from ...error import NodeError

@ -44,9 +44,9 @@ class EthernetSwitchConsole(EmbedShell):
self._node = node
@asyncio.coroutine
def arp(self):
def mac(self):
"""
Show arp table
Show MAC address table
"""
res = 'Port Mac VLAN\n'
result = (yield from self._node._hypervisor.send('ethsw show_mac_addr_table {}'.format(self._node.name)))

@ -1142,6 +1142,7 @@ class IOUVM(BaseNode):
:returns: integer between 1 and 512
"""
if self._application_id is None:
#FIXME: is this necessary? application ID is allocated by controller and should not be None
return self._manager.get_application_id(self.id)
return self._application_id

@ -57,24 +57,34 @@ class VirtualBox(BaseManager):
# look for VBoxManage
vboxmanage_path = self.config.get_section_config("VirtualBox").get("vboxmanage_path")
if not vboxmanage_path:
if vboxmanage_path:
if not os.path.isabs(vboxmanage_path):
vboxmanage_path = shutil.which(vboxmanage_path)
else:
log.info("A path to VBoxManage has not been configured, trying to find it...")
if sys.platform.startswith("win"):
if "VBOX_INSTALL_PATH" in os.environ:
vboxmanage_path = os.path.join(os.environ["VBOX_INSTALL_PATH"], "VBoxManage.exe")
vboxmanage_path_windows = os.path.join(os.environ["VBOX_INSTALL_PATH"], "VBoxManage.exe")
if os.path.exists(vboxmanage_path_windows):
vboxmanage_path = vboxmanage_path_windows
elif "VBOX_MSI_INSTALL_PATH" in os.environ:
vboxmanage_path = os.path.join(os.environ["VBOX_MSI_INSTALL_PATH"], "VBoxManage.exe")
vboxmanage_path_windows = os.path.join(os.environ["VBOX_MSI_INSTALL_PATH"], "VBoxManage.exe")
if os.path.exists(vboxmanage_path_windows):
vboxmanage_path = vboxmanage_path_windows
elif sys.platform.startswith("darwin"):
vboxmanage_path = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage"
else:
vboxmanage_path = "vboxmanage"
vboxmanage_path_osx = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage"
if os.path.exists(vboxmanage_path_osx):
vboxmanage_path = vboxmanage_path_osx
if not vboxmanage_path:
vboxmanage_path = shutil.which("vboxmanage")
if vboxmanage_path and not os.path.isabs(vboxmanage_path):
vboxmanage_path = shutil.which(vboxmanage_path)
if vboxmanage_path and not os.path.exists(vboxmanage_path):
log.error("VBoxManage path '{}' doesn't exist".format(vboxmanage_path))
if not vboxmanage_path:
raise VirtualBoxError("Could not find VBoxManage if you just install VirtualBox you need to reboot")
raise VirtualBoxError("Could not find VBoxManage, please reboot if VirtualBox has just been installed")
if not os.path.isfile(vboxmanage_path):
raise VirtualBoxError("VBoxManage {} is not accessible".format(vboxmanage_path))
raise VirtualBoxError("VBoxManage '{}' is not accessible".format(vboxmanage_path))
if not os.access(vboxmanage_path, os.X_OK):
raise VirtualBoxError("VBoxManage is not executable")
if os.path.basename(vboxmanage_path) not in ["VBoxManage", "VBoxManage.exe", "vboxmanage"]:

@ -134,20 +134,6 @@ class VPCSVM(BaseNode):
"project_id": self.project.id,
"command_line": self.command_line}
@property
def relative_startup_script(self):
"""
Returns the startup config file relative to the project directory.
:returns: path to config file. None if the file doesn't exist
"""
path = os.path.join(self.working_dir, 'startup.vpc')
if os.path.exists(path):
return 'startup.vpc'
else:
return None
def _vpcs_path(self):
"""
Returns the VPCS executable path.

@ -123,14 +123,19 @@ class Controller:
if prop in ["enable_remote_console", "use_ubridge"]:
del vm[prop]
vm.setdefault("appliance_id", str(uuid.uuid4()))
appliance = Appliance(vm["appliance_id"], vm)
self._appliances[appliance.id] = appliance
try:
appliance = Appliance(vm["appliance_id"], vm)
self._appliances[appliance.id] = appliance
except KeyError as e:
# appliance data is not complete (missing name or type)
log.warning("Could not load appliance template {} ('{}'): {}".format(vm["appliance_id"], vm.get("name", "unknown"), e))
continue
# Add builtins
builtins = []
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "cloud"), {"node_type": "cloud", "name": "Cloud", "category": 2, "symbol": ":/symbols/cloud.svg"}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "nat"), {"node_type": "nat", "name": "NAT", "category": 2, "symbol": ":/symbols/cloud.svg"}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "vpcs"), {"node_type": "vpcs", "name": "VPCS", "category": 2, "symbol": ":/symbols/vpcs_guest.svg", "properties": {"base_script_file": "vpcs_base_config.txt"}}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "vpcs"), {"node_type": "vpcs", "name": "VPCS", "default_name_format": "PC-{0}", "category": 2, "symbol": ":/symbols/vpcs_guest.svg", "properties": {"base_script_file": "vpcs_base_config.txt"}}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "ethernet_switch"), {"node_type": "ethernet_switch", "name": "Ethernet switch", "category": 1, "symbol": ":/symbols/ethernet_switch.svg"}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "ethernet_hub"), {"node_type": "ethernet_hub", "name": "Ethernet hub", "category": 1, "symbol": ":/symbols/hub.svg"}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "frame_relay_switch"), {"node_type": "frame_relay_switch", "name": "Frame Relay switch", "category": 1, "symbol": ":/symbols/frame_relay_switch.svg"}, builtin=True))
@ -405,6 +410,7 @@ class Controller:
:param connect: True connect to the compute immediately
:param kwargs: See the documentation of Compute
"""
if compute_id not in self._computes:
# We disallow to create from the outside the local and VM server

@ -377,13 +377,13 @@ class Compute:
"""
:param dont_connect: If true do not reconnect if not connected
"""
if not self._connected and not dont_connect:
if self._id == "vm" and not self._controller.gns3vm.running:
yield from self._controller.gns3vm.start()
yield from self.connect()
if not self._connected and not dont_connect:
raise ComputeError("Can't connect to {}".format(self._name))
raise ComputeError("Cannot connect to compute '{}' with request {} {}".format(self._name, method, path))
response = yield from self._run_http_query(method, path, data=data, **kwargs)
return response
@ -402,20 +402,20 @@ class Compute:
"""
Check if remote server is accessible
"""
if not self._connected and not self._closed:
try:
log.info("Connecting to compute '{}'".format(self._id))
response = yield from self._run_http_query("GET", "/capabilities")
except ComputeError:
except ComputeError as e:
# Try to reconnect after 2 seconds if the server is unavailable, but only if not during tests (otherwise we create a resource usage bomb)
if not hasattr(sys, "_called_from_test") or not sys._called_from_test:
self._connection_failure += 1
# After 5 failure we close the project using the compute to avoid sync issues
if self._connection_failure == 5:
log.warning("Can't connect to compute %s", self._id)
log.warning("Cannot connect to compute '{}': {}".format(self._id, e))
yield from self._controller.close_compute_projects(self)
asyncio.get_event_loop().call_later(2, lambda: asyncio.async(self._try_reconnect()))
return
except aiohttp.web.HTTPNotFound:
raise aiohttp.web.HTTPConflict(text="The server {} is not a GNS3 server or it's a 1.X server".format(self._id))

@ -27,6 +27,7 @@ from .virtualbox_gns3_vm import VirtualBoxGNS3VM
from .remote_gns3_vm import RemoteGNS3VM
from .gns3_vm_error import GNS3VMError
from ...version import __version__
from ..compute import ComputeError
import logging
log = logging.getLogger(__name__)
@ -281,7 +282,8 @@ class GNS3VM:
compute = yield from self._controller.add_compute(compute_id="vm",
name="GNS3 VM is starting ({})".format(engine.vmname),
host=None,
force=True)
force=True,
connect=False)
try:
yield from engine.start()
@ -290,6 +292,7 @@ class GNS3VM:
log.error("Can't start the GNS3 VM: {}".format(str(e)))
yield from compute.update(name="GNS3 VM ({})".format(engine.vmname))
raise e
yield from compute.connect() # we can connect now that the VM has started
yield from compute.update(name="GNS3 VM ({})".format(engine.vmname),
protocol=self.protocol,
host=self.ip_address,
@ -297,7 +300,9 @@ class GNS3VM:
user=self.user,
password=self.password)
yield from self._check_network(compute)
# check if the VM is in the same subnet as the local server, start 10 seconds later to give
# some time for the compute in the VM to be ready for requests
asyncio.get_event_loop().call_later(10, lambda: asyncio.async(self._check_network(compute)))
@asyncio.coroutine
def _check_network(self, compute):
@ -305,28 +310,32 @@ class GNS3VM:
Check that the VM is in the same subnet as the local server
"""
vm_interfaces = yield from compute.interfaces()
vm_interface_netmask = None
for interface in vm_interfaces:
if interface["ip_address"] == self.ip_address:
vm_interface_netmask = interface["netmask"]
break
if vm_interface_netmask:
vm_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, vm_interface_netmask)).network
for compute_id in self._controller.computes:
if compute_id == "local":
compute = self._controller.get_compute(compute_id)
interfaces = yield from compute.interfaces()
netmask = None
for interface in interfaces:
if interface["ip_address"] == compute.host_ip:
netmask = interface["netmask"]
break
if netmask:
compute_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, netmask)).network
if vm_network.compare_networks(compute_network) != 0:
msg = "The GNS3 VM ({}) is not on the same network as the {} server ({}), please make sure the local server binding is in the same network as the GNS3 VM".format(vm_network, compute_id, compute_network)
self._controller.notification.emit("log.warning", {"message": msg})
try:
vm_interfaces = yield from compute.interfaces()
vm_interface_netmask = None
for interface in vm_interfaces:
if interface["ip_address"] == self.ip_address:
vm_interface_netmask = interface["netmask"]
break
if vm_interface_netmask:
vm_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, vm_interface_netmask)).network
for compute_id in self._controller.computes:
if compute_id == "local":
compute = self._controller.get_compute(compute_id)
interfaces = yield from compute.interfaces()
netmask = None
for interface in interfaces:
if interface["ip_address"] == compute.host_ip:
netmask = interface["netmask"]
break
if netmask:
compute_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, netmask)).network
if vm_network.compare_networks(compute_network) != 0:
msg = "The GNS3 VM ({}) is not on the same network as the {} server ({}), please make sure the local server binding is in the same network as the GNS3 VM".format(
vm_network, compute_id, compute_network)
self._controller.notification.emit("log.warning", {"message": msg})
except ComputeError as e:
log.warning("Could not check the VM is in the same subnet as the local server: {}".format(e))
@locked_coroutine
def _suspend(self):

@ -171,7 +171,7 @@ class VMwareGNS3VM(BaseGNS3VM):
trial -= 1
# If ip not found fallback on old method
if trial == 0:
log.warn("No IP found for the VM via readVariable fallback to getGuestIPAddress")
log.warning("No IP found for the VM via readVariable fallback to getGuestIPAddress")
guest_ip_address = yield from self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120)
break
yield from asyncio.sleep(1)

@ -86,8 +86,7 @@ class Node:
self._first_port_name = None
# These properties will be recomputed
ignore_properties = ("width", "height")
ignore_properties = ("width", "height", "hover_symbol")
self.properties = kwargs.pop('properties', {})
# Update node properties with additional elements
@ -104,7 +103,15 @@ class Node:
self.properties[prop] = kwargs[prop]
if self._symbol is None:
self.symbol = ":/symbols/computer.svg"
# compatibility with old node templates
if "default_symbol" in self.properties:
default_symbol = self.properties.pop("default_symbol")
if default_symbol.endswith("normal.svg"):
self.symbol = default_symbol[:-11] + ".svg"
else:
self.symbol = default_symbol
else:
self.symbol = ":/symbols/computer.svg"
def is_always_running(self):
"""

@ -86,6 +86,7 @@ class Project:
self._show_grid = show_grid
self._show_interface_labels = show_interface_labels
self._loading = False
self._add_node_lock = asyncio.Lock()
# Disallow overwrite of existing project
if project_id is None and path is not None:
@ -434,34 +435,38 @@ class Project:
:param dump: Dump topology to disk
:param kwargs: See the documentation of node
"""
if node_id in self._nodes:
return self._nodes[node_id]
if node_type == "iou" and 'application_id' not in kwargs.keys():
kwargs['application_id'] = get_next_application_id(self._nodes.values())
node = Node(self, compute, name, node_id=node_id, node_type=node_type, **kwargs)
if compute not in self._project_created_on_compute:
# For a local server we send the project path
if compute.id == "local":
yield from compute.post("/projects", data={
"name": self._name,
"project_id": self._id,
"path": self._path
})
else:
yield from compute.post("/projects", data={
"name": self._name,
"project_id": self._id,
})
self._project_created_on_compute.add(compute)
yield from node.create()
self._nodes[node.id] = node
self.controller.notification.emit("node.created", node.__json__())
if dump:
self.dump()
with (yield from self._add_node_lock):
# wait for a node to be completely created before adding a new one
# this is important otherwise we allocate the same application ID
# when creating multiple IOU node at the same time
if node_type == "iou" and 'application_id' not in kwargs.keys():
kwargs['application_id'] = get_next_application_id(self._nodes.values())
node = Node(self, compute, name, node_id=node_id, node_type=node_type, **kwargs)
if compute not in self._project_created_on_compute:
# For a local server we send the project path
if compute.id == "local":
yield from compute.post("/projects", data={
"name": self._name,
"project_id": self._id,
"path": self._path
})
else:
yield from compute.post("/projects", data={
"name": self._name,
"project_id": self._id,
})
self._project_created_on_compute.add(compute)
yield from node.create()
self._nodes[node.id] = node
self.controller.notification.emit("node.created", node.__json__())
if dump:
self.dump()
return node
@locked_coroutine
@ -941,10 +946,12 @@ class Project:
raise aiohttp.web.HTTPConflict(text="Cannot duplicate node data while the node is running")
data = copy.deepcopy(node.__json__(topology_dump=True))
# Some properties like internal ID should not be duplicate
# Some properties like internal ID should not be duplicated
for unique_property in (
'node_id',
'name',
'mac_addr',
'mac_address',
'compute_id',
'application_id',
'dynamips_id'):

@ -57,7 +57,7 @@ class CrashReport:
Report crash to a third party service
"""
DSN = "sync+https://abb552c4f16c45c2ab75c84641100d6e:279c28ac32794198be94f0d17ad50a54@sentry.io/38482"
DSN = "sync+https://9bd029d7f92b48178b01868465532d6e:9f4a6a513bd1452fbfd1771ae2ca8b66@sentry.io/38482"
if hasattr(sys, "frozen"):
cacert = get_resource("cacert.pem")
if cacert is not None and os.path.isfile(cacert):

@ -188,7 +188,7 @@ class ATMSwitchHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend an ATM Relay switch")
description="Suspend an ATM Relay switch (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

@ -168,7 +168,7 @@ class CloudHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend a cloud")
description="Suspend a cloud (does nothing)")
def suspend(request, response):
Builtin.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

@ -105,6 +105,24 @@ class DockerHandler:
yield from container.stop()
response.set_status(204)
# New in 2.1.3: suspend support for Docker nodes. Unlike the Dynamips /
# builtin "does nothing" suspend handlers, this actually pauses the container.
@Route.post(
r"/projects/{project_id}/docker/nodes/{node_id}/suspend",
parameters={
"project_id": "Project UUID",
"node_id": "Node UUID"
},
status_codes={
204: "Instance suspended",
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend a Docker container")
def suspend(request, response):
"""
Suspend (pause) a Docker container node.

Looks up the container by node_id within the given project and pauses
its processes. get_node is presumably what raises the 404 declared in
status_codes when the node doesn't exist — TODO confirm against the
manager implementation.
"""
docker_manager = Docker.instance()
container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from container.pause()
response.set_status(204)
@Route.post(
r"/projects/{project_id}/docker/nodes/{node_id}/reload",
parameters={

@ -191,7 +191,7 @@ class EthernetHubHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend an Ethernet hub")
description="Suspend an Ethernet hub (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

@ -204,7 +204,7 @@ class EthernetSwitchHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend an Ethernet switch")
description="Suspend an Ethernet switch (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

@ -188,7 +188,7 @@ class FrameRelaySwitchHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend a Frame Relay switch")
description="Suspend a Frame Relay switch (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

@ -207,6 +207,24 @@ class IOUHandler:
yield from vm.stop()
response.set_status(204)
# New in 2.1.3: suspend endpoint for IOU nodes. Intentionally a no-op
# (see description) — it only validates that the node exists so clients
# get a uniform suspend API across node types.
@Route.post(
r"/projects/{project_id}/iou/nodes/{node_id}/suspend",
parameters={
"project_id": "Project UUID",
"node_id": "Node UUID"
},
status_codes={
204: "Instance suspended",
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend an IOU instance (does nothing)")
def suspend(request, response):
"""
No-op suspend for an IOU instance.

IOU has no real suspend operation; this handler only checks the node
exists (get_node presumably raises the declared 404 otherwise — TODO
confirm) and then reports success with 204.
"""
iou_manager = IOU.instance()
iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.set_status(204)
@Route.post(
r"/projects/{project_id}/iou/nodes/{node_id}/reload",
parameters={

@ -166,7 +166,7 @@ class NatHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend a nat")
description="Suspend a nat (does nothing)")
def suspend(request, response):
Builtin.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])

@ -48,7 +48,7 @@ class NotificationHandler:
with notifications.queue() as queue:
while True:
try:
notification = yield from queue.get_json(5)
notification = yield from queue.get_json(1)
except asyncio.futures.CancelledError:
break
if ws.closed:

@ -184,16 +184,15 @@ class VPCSHandler:
"node_id": "Node UUID"
},
status_codes={
204: "Instance stopped",
204: "Instance suspended",
400: "Invalid request",
404: "Instance doesn't exist"
},
description="Suspend a VPCS instance (stop it)")
def stop(request, response):
description="Suspend a VPCS instance (does nothing)")
def suspend(request, response):
vpcs_manager = VPCS.instance()
vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
yield from vm.stop()
vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.set_status(204)
@Route.post(

@ -95,7 +95,7 @@ class ServerHandler:
})
def check_version(request, response):
if request.json["version"] != __version__:
raise HTTPConflict(text="Client version {} differs with server version {}".format(request.json["version"], __version__))
raise HTTPConflict(text="Client version {} is not the same as server version {}".format(request.json["version"], __version__))
response.json({"version": __version__})
@Route.get(

@ -16,7 +16,6 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import psutil
import json
import psutil
@ -33,10 +32,10 @@ class NotificationQueue(asyncio.Queue):
@asyncio.coroutine
def get(self, timeout):
"""
When timeout is expire we send a ping notification with server informations
When the timeout expires we send a ping notification with server information
"""
# At first get we return a ping so the client receive immediately data
# At first get we return a ping so the client immediately receives data
if self._first:
self._first = False
return ("ping", self._getPing(), {})

@ -23,8 +23,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
__version__ = "2.1.2"
__version_info__ = (2, 1, 2, 0)
__version__ = "2.1.3"
__version_info__ = (2, 1, 3, 0)
# If it's a git checkout try to add the commit
if "dev" in __version__:

@ -1,11 +1,10 @@
jsonschema>=2.4.0
aiohttp>=2.2.0,<2.4.0 # pyup: ignore
aiohttp-cors>=0.5.3,<0.6.0 # pyup: ignore
yarl>=0.11,<0.12 # pyup: ignore
yarl>=0.11
Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
zipstream>=1.1.4
typing>=3.5.3.0 # Otherwise yarl fails with python 3.4
multidict<3.2.0 # Otherwise fails when upgraded to v3.2.0
prompt-toolkit

@ -25,7 +25,7 @@ function help {
echo "Usage:" >&2
echo "--with-openvpn: Install Open VPN" >&2
echo "--with-iou: Install IOU" >&2
echo "--with-i386-repository: Add i386 repositories require by IOU if they are not available on the system. Warning this will replace your source.list in order to use official ubuntu mirror" >&2
echo "--with-i386-repository: Add the i386 repositories required by IOU if they are not already available on the system. Warning: this will replace your source.list in order to use the official Ubuntu mirror" >&2
echo "--unstable: Use the GNS3 unstable repository"
echo "--help: This help" >&2
}
@ -37,7 +37,7 @@ function log {
lsb_release -d | grep "LTS" > /dev/null
if [ $? != 0 ]
then
echo "You can use this script on Ubuntu LTS only"
echo "This script can only be run on a Linux Ubuntu LTS release"
exit 1
fi

@ -20,7 +20,7 @@ from gns3server.compute.dynamips.nodes.ethernet_switch import EthernetSwitchCons
from gns3server.compute.nios.nio_udp import NIOUDP
def test_arp_command(async_run):
def test_mac_command(async_run):
node = AsyncioMagicMock()
node.name = "Test"
node.nios = {}
@ -30,7 +30,7 @@ def test_arp_command(async_run):
node.nios[1].name = "Ethernet1"
node._hypervisor.send = AsyncioMagicMock(return_value=["0050.7966.6801 1 Ethernet0", "0050.7966.6802 1 Ethernet1"])
console = EthernetSwitchConsole(node)
assert async_run(console.arp()) == \
assert async_run(console.mac()) == \
"Port Mac VLAN\n" \
"Ethernet0 00:50:79:66:68:01 1\n" \
"Ethernet1 00:50:79:66:68:02 1\n"

@ -45,7 +45,8 @@ def test_version_invalid_input(http_controller):
query = {'version': "0.4.2"}
response = http_controller.post('/version', query)
assert response.status == 409
assert response.json == {'message': 'Client version 0.4.2 differs with server version {}'.format(__version__),
assert response.json == {'message': 'Client version 0.4.2 is not the same as server version {}'.format(__version__),
'status': 409}

Loading…
Cancel
Save