mirror of https://github.com/GNS3/gns3-server synced 2024-11-24 09:18:08 +00:00

Merge branch '2.2'

grossmj 2019-07-12 12:07:44 +02:00
commit 6a7d47ce97
48 changed files with 926 additions and 803 deletions


@ -1,5 +1,48 @@
# Change Log
## 2.2.0b4 11/07/2019
* Requires a project to be opened to start/stop/suspend all nodes. Fixes #1609
* Fix issue when starting GNS3 VM for Hyper-V
* Set defaults for custom cloud nodes.
* Fix issue when trying to rename a Dynamips node that is already powered on. Fixes #2824
* Remove deprecated Qemu parameter to run legacy ASA VMs. Fixes #2827
* Add debug message when searching for an image. Ref https://github.com/GNS3/gns3-gui/issues/2828
## 2.2.0b3 15/06/2019
* Fix template migration issues from GUI to controller. Fixes https://github.com/GNS3/gns3-gui/issues/2803
* Refresh mounted media after ISO switch.
* Resolve conflicts in docker volumes instead of error. Fixes https://github.com/GNS3/gns3-server/issues/1595
* %guest-cid% variable implementation for Qemu VMs. Fixes https://github.com/GNS3/gns3-gui/issues/2804
* Fix KeyError: 'usage' exception when configuring IOU template. Fixes https://github.com/GNS3/gns3-gui/issues/2806
## 2.2.0b2 29/05/2019
* Ignore Unicode errors when reading base config file contents.
* Sync appliances.
* Support snapshots for portable projects. Fixes https://github.com/GNS3/gns3-gui/issues/2792
* Update the GNS3 version in topology file if converted. Ref https://github.com/GNS3/gns3-gui/issues/2798
* Support for log rotation and compression. Fixes #1586
* Do not start QEMU console if QEMU process is not started. Fixes https://github.com/GNS3/gns3-gui/issues/2712
* Avoid sending warning message all the time for Ethernet switch.
* Support to include snapshots in portable projects.
## 2.1.20 29/05/2019
* Ignore Unicode errors when reading base config file contents.
## 2.1.19 28/05/2019
* Sync appliances.
* Remove yarl from requirements.txt since it is installed by aiohttp.
* Drop typing dependency.
## 2.1.18 22/05/2019
* Revert "Force aiohttp version to 2.3.10 and aiohttp-cors version to 0.5.3" Ref https://github.com/GNS3/gns3-server/issues/1583 Ref https://github.com/GNS3/gns3-server/issues/1592
* Fix invalid reStructuredText for long description in setup.py
## 2.2.0b1 21/05/2019
* Upgrade GNS3 Web UI to v2019.2.0-alpha.3


@ -0,0 +1,68 @@
{
"name": "ParrotOS",
"category": "guest",
"description": " Parrot is a GNU/Linux distribution based on Debian Testing and designed with Security, Development and Privacy in mind. It includes a full portable laboratory for security and digital forensics experts, but it also includes all you need to develop your own software or protect your privacy while surfing the net.",
"vendor_name": "Parrot Project",
"vendor_url": "https://parrotsec.org/",
"documentation_url": "https://docs.parrotsec.org/doku.php",
"product_name": "ParrotOS",
"product_url": "https://parrotsec.org/",
"registry_version": 3,
"status": "stable",
"maintainer": "Brent Stewart",
"maintainer_email": "brent@stewart.tc",
"usage": "Passwords are set during installation.",
"symbol": "parrotlogo.png",
"qemu": {
"adapter_type": "e1000",
"adapters": 1,
"ram": 2048,
"arch": "x86_64",
"console_type": "vnc",
"kvm": "require"
},
"images": [
{
"filename": "Parrot-security-4.6_amd64.iso",
"version": "Security Build 4.6",
"md5sum": "ead812edc83119e8bcb4ee9daabdc105",
"filesize": 3788668928,
"download_url": "https://www.parrotsec.org/download-security.php",
"direct_download_url": "https://download.parrotsec.org/parrot/iso/4.6/Parrot-security-4.6_amd64.iso"
},
{
"filename": "Parrot-security-3.11_amd64.iso",
"version": "Security Build 3.11",
"md5sum": "71c94474fb474f682da0844d95f0040b",
"filesize": 3788668928,
"download_url": "https://www.parrotsec.org/download.fx",
"direct_download_url": "https://www.parrotsec.org/download-full.fx"
},
{
"filename": "empty30G.qcow2",
"version": "1.0",
"md5sum": "3411a599e822f2ac6be560a26405821a",
"filesize": 197120,
"download_url": "https://sourceforge.net/projects/gns-3/files/Empty%20Qemu%30disk/",
"direct_download_url": "http://sourceforge.net/projects/gns-3/files/Empty%20Qemu%20disk/empty30G.qcow2/download"
}
],
"versions": [
{
"name": "Security Build 4.6",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "Parrot-security-4.6_amd64.iso"
}
},
{
"name": "Security Build 3.11",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "Parrot-security-3.11_amd64.iso"
}
}
]
}
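
Each image entry in an appliance file like the one above lists an "md5sum" and "filesize" that a downloaded file can be checked against before the image is imported. A minimal sketch of such a check (the helper name and paths are illustrative, not part of the server code):

import hashlib
import json
import os

def verify_image(appliance_path, image_path):
    """Return True if image_path matches an image entry in the appliance file."""
    with open(appliance_path) as f:
        appliance = json.load(f)
    filename = os.path.basename(image_path)
    for entry in appliance.get("images", []):
        if entry["filename"] == filename:
            if os.path.getsize(image_path) != entry["filesize"]:
                return False
            md5 = hashlib.md5()
            with open(image_path, "rb") as img:
                for chunk in iter(lambda: img.read(1024 * 1024), b""):
                    md5.update(chunk)
            return md5.hexdigest() == entry["md5sum"]
    return False

# Example: verify_image("parrot.gns3a", "/tmp/Parrot-security-4.6_amd64.iso")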


@ -24,9 +24,17 @@
"kvm": "allow" "kvm": "allow"
}, },
"images": [ "images": [
{
"filename": "SNG7-FPBX-64bit-1904-2.iso",
"version": "14-1904",
"md5sum": "f37c316bc0ff208682769b6f2d468e93",
"filesize": 2015363072,
"download_url": "https://www.freepbx.org/downloads/",
"direct_download_url": "https://downloads.freepbxdistro.org/ISO/SNG7-FPBX-64bit-1904-2.iso"
},
{
"filename": "SNG7-FPBX-64bit-1805-2.iso",
"version": "14",
"version": "14-1805",
"md5sum": "64f0c38c17ce680f7106f94183bc5745",
"filesize": 1755316224,
"download_url": "https://www.freepbx.org/downloads/",
@ -67,7 +75,14 @@
],
"versions": [
{
"name": "14",
"name": "14-1904",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "SNG7-FPBX-64bit-1904-2.iso"
}
},
{
"name": "14-1805",
"images": { "images": {
"hda_disk_image": "empty30G.qcow2", "hda_disk_image": "empty30G.qcow2",
"cdrom_image": "SNG7-FPBX-64bit-1805-2.iso" "cdrom_image": "SNG7-FPBX-64bit-1805-2.iso"


@ -22,7 +22,7 @@
{
"filename": "BSDRP-1.92-full-amd64-serial.img",
"version": "1.92",
"md5sum": "1a123f3ba7ce73aa5823a65a7c571be1",
"md5sum": "b02f2502188915490bb9756ffca36910",
"filesize": 1000000000,
"download_url": "https://bsdrp.net/downloads",
"direct_download_url": "https://sourceforge.net/projects/bsdrp/files/BSD_Router_Project/1.92/amd64/BSDRP-1.92-full-amd64-serial.img.xz/download",


@ -27,6 +27,22 @@
},
"images": [
{
"filename": "CentOS 7-18.10 (64bit).vmdk",
"version": "7-1810",
"md5sum": "7307e143c11910f3c782b1e013e6f14f",
"filesize": 4886233088,
"download_url": "http://www.osboxes.org/centos/"
},
{
"filename": "CentOS 7-1804 (64bit).vmdk",
"version": "7-1804",
"md5sum": "eae8dc48bb390c0c2d9a9a0432ced9bc",
"filesize": 4701356032,
"download_url": "http://www.osboxes.org/centos/"
},
{
"filename": "CentOS 7-1611 (64bit).vmdk",
"version": "7-1611",
@ -36,6 +52,20 @@
}
],
"versions": [
{
"name": "7-1810",
"images": {
"hda_disk_image": "CentOS 7-18.10 (64bit).vmdk"
}
},
{
"name": "7-1804",
"images": {
"hda_disk_image": "CentOS 7-1804 (64bit).vmdk"
}
},
{
"name": "7-1611",
"images": {


@ -24,17 +24,17 @@
},
"images": [
{
"filename": "Check_Point_R80.10_T421_Gaia.iso",
"filename": "Check_Point_R80.10_T479_Gaia.iso",
"version": "80.10",
"md5sum": "12d9723fadb89bb722e20ca3f89012ce",
"md5sum": "1b97cce21dbee78fec505b44e637cc9a",
"filesize": 3420127232,
"filesize": 3301212160,
"download_url": "https://supportcenter.checkpoint.com/supportcenter/portal?eventSubmit_doGoviewsolutiondetails=&solutionid=sk104859"
"download_url": "https://supportcenter.checkpoint.com/supportcenter/portal/user/anon/page/default.psml/media-type/html?action=portlets.DCFileAction&eventSubmit_doGetdcdetails=&fileid=54509"
},
{
"filename": "Check_Point_R77.30_T204_Install_and_Upgrade.Gaia.iso",
"filename": "Check_Point_R77.30_Install_and_Upgrade_T5.Gaia.iso",
"version": "77.30",
"md5sum": "6fa7586bbb6832fa965d3173276c5b87",
"md5sum": "3f6f459df3fb3beaf7b2457f08982425",
"filesize": 2799271936,
"filesize": 289692076,
"download_url": "https://supportcenter.checkpoint.com/supportcenter/portal?eventSubmit_doGoviewsolutiondetails=&solutionid=sk104859"
},
{
@ -58,14 +58,14 @@
"name": "80.10", "name": "80.10",
"images": { "images": {
"hda_disk_image": "empty100G.qcow2", "hda_disk_image": "empty100G.qcow2",
"cdrom_image": "Check_Point_R80.10_T421_Gaia.iso" "cdrom_image": "Check_Point_R80.10_T479_Gaia.iso"
} }
}, },
{ {
"name": "77.30", "name": "77.30",
"images": { "images": {
"hda_disk_image": "empty100G.qcow2", "hda_disk_image": "empty100G.qcow2",
"cdrom_image": "Check_Point_R77.30_T204_Install_and_Upgrade.Gaia.iso" "cdrom_image": "Check_Point_R77.30_Install_and_Upgrade_T5.Gaia.iso"
} }
}, },
{ {


@ -26,6 +26,13 @@
"options": "-smp 2 -smbios type=1,product=KVM" "options": "-smp 2 -smbios type=1,product=KVM"
}, },
"images": [ "images": [
{
"filename": "ise-2.4.0.357.SPA.x86_64.iso",
"version": "2.4.0.357",
"md5sum": "766945618a0ff35f6c720b3bc4b46bfb",
"filesize": 8326062080,
"download_url": "https://software.cisco.com/download/home/283801620/type/283802505/release/2.4.0"
},
{
"filename": "ise-2.2.0.470.SPA.x86_64.iso",
"version": "2.2.0.470",
@ -64,6 +71,13 @@
}
],
"versions": [
{
"name": "2.4.0.357",
"images": {
"hda_disk_image": "empty200G.qcow2",
"cdrom_image": "ise-2.4.0.357.SPA.x86_64.iso"
}
},
{
"name": "2.2.0.470",
"images": {


@ -21,6 +21,15 @@
"kvm": "allow" "kvm": "allow"
}, },
"images": [ "images": [
{
"filename": "coreos_production_qemu_image.img",
"version": "2079.4.0",
"md5sum": "d5e28d68bcadf252ff9c909a159b9504",
"filesize": 970129408,
"download_url": "http://stable.release.core-os.net/amd64-usr/2079.4.0/",
"direct_download_url": "http://stable.release.core-os.net/amd64-usr/2079.4.0/coreos_production_qemu_image.img.bz2",
"compression": "bzip2"
},
{
"filename": "coreos_production_qemu_image.2023.5.0.img",
"version": "2023.5.0",
@ -194,6 +203,12 @@
}
],
"versions": [
{
"name": "2079.4.0",
"images": {
"hda_disk_image": "coreos_production_qemu_image.img"
}
},
{
"name": "2023.5.0",
"images": {


@ -23,6 +23,14 @@
"kvm": "require" "kvm": "require"
}, },
"images": [ "images": [
{
"filename": "cumulus-linux-3.7.6-vx-amd64-qemu.qcow2",
"version": "3.7.6",
"md5sum": "34de965074332cbc40d51832da7d6a5d",
"filesize": 540278784,
"download_url": "https://cumulusnetworks.com/cumulus-vx/download/",
"direct_download_url": "http://cumulusfiles.s3.amazonaws.com/CumulusLinux-3.7.6/cumulus-linux-3.7.6-vx-amd64-qemu.qcow2"
},
{
"filename": "cumulus-linux-3.7.3-vx-amd64-qemu.qcow2",
"version": "3.7.3",
@ -197,6 +205,12 @@
}
],
"versions": [
{
"name": "3.7.6",
"images": {
"hda_disk_image": "cumulus-linux-3.7.6-vx-amd64-qemu.qcow2"
}
},
{
"name": "3.7.3",
"images": {


@ -28,6 +28,13 @@
"images": [ "images": [
{
"filename": "EXOS-VM_v30.2.1.8.qcow2",
"version": "30.2.1.8",
"md5sum": "4bdbf3ddff7a030e19c6bb71270b56d2",
"filesize": 355205120,
"direct_download_url": "https://akamai-ep.extremenetworks.com/Extreme_P/github-en/Virtual_EXOS/EXOS-VM_v30.2.1.8.qcow2"
},
{
"filename": "EXOS-VM_v30.1.1.4.qcow2",
"version": "30.1.1.4",
@ -88,6 +95,13 @@
],
"versions": [
{
"name": "30.2.1.8",
"images": {
"hda_disk_image": "EXOS-VM_v30.2.1.8.qcow2"
}
},
{
"name": "30.1.1.4",
"images": {


@ -24,6 +24,15 @@
"kvm": "allow" "kvm": "allow"
}, },
"images": [ "images": [
{
"filename": "ipfire-2.23.2gb-ext4.x86_64-full-core131.img",
"version": "2.23",
"md5sum": "604c06e20c36985ff00802da4509340c",
"filesize": 1541160960,
"download_url": "http://www.ipfire.org/download",
"direct_download_url": "https://muug.ca/mirror/ipfire/releases/ipfire-2.x/2.23-core131/ipfire-2.23.2gb-ext4.x86_64-full-core131.img.xz",
"compression": "xz"
},
{
"filename": "ipfire-2.21.2gb-ext4.x86_64-full-core129.img",
"version": "2.21.129",
@ -98,6 +107,12 @@
}
],
"versions": [
{
"name": "2.23",
"images": {
"hda_disk_image": "ipfire-2.23.2gb-ext4.x86_64-full-core131.img"
}
},
{
"name": "2.21.129",
"images": {


@ -10,16 +10,34 @@
"status": "stable", "status": "stable",
"maintainer": "GNS3 Team", "maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net", "maintainer_email": "developers@gns3.net",
"usage": "Default password is toor", "usage": "Default password is toor\nEnable persistence by selecting boot option 'Live USB Persistence'",
"port_name_format": "eth{0}",
"qemu": { "qemu": {
"adapter_type": "e1000", "adapter_type": "e1000",
"adapters": 8, "adapters": 8,
"ram": 1024, "ram": 1024,
"arch": "x86_64", "arch": "x86_64",
"console_type": "vnc", "console_type": "vnc",
"boot_priority": "d",
"kvm": "require" "kvm": "require"
}, },
"images": [ "images": [
{
"filename": "kali-linux-2019.2-amd64.iso",
"version": "2019.2",
"md5sum": "0f89b6225d7ea9c18682f7cc541c1179",
"filesize": 3353227264,
"download_url": "https://www.kali.org/downloads/",
"direct_download_url": "http://cdimage.kali.org/kali-2019.2/kali-linux-2019.2-amd64.iso"
},
{
"filename": "kali-linux-mate-2019.2-amd64.iso",
"version": "2019.2 (MATE)",
"md5sum": "fec8dd7009f932c51a74323df965a709",
"filesize": 3313217536,
"download_url": "https://www.kali.org/downloads/",
"direct_download_url": "http://cdimage.kali.org/kali-2019.2/kali-linux-mate-2019.2-amd64.iso"
},
{
"filename": "kali-linux-2019.1a-amd64.iso",
"version": "2019.1a",
@ -99,66 +117,98 @@
"filesize": 3320512512,
"download_url": "https://www.offensive-security.com/kali-linux-vmware-arm-image-download/",
"direct_download_url": "http://images.kali.org/Kali-Linux-2.0.0-vm-amd64.7z"
},
{
"filename": "kali-linux-persistence-1gb.qcow2",
"version": "1.0",
"md5sum": "14e9c92f3ba5a0bd1128c1ea26a129ea",
"filesize": 34734080,
"download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/kali-linux-persistence-1gb.qcow2/download"
}
],
"versions": [
{
"name": "2019.2",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2019.2-amd64.iso"
}
},
{
"name": "2019.2 (MATE)",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-mate-2019.2-amd64.iso"
}
},
{
"name": "2019.1a",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2019.1a-amd64.iso"
}
},
{
"name": "2018.4",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2018.4-amd64.iso"
}
},
{
"name": "2018.3a",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2018.3a-amd64.iso"
}
},
{
"name": "2018.1",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2018.1-amd64.iso"
}
},
{
"name": "2017.3",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2017.3-amd64.iso"
}
},
{
"name": "2017.2",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2017.2-amd64.iso"
}
},
{
"name": "2017.1",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2017.1-amd64.iso"
}
},
{
"name": "2016.2",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2016.2-amd64.iso"
}
},
{
"name": "2016.1",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2016.1-amd64.iso"
}
},
{
"name": "2.0",
"images": {
"hda_disk_image": "kali-linux-persistence-1gb.qcow2",
"cdrom_image": "kali-linux-2.0-amd64.iso"
}
}


@ -1,97 +0,0 @@
{
"name": "LEDE",
"category": "router",
"description": "LEDE is a highly extensible GNU/Linux distribution for embedded devices (typically wireless routers). Unlike many other distributions for these routers, OpenWrt is built from the ground up to be a full-featured, easily modifiable operating system for your router. In practice, this means that you can have all the features you need with none of the bloat, powered by a Linux kernel that's more recent than most other distributions.",
"vendor_name": "LEDE Project",
"vendor_url": "https://lede-project.org/",
"documentation_url": "http://wiki.openwrt.org/doc/",
"product_name": "LEDE",
"product_url": "https://lede-project.org/",
"registry_version": 3,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Ethernet0 is the LAN link, Ethernet1 the WAN link.",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 2,
"ram": 64,
"arch": "i386",
"console_type": "telnet",
"kvm": "allow"
},
"images": [
{
"filename": "lede-17.01.4-x86-generic-combined-squashfs.img",
"version": "17.01.4",
"md5sum": "ae5d8d3fcab109565fe337d28e51c4b4",
"filesize": 19779546,
"download_url": "https://downloads.lede-project.org/releases/17.01.4/targets/x86/generic/",
"direct_download_url": "https://downloads.lede-project.org/releases/17.01.4/targets/x86/generic/lede-17.01.4-x86-generic-combined-squashfs.img"
},
{
"filename": "lede-17.01.3-x86-generic-combined-squashfs.img",
"version": "17.01.3",
"md5sum": "d315fc638160a9aec0966d58828bfccf",
"filesize": 19775618,
"download_url": "https://downloads.lede-project.org/releases/17.01.3/targets/x86/generic/",
"direct_download_url": "https://downloads.lede-project.org/releases/17.01.3/targets/x86/generic/lede-17.01.3-x86-generic-combined-squashfs.img"
},
{
"filename": "lede-17.01.2-x86-generic-combined-squashfs.img",
"version": "17.01.2",
"md5sum": "a466e493ef12935dad5e0c622b1a7859",
"filesize": 19774794,
"download_url": "https://downloads.lede-project.org/releases/17.01.2/targets/x86/generic/",
"direct_download_url": "https://downloads.lede-project.org/releases/17.01.2/targets/x86/generic/lede-17.01.2-x86-generic-combined-squashfs.img"
},
{
"filename": "lede-17.01.1-x86-generic-combined-squashfs.img",
"version": "17.01.1",
"md5sum": "b050e734c605a34a429389c752ae7c30",
"filesize": 19771166,
"download_url": "https://downloads.lede-project.org/releases/17.01.1/targets/x86/generic/",
"direct_download_url": "https://downloads.lede-project.org/releases/17.01.1/targets/x86/generic/lede-17.01.1-x86-generic-combined-squashfs.img"
},
{
"filename": "lede-17.01.0-r3205-59508e3-x86-generic-combined-squashfs.img",
"version": "17.01.0",
"md5sum": "3c5e068d50a377d4e26b548ab1ca7b1e",
"filesize": 19755118,
"download_url": "https://downloads.lede-project.org/releases/17.01.0/targets/x86/generic/",
"direct_download_url": "https://downloads.lede-project.org/releases/17.01.0/targets/x86/generic/lede-17.01.0-r3205-59508e3-x86-generic-combined-squashfs.img"
}
],
"versions": [
{
"name": "lede 17.01.4",
"images": {
"hda_disk_image": "lede-17.01.4-x86-generic-combined-squashfs.img"
}
},
{
"name": "lede 17.01.3",
"images": {
"hda_disk_image": "lede-17.01.3-x86-generic-combined-squashfs.img"
}
},
{
"name": "lede 17.01.2",
"images": {
"hda_disk_image": "lede-17.01.2-x86-generic-combined-squashfs.img"
}
},
{
"name": "lede 17.01.1",
"images": {
"hda_disk_image": "lede-17.01.1-x86-generic-combined-squashfs.img"
}
},
{
"name": "lede 17.01.0",
"images": {
"hda_disk_image": "lede-17.01.0-r3205-59508e3-x86-generic-combined-squashfs.img"
}
}
]
}


@ -26,6 +26,15 @@
"options": "-nographic" "options": "-nographic"
}, },
"images": [ "images": [
{
"filename": "chr-6.44.3.img",
"version": "6.44.2",
"md5sum": "c46b33125d536faa24473a519abbb89d",
"filesize": 67108864,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download.mikrotik.com/routeros/6.44.2/chr-6.44.2.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.44.2.img",
"version": "6.44.2",
@ -97,198 +106,15 @@
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.40.3/chr-6.40.3.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.39.2.img",
"version": "6.39.2",
"md5sum": "ecb37373dedfba04267a999d23b8e203",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.39.2/chr-6.39.2.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.39.1.img",
"version": "6.39.1",
"md5sum": "c53293bc41f76d85a8642005fd1cbd54",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.39.1/chr-6.39.1.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.39.img",
"version": "6.39",
"md5sum": "7e77c8ac4c9aeaf88f6ff15897f33163",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.39/chr-6.39.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.38.7.img",
"version": "6.38.7",
"md5sum": "69a51c96b1247bbaf1253d2873617122",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.38.7/chr-6.38.7.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.38.5.img",
"version": "6.38.5",
"md5sum": "8147f42ea1ee96f580a35a298b7f9354",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.38.5/chr-6.38.5.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.38.1.img",
"version": "6.38.1",
"md5sum": "753ed7c86e0f54fd9e18d044db64538d",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "https://download2.mikrotik.com/routeros/6.38.1/chr-6.38.1.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.38.img",
"version": "6.38",
"md5sum": "37e2165112f8a9beccac06a9a6009000",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.38/chr-6.38.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.37.3.img",
"version": "6.37.3",
"md5sum": "bda87db475f80debdf3181accf6b78e2",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.37.3/chr-6.37.3.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.37.1.img",
"version": "6.37.1",
"md5sum": "713b14a5aba9f967f7bdd9029c8d85b6",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.37.1/chr-6.37.1.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.36.4.img",
"version": "6.36.4",
"md5sum": "09527bde50697711926c08d545940c1e",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.36.4/chr-6.36.4.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.34.2.vmdk",
"version": "6.34.2 (.vmdk)",
"md5sum": "0360f121b76a8b491a05dc37640ca319",
"filesize": 30277632,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.34.2/chr-6.34.2.vmdk"
},
{
"filename": "chr-6.34.2.vdi",
"version": "6.34.2 (.vdi)",
"md5sum": "e7e4021aeeee2eaabd024d48702bb2e1",
"filesize": 30409728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.34.2/chr-6.34.2.vdi"
},
{
"filename": "chr-6.34.2.img",
"version": "6.34.2 (.img)",
"md5sum": "984d4d11c2ff209fcdc21ac42895edbe",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.34.2/chr-6.34.2.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.34.vmdk",
"version": "6.34 (.vmdk)",
"md5sum": "c5e6d192ae19d263a9a313d4b4bee7e4",
"filesize": 30277632,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.34/chr-6.34.vmdk"
},
{
"filename": "chr-6.34.vdi",
"version": "6.34 (.vdi)",
"md5sum": "34b161f83a792c744c76a529afc094a8",
"filesize": 30409728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.34/chr-6.34.vdi"
},
{
"filename": "chr-6.34.img",
"version": "6.34 (.img)",
"md5sum": "32ffde7fb934c7bfee555c899ccd77b6",
"filesize": 134217728,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.34/chr-6.34.img.zip",
"compression": "zip"
},
{
"filename": "chr-6.33.5.vmdk",
"version": "6.33.5 (.vmdk)",
"md5sum": "cd284e28aa02ae59f55ed8f43ff27fbf",
"filesize": 23920640,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.33.5/chr-6.33.5.vmdk"
},
{
"filename": "chr-6.33.5.vdi",
"version": "6.33.5 (.vdi)",
"md5sum": "fa84e63a558e7c61d7d338386cfd08c9",
"filesize": 24118272,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.33.5/chr-6.33.5.vdi"
},
{
"filename": "chr-6.33.5.img",
"version": "6.33.5 (.img)",
"md5sum": "210cc8ad06f25c9f27b6b99f6e00bd91",
"filesize": 67108864,
"direct_download_url": "http://download2.mikrotik.com/routeros/6.33.5/chr-6.33.5.img.zip",
"download_url": "http://www.mikrotik.com/download",
"compression": "zip"
},
{
"filename": "chr-6.33.3.vmdk",
"version": "6.33.3 (.vmdk)",
"md5sum": "08532a5af1a830182d65c416eab2b089",
"filesize": 23920640,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.33.3/chr-6.33.3.vmdk"
},
{
"filename": "chr-6.33.2.vmdk",
"version": "6.33.2 (.vmdk)",
"md5sum": "6291893c2c9626603c6d38d23390a8be",
"filesize": 23920640,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.33.2/chr-6.33.2.vmdk"
},
{
"filename": "chr-6.33.vmdk",
"version": "6.33 (.vmdk)",
"md5sum": "63bee5405fa1e209388adc6b5f78bb70",
"filesize": 23920640,
"download_url": "http://www.mikrotik.com/download",
"direct_download_url": "http://download2.mikrotik.com/routeros/6.33/chr-6.33.vmdk"
}
],
"versions": [
{
"name": "6.44.3",
"images": {
"hda_disk_image": "chr-6.44.3.img"
}
},
{
"name": "6.44.2",
"images": {
@ -330,138 +156,6 @@
"images": {
"hda_disk_image": "chr-6.40.3.img"
}
},
{
"name": "6.39.2",
"images": {
"hda_disk_image": "chr-6.39.2.img"
}
},
{
"name": "6.39.1",
"images": {
"hda_disk_image": "chr-6.39.1.img"
}
},
{
"name": "6.39",
"images": {
"hda_disk_image": "chr-6.39.img"
}
},
{
"name": "6.38.7",
"images": {
"hda_disk_image": "chr-6.38.7.img"
}
},
{
"name": "6.38.5",
"images": {
"hda_disk_image": "chr-6.38.5.img"
}
},
{
"name": "6.38.1",
"images": {
"hda_disk_image": "chr-6.38.1.img"
}
},
{
"name": "6.38",
"images": {
"hda_disk_image": "chr-6.38.img"
}
},
{
"name": "6.37.3",
"images": {
"hda_disk_image": "chr-6.37.3.img"
}
},
{
"name": "6.37.1",
"images": {
"hda_disk_image": "chr-6.37.1.img"
}
},
{
"name": "6.36.4",
"images": {
"hda_disk_image": "chr-6.36.4.img"
}
},
{
"name": "6.34.2 (.vmdk)",
"images": {
"hda_disk_image": "chr-6.34.2.vmdk"
}
},
{
"name": "6.34.2 (.vdi)",
"images": {
"hda_disk_image": "chr-6.34.2.vdi"
}
},
{
"name": "6.34.2 (.img)",
"images": {
"hda_disk_image": "chr-6.34.2.img"
}
},
{
"name": "6.34 (.vmdk)",
"images": {
"hda_disk_image": "chr-6.34.vmdk"
}
},
{
"name": "6.34 (.vdi)",
"images": {
"hda_disk_image": "chr-6.34.vdi"
}
},
{
"name": "6.34 (.img)",
"images": {
"hda_disk_image": "chr-6.34.img"
}
},
{
"name": "6.33.5 (.vmdk)",
"images": {
"hda_disk_image": "chr-6.33.5.vmdk"
}
},
{
"name": "6.33.5 (.vdi)",
"images": {
"hda_disk_image": "chr-6.33.5.vdi"
}
},
{
"name": "6.33.5 (.img)",
"images": {
"hda_disk_image": "chr-6.33.5.img"
}
},
{
"name": "6.33.3 (.vmdk)",
"images": {
"hda_disk_image": "chr-6.33.3.vmdk"
}
},
{
"name": "6.33.2 (.vmdk)",
"images": {
"hda_disk_image": "chr-6.33.2.vmdk"
}
},
{
"name": "6.33 (.vmdk)",
"images": {
"hda_disk_image": "chr-6.33.vmdk"
}
}
]
}


@ -0,0 +1,44 @@
{
"name": "OpenNAC",
"category": "guest",
"description": "openNAC is an opensource Network Access Control for corporate LAN / WAN environments. It enables authentication, authorization and audit policy-based all access to network. It supports diferent network vendors like Cisco, Alcatel, 3Com or Extreme Networks, and different clients like PCs with Windows or Linux, Mac,devices like smartphones and tablets. Based on open source components and self-development It is based on industry standards such as FreeRadius, 802.1x, AD, ldap, ...It is very extensible, new features can be incorporated because it is architectured in plugins. Easily integrated with existing systems Last but not least, It provides value added services such as configuration management, network, backup configurations, Network Discovery and Network Monitoring. Download the OVA, then extract the VMDK (tar -xvf FILE.ova), then convert to qcow2 (qemu-img convert -O qcow2 FILE.vmdk FILE.qcow2).",
"vendor_name": "Opennactech",
"vendor_url": "http://www.opennac.org/opennac/en.html",
"documentation_url": "http://www.opennac.org/opennac/en/support.html",
"product_name": "OpenNAC",
"product_url": "https://opennac.org/",
"registry_version": 3,
"status": "stable",
"maintainer": "Brent Stewart",
"maintainer_email": "brent@stewart.tc",
"usage": "Passwords are set during installation.",
"symbol": "opennac.png",
"qemu": {
"adapter_type": "e1000",
"adapters": 1,
"ram": 512,
"arch": "x86_64",
"console_type": "vnc",
"kvm": "require",
"options": "-smp 2"
},
"images": [
{
"filename": "opennac_3711_img.qcow2",
"version": "OpenNAC 3711",
"md5sum": "88d6129265860aa58c5306cd7b413aab",
"filesize": 3968729088,
"download_url": "http://www.opennac.org/opennac/en/download.html",
"direct_download_url": "http://sourceforge.net/projects/opennac/files/ova/opennac_3711_img.ova/download"
}
],
"versions": [
{
"name": "OpenNAC 3711",
"images": {
"hda_disk_image": "opennac_3711_img.qcow2"
}
}
]
}
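
The description above ends with a manual recipe: download the OVA, extract the VMDK with tar, then convert it to qcow2 with qemu-img. A small sketch automating those two steps (it assumes qemu-img is installed and the OVA contains a single .vmdk; the function name is illustrative, not part of the server code):

import subprocess
import tarfile

def ova_to_qcow2(ova_path, qcow2_path):
    """Extract the first VMDK from an OVA and convert it to qcow2."""
    with tarfile.open(ova_path) as ova:
        vmdk_name = next(m.name for m in ova.getmembers() if m.name.endswith(".vmdk"))
        ova.extract(vmdk_name)
    subprocess.check_call(["qemu-img", "convert", "-O", "qcow2", vmdk_name, qcow2_path])

# Example: ova_to_qcow2("opennac_3711_img.ova", "opennac_3711_img.qcow2")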


@ -11,7 +11,8 @@
"status": "stable", "status": "stable",
"maintainer": "GNS3 Team", "maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net", "maintainer_email": "developers@gns3.net",
"usage": "Boot the live CD", "usage": "Rename the downloaded file to include the version number. Boot the live CD",
"symbol": "packetfence2.png",
"qemu": { "qemu": {
"adapter_type": "virtio-net-pci", "adapter_type": "virtio-net-pci",
"adapters": 2, "adapters": 2,
@ -22,6 +23,15 @@
"kvm": "require" "kvm": "require"
}, },
"images": [ "images": [
{
"filename": "PacketFenceZEN_USB-9.0.1.img",
"version": "9.0.1",
"md5sum": "05a18a5a3c53bf10e84082b3d8db3961",
"filesize": 3221225472,
"download_url": "https://packetfence.org/download.html#/zen",
"direct_download_url": "https://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/9.0.1/PacketFenceZEN_USB-9.0.1.tar.bz2/download",
"compression": "bzip2"
},
{
"filename": "PacketFenceZEN_USB-8.3.0.img",
"version": "8.3.0",
@ -75,54 +85,15 @@
"download_url": "https://packetfence.org/download.html#/zen",
"direct_download_url": "https://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/7.0.0/PacketFenceZEN_USB-7.0.0.tar.bz2/download",
"compression": "bzip2"
},
{
"filename": "PacketFenceZEN_USB-6.5.1.img",
"version": "6.5.1",
"md5sum": "937c02640bd487889b7071e8f094a62a",
"filesize": 3221225472,
"download_url": "https://packetfence.org/download.html#/zen",
"direct_download_url": "http://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/6.5.1/PacketFenceZEN_USB-6.5.1.tar.bz2/download",
"compression": "bzip2"
},
{
"filename": "PacketFenceZEN_USB-6.5.0.img",
"version": "6.5.0",
"md5sum": "5d5ff015f115e9dbcfd355f1bb22f5d9",
"filesize": 3221225472,
"download_url": "https://packetfence.org/download.html#/zen",
"direct_download_url": "http://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/6.5.0/PacketFenceZEN_USB-6.5.0.tar.bz2/download",
"compression": "bzip2"
},
{
"filename": "PacketFenceZEN_USB-6.4.0.img",
"version": "6.4.0",
"md5sum": "7f2bea58421d094152ea71f49cc3084a",
"filesize": 3221225472,
"download_url": "https://packetfence.org/download.html#/zen",
"direct_download_url": "https://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/6.4.0/PacketFenceZEN_USB-6.4.0.tar.bz2/download",
"compression": "bzip2"
},
{
"filename": "PacketFenceZEN_USB-6.3.0.img",
"version": "6.3.0",
"md5sum": "94e19349faedf292743fdc0ab48f8466",
"filesize": 3221225472,
"download_url": "https://packetfence.org/download.html#/zen",
"direct_download_url": "https://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/6.3.0/PacketFenceZEN_USB-6.3.0.tar.bz2/download",
"compression": "bzip2"
},
{
"filename": "PacketFenceZEN_USB-6.2.1.img",
"version": "6.2.1",
"md5sum": "f212be7c8621b90d973f500f00ef1277",
"filesize": 3221225472,
"download_url": "https://packetfence.org/download.html#/zen",
"direct_download_url": "http://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/6.2.1/PacketFenceZEN_USB-6.2.1.tar.bz2/download",
"compression": "bzip2"
}
],
"versions": [
{
"name": "9.0.1",
"images": {
"hda_disk_image": "PacketFenceZEN_USB-9.0.1.img"
}
},
{
"name": "8.3.0",
"images": {
@ -158,36 +129,6 @@
"images": {
"hda_disk_image": "PacketFenceZEN_USB-7.0.0.img"
}
},
{
"name": "6.5.0",
"images": {
"hda_disk_image": "PacketFenceZEN_USB-6.5.1.img"
}
},
{
"name": "6.5.0",
"images": {
"hda_disk_image": "PacketFenceZEN_USB-6.5.0.img"
}
},
{
"name": "6.4.0",
"images": {
"hda_disk_image": "PacketFenceZEN_USB-6.4.0.img"
}
},
{
"name": "6.3.0",
"images": {
"hda_disk_image": "PacketFenceZEN_USB-6.3.0.img"
}
},
{
"name": "6.2.1",
"images": {
"hda_disk_image": "PacketFenceZEN_USB-6.2.1.img"
}
}
]
}


@ -22,6 +22,14 @@
"kvm": "require" "kvm": "require"
}, },
"images": [ "images": [
{
"filename": "Parrot-security-4.6_amd64.iso",
"version": "Security Build 4.6",
"md5sum": "ead812edc83119e8bcb4ee9daabdc105",
"filesize": 3788668928,
"download_url": "https://www.parrotsec.org/download-security.php",
"direct_download_url": "https://download.parrotsec.org/parrot/iso/4.6/Parrot-security-4.6_amd64.iso"
},
{
"filename": "Parrot-security-3.11_amd64.iso",
"version": "Security Build 3.11",
@ -41,6 +49,13 @@
],
"versions": [
{
"name": "Security Build 4.6",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "Parrot-security-4.6_amd64.iso"
}
},
{
"name": "Security Build 3.11",
"images": {


@ -17,11 +17,19 @@
"adapter_type": "e1000", "adapter_type": "e1000",
"adapters": 2, "adapters": 2,
"ram": 3072, "ram": 3072,
"arch": "i386", "arch": "x86_64",
"console_type": "telnet", "console_type": "vnc",
"kvm": "allow" "kvm": "allow"
}, },
"images": [ "images": [
{
"filename": "securityonion-16.04.6.1.iso",
"version": "16.04.6.1",
"md5sum": "ca835cef92c2c0daafa16e789c343d1d",
"filesize": 2020605952,
"download_url": "https://github.com/Security-Onion-Solutions/security-onion/releases/",
"direct_download_url": "https://github.com/Security-Onion-Solutions/security-onion/releases/download/v16.04.5.3_20181010/securityonion-16.04.6.1.iso"
},
{
"filename": "securityonion-16.04.5.3.iso",
"version": "16.04.5.3",
@ -58,6 +66,12 @@
],
"versions": [
{
"name": "16.04.6.1",
"images": {
"hda_disk_image": "empty30G.qcow2",
"cdrom_image": "securityonion-16.04.6.1.iso"
}
},
{
"name": "16.04.5.3",
"images": {
"hda_disk_image": "empty30G.qcow2",


@ -26,45 +26,59 @@
},
"images": [
{
"filename": "Ubuntu_17.04-VM-64bit.vmdk",
"filename": "Ubuntu 19.04 (64bit).vmdk",
"version": "17.04",
"version": "19.04",
"md5sum": "5c82d69c49ba08179e9a94901f67da1f",
"md5sum": "21535675c54507e9325bf8774a7bd73e",
"filesize": 4792123392,
"filesize": 5558435840,
"download_url": "http://www.osboxes.org/ubuntu/"
},
{
"filename": "Ubuntu_16.10_Yakkety-VM-64bit.vmdk",
"filename": "Ubuntu 18.10 Cosmic (64Bit).vmdk",
"version": "16.10",
"version": "18.10",
"md5sum": "c835f24dbb86f5f61c78d992ed38b6b1",
"md5sum": "7f72be569356baa20863cd354d2efa60",
"filesize": 9133293568,
"filesize": 6747389952,
"download_url": "http://www.osboxes.org/ubuntu/"
},
{
"filename": "Ubuntu_16.04.3-VM-64bit.vmdk",
"filename": "Ubuntu 18.04.2 (64bit).vmdk",
"version": "16.04",
"version": "18.04.2",
"md5sum": "45bccf63f2777e492f022dbf025f67d0",
"md5sum": "d57b732d90759e3b3a62594a83f8f196",
"filesize": 4302110720,
"filesize": 6003097600,
"download_url": "http://www.osboxes.org/ubuntu/"
},
{
"filename": "Ubuntu 16.04.6 (64bit).vmdk",
"version": "16.04.6",
"md5sum": "33b2964cef607c1c9fe748db8a2fa6ea",
"filesize": 4780982272,
"download_url": "http://www.osboxes.org/ubuntu/"
}
],
"versions": [
{
"name": "17.04",
"name": "19.04",
"images": {
"hda_disk_image": "Ubuntu_17.04-VM-64bit.vmdk"
"hda_disk_image": "Ubuntu 19.04 (64bit).vmdk"
}
},
{
"name": "16.10",
"name": "18.10",
"images": {
"hda_disk_image": "Ubuntu_16.10_Yakkety-VM-64bit.vmdk"
"hda_disk_image": "Ubuntu 18.10 Cosmic (64Bit).vmdk"
}
},
{
"name": "16.04",
"name": "18.04.2",
"images": {
"hda_disk_image": "Ubuntu_16.04.3-VM-64bit.vmdk"
"hda_disk_image": "Ubuntu 18.04.2 (64bit).vmdk"
}
},
{
"name": "16.04.6",
"images": {
"hda_disk_image": "Ubuntu 16.04.6 (64bit).vmdk"
}
}
]
}


@ -1,7 +1,7 @@
{
"name": "VyOS",
"category": "router",
"description": "VyOS is a community fork of Vyatta, a Linux-based network operating system that provides software-based network routing, firewall, and VPN functionality.",
"description": "VyOS is a community fork of Vyatta, a Linux-based network operating system that provides software-based network routing, firewall, and VPN functionality. VyOS has a subscription LTS version and a community rolling release. The latest version in this appliance is in the rolling release track.",
"vendor_name": "Linux",
"vendor_url": "http://vyos.net/",
"documentation_url": "http://vyos.net/wiki/User_Guide",
@ -9,6 +9,7 @@
"product_url": "http://vyos.net/",
"registry_version": 3,
"status": "stable",
"symbol": "vyos.png",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Default username/password is vyos/vyos. At first boot the router will start from the cdrom, login and then type install system and follow the instructions.",
@ -24,10 +25,10 @@
},
"images": [
{
"filename": "vyos-1.2.0-beta1-amd64.iso",
"filename": "vyos-1.2.0-rolling+201906040337-amd64.iso",
"version": "1.2.0-beta1",
"version": "1.2.0-2019-06-04",
"md5sum": "c2906532d4c7a0d29b61e8eab326d6c7",
"md5sum": "2a1342985b2a9edfef9f66310ce11a92",
"filesize": 243269632,
"filesize": 427819008,
"download_url": "http://dev.packages.vyos.net/iso/preview/1.2.0-beta1/",
"direct_download_url": "http://dev.packages.vyos.net/iso/preview/1.2.0-beta1/vyos-1.2.0-beta1-amd64.iso"
},
@ -74,10 +75,10 @@
],
"versions": [
{
"name": "1.2.0-beta1",
"name": "1.2.0-2019-06-04",
"images": {
"hda_disk_image": "empty8G.qcow2",
"cdrom_image": "vyos-1.2.0-beta1-amd64.iso"
"cdrom_image": "vyos-1.2.0-rolling+201906040337-amd64.iso"
}
},
{


@ -492,15 +492,17 @@ class BaseManager:
# Windows path should not be send to a unix server
if not sys.platform.startswith("win"):
if re.match(r"^[A-Z]:", path) is not None:
raise NodeError("{} is not allowed on this remote server. Please use only a filename in {}.".format(path, img_directory))
raise NodeError("{} is not allowed on this remote server. Please only use a file from '{}'".format(path, img_directory))
if not os.path.isabs(path):
for directory in valid_directory_prefices:
log.debug("Searching for image '{}' in '{}'".format(orig_path, directory))
path = self._recursive_search_file_in_directory(directory, orig_path)
if path:
return force_unix_path(path)
# Not found we try the default directory
log.debug("Searching for image '{}' in default directory".format(orig_path))
s = os.path.split(orig_path)
path = force_unix_path(os.path.join(img_directory, *s))
if os.path.exists(path):
@ -509,6 +511,7 @@
# For local server we allow using absolute path outside image directory
if server_config.getboolean("local", False) is True:
log.debug("Searching for '{}'".format(orig_path))
path = force_unix_path(path)
if os.path.exists(path):
return path
@ -517,11 +520,12 @@
# Check to see if path is an absolute path to a valid directory
path = force_unix_path(path)
for directory in valid_directory_prefices:
log.debug("Searching for image '{}' in '{}'".format(orig_path, directory))
if os.path.commonprefix([directory, path]) == directory:
if os.path.exists(path):
return path
raise ImageMissingError(orig_path)
raise NodeError("{} is not allowed on this remote server. Please use only a filename in {}.".format(path, img_directory))
raise NodeError("{} is not allowed on this remote server. Please only use a file from '{}'".format(path, img_directory))
def _recursive_search_file_in_directory(self, directory, searched_file):
"""


@ -248,27 +248,32 @@ class DockerVM(BaseNode):
# We mount our own etc/network
try:
network_config = self._create_network_config()
self._create_network_config()
except OSError as e:
raise DockerError("Could not create network config in the container: {}".format(e))
binds.append("{}:/gns3volumes/etc/network:rw".format(network_config))
volumes = ["/etc/network"]
self._volumes = ["/etc/network"]
volumes.extend((image_info.get("Config", {}).get("Volumes") or {}).keys())
volumes = list((image_info.get("Config", {}).get("Volumes") or {}).keys())
for volume in self._extra_volumes:
if not volume.strip() or volume[0] != "/" or volume.find("..") >= 0:
raise DockerError("Persistent volume '{}' has invalid format. It must start with a '/' and not contain '..'.".format(volume))
volumes.extend(self._extra_volumes)
self._volumes = []
# define lambdas for validation checks
nf = lambda x: re.sub(r"//+", "/", (x if x.endswith("/") else x + "/"))
incompatible = lambda v1, v2: nf(v1).startswith(nf(v2)) or nf(v2).startswith(nf(v1))
generalises = lambda v1, v2: nf(v2).startswith(nf(v1))
for volume in volumes:
if [ v for v in self._volumes if incompatible(v, volume) ] :
# remove any mount that is equal or more specific, then append this one
raise DockerError("Duplicate persistent volume {} detected.\n\nVolumes specified in docker image as well as user specified persistent volumes must be unique.".format(volume))
self._volumes = list(filter(lambda v: not generalises(volume, v), self._volumes))
# if there is nothing more general, append this mount
if not [ v for v in self._volumes if generalises(v, volume) ] :
self._volumes.append(volume)
for volume in self._volumes:
source = os.path.join(self.working_dir, os.path.relpath(volume, "/"))
os.makedirs(source, exist_ok=True)
binds.append("{}:/gns3volumes{}".format(source, volume))
self._volumes.append(volume)
return binds
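
The new logic above resolves overlapping mount points instead of rejecting them: a volume that generalises another (i.e. is a parent path of it) replaces the more specific mount. A small standalone sketch of the same idea, usable for experimenting outside the class (the helper name and sample paths are illustrative only):

import re

nf = lambda x: re.sub(r"//+", "/", (x if x.endswith("/") else x + "/"))
generalises = lambda v1, v2: nf(v2).startswith(nf(v1))

def merge_volumes(volumes):
    """Keep only the most general mount points from a list of volume paths."""
    kept = []
    for volume in volumes:
        # drop anything this volume generalises (equal or more specific paths)
        kept = [v for v in kept if not generalises(volume, v)]
        # append only if no kept mount already covers this one
        if not any(generalises(v, volume) for v in kept):
            kept.append(volume)
    return kept

# merge_volumes(["/etc/network", "/etc", "/var/log"]) returns ["/etc", "/var/log"]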


@ -1525,6 +1525,8 @@ class Router(BaseNode):
:param new_name: new name string
"""
await self._hypervisor.send('vm rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
# change the hostname in the startup-config
if os.path.isfile(self.startup_config_path):
try:
@ -1547,7 +1549,6 @@
except OSError as e:
raise DynamipsError("Could not amend the configuration {}: {}".format(self.private_config_path, e))
await self._hypervisor.send('vm rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
log.info('Router "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name, id=self._id, new_name=new_name))
self._name = new_name


@ -30,6 +30,7 @@ from ...utils.asyncio import subprocess_check_output
from ..base_manager import BaseManager
from .qemu_error import QemuError
from .qemu_vm import QemuVM
from .utils.guest_cid import get_next_guest_cid
import logging
log = logging.getLogger(__name__)
@ -40,6 +41,33 @@ class Qemu(BaseManager):
_NODE_CLASS = QemuVM
_NODE_TYPE = "qemu"
def __init__(self):
super().__init__()
self._guest_cid_lock = asyncio.Lock()
async def create_node(self, *args, **kwargs):
"""
Creates a new Qemu VM.
:returns: QemuVM instance
"""
node = await super().create_node(*args, **kwargs)
# allocate a guest console ID (CID)
if node.console_type != "none" and node.console:
# by default, the guest CID is equal to the console port
node.guest_cid = node.console
else:
# otherwise pick a guest CID if no console port is configured
async with self._guest_cid_lock:
# wait for a node to be completely created before adding a new one
# this is important otherwise we allocate the same guest ID
# when creating multiple Qemu VMs at the same time
node.guest_cid = get_next_guest_cid(self.nodes)
return node
@staticmethod
async def get_kvm_archs():
"""


@ -36,6 +36,7 @@ import json
from gns3server.utils import parse_version
from gns3server.utils.asyncio import subprocess_check_output, cancellable_wait_run_in_executor
from .qemu_error import QemuError
from .utils.qcow2 import Qcow2, Qcow2Error
from ..adapters.ethernet_adapter import EthernetAdapter
from ..nios.nio_udp import NIOUDP
from ..nios.nio_tap import NIOTAP
@ -43,7 +44,6 @@ from ..base_node import BaseNode
from ...schemas.qemu import QEMU_OBJECT_SCHEMA, QEMU_PLATFORMS
from ...utils.asyncio import monitor_process
from ...utils.images import md5sum
from .qcow2 import Qcow2, Qcow2Error
from ...utils import macaddress_to_int, int_to_macaddress
@ -80,6 +80,7 @@ class QemuVM(BaseNode):
self._qemu_img_stdout_file = ""
self._execute_lock = asyncio.Lock()
self._local_udp_tunnels = {}
self._guest_cid = None
# QEMU VM settings
if qemu_path:
@ -124,6 +125,26 @@
self.adapters = 1 # creates 1 adapter by default
log.info('QEMU VM "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
@property
def guest_cid(self):
"""
Returns the CID (console ID) which is an unique identifier between 3 and 65535
:returns: integer between 3 and 65535
"""
return self._guest_cid
@guest_cid.setter
def guest_cid(self, guest_cid):
"""
Set the CID (console ID) which is an unique identifier between 3 and 65535
:returns: integer between 3 and 65535
"""
self._guest_cid = guest_cid
@property
def monitor(self):
"""
@ -413,10 +434,31 @@
:param cdrom_image: QEMU cdrom image path
"""
if cdrom_image:
self._cdrom_image = self.manager.get_abs_image_path(cdrom_image, self.project.path)
log.info('QEMU VM "{name}" [{id}] has set the QEMU cdrom image path to {cdrom_image}'.format(name=self._name,
id=self._id,
cdrom_image=self._cdrom_image))
else:
self._cdrom_image = ""
async def update_cdrom_image(self):
"""
Update the cdrom image path for the Qemu guest OS
"""
if self.is_running():
if self._cdrom_image:
self._cdrom_option() # this will check the cdrom image is accessible
await self._control_vm("eject -f ide1-cd0")
await self._control_vm("change ide1-cd0 {}".format(self._cdrom_image))
log.info('QEMU VM "{name}" [{id}] has changed the cdrom image path to {cdrom_image}'.format(name=self._name,
id=self._id,
cdrom_image=self._cdrom_image))
else:
await self._control_vm("eject -f ide1-cd0")
log.info('QEMU VM "{name}" [{id}] has ejected the cdrom image'.format(name=self._name, id=self._id))
@property
def bios_image(self):
@ -972,6 +1014,7 @@
await self._control_vm_commands(set_link_commands)
try:
if self.is_running():
await self.start_wrap_console()
except OSError as e:
raise QemuError("Could not start Telnet QEMU console {}\n".format(e))
@ -1916,7 +1959,8 @@
additional_options = additional_options.replace("%vm-id%", self._id)
additional_options = additional_options.replace("%project-id%", self.project.id)
additional_options = additional_options.replace("%project-path%", '"' + self.project.path.replace('"', '\\"') + '"')
if self._console:
additional_options = additional_options.replace("%guest-cid%", str(self._guest_cid))
if self._console_type != "none" and self._console:
additional_options = additional_options.replace("%console-port%", str(self._console))
command = [self.qemu_path]
command.extend(["-name", self._name])


@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ..qemu_error import QemuError
import logging
log = logging.getLogger(__name__)
def get_next_guest_cid(nodes):
"""
Calculates free guest_id from given nodes
:param nodes:
:raises QemuError when exceeds number
:return: integer first free cid
"""
used = set([n.guest_cid for n in nodes])
pool = set(range(3, 65535))
try:
return (pool - used).pop()
except KeyError:
raise QemuError("Cannot create a new Qemu VM (limit of 65535 guest ID on one host reached)")
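A minimal, self-contained sketch of the allocation logic above, assuming every node exposes a guest_cid attribute (the node objects here are hypothetical stand-ins):

from types import SimpleNamespace

def next_free_cid(nodes):
    # CIDs 0-2 are reserved by the vsock specification, hence the range starts at 3
    used = {n.guest_cid for n in nodes}
    free = set(range(3, 65535)) - used
    if not free:
        raise RuntimeError("limit of 65535 guest IDs on one host reached")
    return free.pop()  # set.pop() returns an arbitrary free CID, not necessarily the lowest

nodes = [SimpleNamespace(guest_cid=3), SimpleNamespace(guest_cid=4)]
print(next_free_cid(nodes))  # any CID between 5 and 65534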
@@ -175,7 +175,6 @@ class Controller:
try:
if not os.path.exists(self._config_file):
await self._import_gns3_gui_conf()
self._config_loaded = True
self.save()
with open(self._config_file) as f:
@@ -254,118 +253,6 @@ class Controller:
os.makedirs(images_path, exist_ok=True)
return images_path
async def _import_gns3_gui_conf(self):
"""
Import old config from GNS3 GUI
"""
config_file = os.path.join(os.path.dirname(self._config_file), "gns3_gui.conf")
if os.path.exists(config_file):
with open(config_file) as f:
settings = json.load(f)
server_settings = settings.get("Servers", {})
for remote in server_settings.get("remote_servers", []):
try:
await self.add_compute(host=remote.get("host", "localhost"),
port=remote.get("port", 3080),
protocol=remote.get("protocol", "http"),
name=remote.get("url"),
user=remote.get("user"),
password=remote.get("password"))
except aiohttp.web.HTTPConflict:
pass # if the server is broken we skip it
if "vm" in server_settings:
vmname = None
vm_settings = server_settings["vm"]
if vm_settings["virtualization"] == "VMware":
engine = "vmware"
vmname = vm_settings.get("vmname", "")
elif vm_settings["virtualization"] == "VirtualBox":
engine = "virtualbox"
vmname = vm_settings.get("vmname", "")
else:
engine = "remote"
# In case of remote server we match the compute with url parameter
for compute in self._computes.values():
if compute.host == vm_settings.get("remote_vm_host") and compute.port == vm_settings.get("remote_vm_port"):
vmname = compute.name
if vm_settings.get("auto_stop", True):
when_exit = "stop"
else:
when_exit = "keep"
self.gns3vm.settings = {
"engine": engine,
"enable": vm_settings.get("auto_start", False),
"when_exit": when_exit,
"headless": vm_settings.get("headless", False),
"vmname": vmname
}
vms = []
for vm in settings.get("Qemu", {}).get("vms", []):
vm["template_type"] = "qemu"
vms.append(vm)
for vm in settings.get("IOU", {}).get("devices", []):
vm["template_type"] = "iou"
vms.append(vm)
for vm in settings.get("Docker", {}).get("containers", []):
vm["template_type"] = "docker"
vms.append(vm)
for vm in settings.get("Builtin", {}).get("cloud_nodes", []):
vm["template_type"] = "cloud"
vms.append(vm)
for vm in settings.get("Builtin", {}).get("ethernet_switches", []):
vm["template_type"] = "ethernet_switch"
vms.append(vm)
for vm in settings.get("Builtin", {}).get("ethernet_hubs", []):
vm["template_type"] = "ethernet_hub"
vms.append(vm)
for vm in settings.get("Dynamips", {}).get("routers", []):
vm["template_type"] = "dynamips"
vms.append(vm)
for vm in settings.get("VMware", {}).get("vms", []):
vm["template_type"] = "vmware"
vms.append(vm)
for vm in settings.get("VirtualBox", {}).get("vms", []):
vm["template_type"] = "virtualbox"
vms.append(vm)
for vm in settings.get("VPCS", {}).get("nodes", []):
vm["template_type"] = "vpcs"
vms.append(vm)
for vm in settings.get("TraceNG", {}).get("nodes", []):
vm["template_type"] = "traceng"
vms.append(vm)
for vm in vms:
# remove deprecated properties
for prop in vm.copy():
if prop in ["enable_remote_console", "use_ubridge", "acpi_shutdown"]:
del vm[prop]
# remove deprecated default_symbol and hover_symbol
# and set symbol if not present
deprecated = ["default_symbol", "hover_symbol"]
if len([prop for prop in vm.keys() if prop in deprecated]) > 0:
if "default_symbol" in vm.keys():
del vm["default_symbol"]
if "hover_symbol" in vm.keys():
del vm["hover_symbol"]
if "symbol" not in vm.keys():
vm["symbol"] = ":/symbols/computer.svg"
vm.setdefault("template_id", str(uuid.uuid4()))
try:
template = Template(vm["template_id"], vm)
template.__json__() # Check if loaded without error
self.template_manager.templates[template.id] = template
except KeyError as e:
# template data is not complete (missing name or type)
log.warning("Cannot load template {} ('{}'): missing key {}".format(vm["template_id"], vm.get("name", "unknown"), e))
continue
async def add_compute(self, compute_id=None, name=None, force=False, connect=True, **kwargs):
"""
Add a server to the dictionary of computes controlled by this controller
@@ -248,7 +248,10 @@ class HyperVGNS3VM(BaseGNS3VM):
vnics = self._get_vm_resources(self._vm, 'Msvm_SyntheticEthernetPortSettingData')
while True:
for port in ports:
try:
vnic = [v for v in vnics if port.Parent == v.path_()][0]
except IndexError:
continue
config = vnic.associators(wmi_result_class='Msvm_GuestNetworkAdapterConfiguration')
ip_addresses = config[0].IPAddresses
for ip_address in ip_addresses:
@@ -22,10 +22,13 @@ import uuid
import shutil
import zipfile
import aiohttp
import aiofiles
import itertools
import tempfile
from .topology import load_topology
from ..utils.asyncio import wait_run_in_executor
from ..utils.asyncio import aiozipstream
import logging
log = logging.getLogger(__name__)
@@ -159,8 +162,13 @@ async def import_project(controller, project_id, stream, location=None, name=Non
json.dump(topology, f, indent=4)
os.remove(os.path.join(path, "project.gns3"))
if os.path.exists(os.path.join(path, "images")):
images_path = os.path.join(path, "images")
_import_images(controller, path)
if os.path.exists(images_path):
await _import_images(controller, images_path)
snapshots_path = os.path.join(path, "snapshots")
if os.path.exists(snapshots_path):
await _import_snapshots(snapshots_path, project_name, project_id)
project = await controller.load_project(dot_gns3_path, load=False)
return project
@@ -215,13 +223,13 @@ async def _upload_file(compute, project_id, file_path, path):
await compute.http_query("POST", path, f, timeout=None)
def _import_images(controller, path):
async def _import_images(controller, images_path):
"""
Copy images to the images directory or delete them if they already exist.
"""
image_dir = controller.images_path()
root = os.path.join(path, "images")
root = images_path
for (dirpath, dirnames, filenames) in os.walk(root, followlinks=False):
for filename in filenames:
path = os.path.join(dirpath, filename)
@@ -229,4 +237,54 @@ def _import_images(controller, path):
continue
dst = os.path.join(image_dir, os.path.relpath(path, root))
os.makedirs(os.path.dirname(dst), exist_ok=True)
shutil.move(path, dst)
await wait_run_in_executor(shutil.move, path, dst)
async def _import_snapshots(snapshots_path, project_name, project_id):
"""
Import the snapshots and update their project name and ID to be the same as the main project.
"""
for snapshot in os.listdir(snapshots_path):
if not snapshot.endswith(".gns3project"):
continue
snapshot_path = os.path.join(snapshots_path, snapshot)
with tempfile.TemporaryDirectory(dir=snapshots_path) as tmpdir:
# extract everything to a temporary directory
try:
with open(snapshot_path, "rb") as f:
with zipfile.ZipFile(f) as zip_file:
await wait_run_in_executor(zip_file.extractall, tmpdir)
except OSError as e:
raise aiohttp.web.HTTPConflict(text="Cannot open snapshot '{}': {}".format(os.path.basename(snapshot), e))
except zipfile.BadZipFile:
raise aiohttp.web.HTTPConflict(text="Cannot extract files from snapshot '{}': not a GNS3 project (invalid zip)".format(os.path.basename(snapshot)))
# patch the topology with the correct project name and ID
try:
topology_file_path = os.path.join(tmpdir, "project.gns3")
with open(topology_file_path, encoding="utf-8") as f:
topology = json.load(f)
topology["name"] = project_name
topology["project_id"] = project_id
with open(topology_file_path, "w+", encoding="utf-8") as f:
json.dump(topology, f, indent=4, sort_keys=True)
except OSError as e:
raise aiohttp.web.HTTPConflict(text="Cannot update snapshot '{}': the project.gns3 file cannot be modified: {}".format(os.path.basename(snapshot), e))
except (ValueError, KeyError):
raise aiohttp.web.HTTPConflict(text="Cannot update snapshot '{}': the project.gns3 file is corrupted".format(os.path.basename(snapshot)))
# write everything back to the original snapshot file
try:
with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
for root, dirs, files in os.walk(tmpdir, topdown=True, followlinks=False):
for file in files:
path = os.path.join(root, file)
zstream.write(path, os.path.relpath(path, tmpdir))
async with aiofiles.open(snapshot_path, 'wb+') as f:
async for chunk in zstream:
await f.write(chunk)
except OSError as e:
raise aiohttp.web.HTTPConflict(text="Cannot update snapshot '{}': the snapshot cannot be recreated: {}".format(os.path.basename(snapshot), e))
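The snapshot handling above keeps blocking zipfile and shutil work off the event loop by delegating it to an executor. A minimal sketch of that pattern using only the standard library (paths are hypothetical, and wait_run_in_executor is assumed to be a thin wrapper around loop.run_in_executor):

import asyncio
import zipfile

async def extract_snapshot(archive_path, dest_dir):
    # run the blocking extraction in a worker thread so the asyncio loop stays responsive
    loop = asyncio.get_event_loop()
    with zipfile.ZipFile(archive_path) as zip_file:
        await loop.run_in_executor(None, zip_file.extractall, dest_dir)

# asyncio.run(extract_snapshot("snapshot1.gns3project", "/tmp/snapshot1"))  # hypothetical paths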
@@ -187,7 +187,7 @@ class Node:
if not os.path.isabs(path):
path = os.path.join(self.project.controller.configs_path(), path)
try:
with open(path, encoding="utf-8") as f:
with open(path, encoding="utf-8", errors="ignore") as f:
return f.read()
except OSError:
return None
@@ -388,7 +388,6 @@ class Node:
# When updating properties used only on controller we don't need to call the compute
update_compute = False
old_json = self.__json__()
compute_properties = None
@@ -402,6 +401,8 @@ class Node:
if prop == "properties":
compute_properties = kwargs[prop]
else:
if prop == "name" and self.node_type == "dynamips" and self.status == "started":
raise aiohttp.web.HTTPConflict(text="Sorry, it is not possible to rename a Dynamips node that is already powered on")
setattr(self, prop, kwargs[prop])
if compute_properties and "custom_adapters" in compute_properties:
@@ -1032,6 +1032,7 @@ class Project:
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not write topology: {}".format(e))
@open_required
async def start_all(self):
"""
Start all nodes
@@ -1041,6 +1042,7 @@ class Project:
pool.append(node.start)
await pool.join()
@open_required
async def stop_all(self):
"""
Stop all nodes
@@ -1050,6 +1052,7 @@ class Project:
pool.append(node.stop)
await pool.join()
@open_required
async def suspend_all(self):
"""
Suspend all nodes
@@ -1059,6 +1062,7 @@ class Project:
pool.append(node.suspend)
await pool.join()
@open_required
async def duplicate_node(self, node, x, y, z):
"""
Duplicate a node
@@ -128,17 +128,18 @@ def load_topology(path):
raise aiohttp.web.HTTPConflict(text="Could not load topology {}: {}".format(path, str(e)))
if topo.get("revision", 0) > GNS3_FILE_FORMAT_REVISION:
raise aiohttp.web.HTTPConflict(text="This project is designed for a more recent version of GNS3 please update GNS3 to version {} or later".format(topo["version"]))
raise aiohttp.web.HTTPConflict(text="This project was created with a more recent version of GNS3 (file revision: {}). Please upgrade GNS3 to version {} or later".format(topo["revision"], topo["version"]))
changed = False
if "revision" not in topo or topo["revision"] < GNS3_FILE_FORMAT_REVISION:
# If it's an old GNS3 file we need to convert it
# Convert the topology if this is an old one but backup the file first
# first we backup the file
try:
shutil.copy(path, path + ".backup{}".format(topo.get("revision", 0)))
except (OSError) as e:
except OSError as e:
raise aiohttp.web.HTTPConflict(text="Can't write backup of the topology {}: {}".format(path, str(e)))
changed = True
# update the version because we converted the topology
topo["version"] = __version__
if "revision" not in topo or topo["revision"] < 5:
topo = _convert_1_3_later(topo, path)
@@ -58,7 +58,7 @@ class CrashReport:
Report crash to a third party service
"""
DSN = "https://6b4544f8cc1b42f798941c844ca6dfa6:885563130aca4d8e87ba097d098229a3@sentry.io/38482"
DSN = "https://b3d56605ec7540fdb05795d2666fde28:608a6c71d16d42468f0fc1810d948c46@sentry.io/38482"
if hasattr(sys, "frozen"):
cacert = get_resource("cacert.pem")
if cacert is not None and os.path.isfile(cacert):
@@ -116,7 +116,7 @@ class QEMUHandler:
description="Update a Qemu VM instance",
input=QEMU_UPDATE_SCHEMA,
output=QEMU_OBJECT_SCHEMA)
def update(request, response):
async def update(request, response):
qemu_manager = Qemu.instance()
vm = qemu_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
@@ -125,6 +125,9 @@ class QEMUHandler:
for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
setattr(vm, name, value)
if name == "cdrom_image":
# let the guest know about the new cdrom image
await vm.update_cdrom_image()
vm.updated()
response.json(vm)
@@ -101,6 +101,9 @@ def parse_arguments(argv):
parser.add_argument("-d", "--debug", action="store_true", help="show debug logs")
parser.add_argument("--shell", action="store_true", help="start a shell inside the server (debugging purpose only you need to install ptpython before)")
parser.add_argument("--log", help="send output to logfile instead of console")
parser.add_argument("--logmaxsize", help="maximum logfile size in bytes (default is 10MB)")
parser.add_argument("--logbackupcount", help="number of historical log files to keep (default is 10)")
parser.add_argument("--logcompression", action="store_true", help="compress inactive (historical) logs")
parser.add_argument("--daemon", action="store_true", help="start as a daemon")
parser.add_argument("--pid", help="store process pid")
parser.add_argument("--profile", help="Settings profile (blank will use default settings files)")
@@ -123,7 +126,10 @@ def parse_arguments(argv):
"allow": config.getboolean("allow_remote_console", False),
"quiet": config.getboolean("quiet", False),
"debug": config.getboolean("debug", False),
"logfile": config.getboolean("logfile", "")
"logfile": config.getboolean("logfile", ""),
"logmaxsize": config.get("logmaxsize", 10000000),  # default is 10MB
"logbackupcount": config.get("logbackupcount", 10),
"logcompression": config.getboolean("logcompression", False)
}
parser.set_defaults(**defaults)
@@ -208,7 +214,8 @@ def run():
if args.debug:
level = logging.DEBUG
user_log = init_logger(level, logfile=args.log, quiet=args.quiet)
user_log = init_logger(level, logfile=args.log, max_bytes=int(args.logmaxsize), backup_count=int(args.logbackupcount),
compression=args.logcompression, quiet=args.quiet)
user_log.info("GNS3 server version {}".format(__version__))
current_year = datetime.date.today().year
user_log.info("Copyright (c) 2007-{} GNS3 Technologies Inc.".format(current_year))
@@ -23,27 +23,32 @@ from .port import PORT_OBJECT_SCHEMA
CLOUD_TEMPLATE_PROPERTIES = {
"ports_mapping": {
"type": "array",
"items": [PORT_OBJECT_SCHEMA]
"items": [PORT_OBJECT_SCHEMA],
"default": []
},
"remote_console_host": {
"description": "Remote console host or IP",
"type": ["string"],
"minLength": 1
"minLength": 1,
"default": "127.0.0.1"
},
"remote_console_port": {
"description": "Console TCP port",
"minimum": 1,
"maximum": 65535,
"type": "integer"
"type": "integer",
"default": 23
},
"remote_console_type": {
"description": "Console type",
"enum": ["telnet", "vnc", "spice", "http", "https", "none"]
"enum": ["telnet", "vnc", "spice", "http", "https", "none"],
"default": "none"
},
"remote_console_http_path": {
"description": "Path of the remote web interface",
"type": "string",
"minLength": 1
"minLength": 1,
"default": "/"
},
}
@@ -28,6 +28,7 @@ IOU_TEMPLATE_PROPERTIES = {
"usage": {
"description": "How to use the IOU VM",
"type": "string",
"default": ""
},
"ethernet_adapters": {
"description": "Number of ethernet adapters",
@@ -253,5 +253,5 @@ class Hypervisor(UBridgeHypervisor):
command = [self._path]
command.extend(["-H", "{}:{}".format(self._host, self._port)])
if log.getEffectiveLevel() == logging.DEBUG:
command.extend(["-d", "2"])
command.extend(["-d", "1"])
return command
@@ -120,7 +120,7 @@ def default_images_directory(type):
def images_directories(type):
"""
Return all directory where we will look for images
Return all directories where we will look for images
by priority
:param type: Type of emulator
@@ -23,7 +23,7 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
__version__ = "2.2.0dev12"
__version__ = "2.2.0dev15"
__version_info__ = (2, 2, 0, 99)
# If it's a git checkout try to add the commit
@@ -21,6 +21,11 @@
import logging
import sys
import os
import shutil
import gzip
from logging.handlers import RotatingFileHandler
class ColouredFormatter(logging.Formatter):
@@ -108,9 +113,37 @@ class LogFilter:
return 1
def init_logger(level, logfile=None, quiet=False):
class CompressedRotatingFileHandler(RotatingFileHandler):
"""
Custom rotating file handler with compression support.
"""
def doRollover(self):
if self.stream:
self.stream.close()
if self.backupCount > 0:
for i in range(self.backupCount - 1, 0, -1):
sfn = "%s.%d.gz" % (self.baseFilename, i)
dfn = "%s.%d.gz" % (self.baseFilename, i + 1)
if os.path.exists(sfn):
if os.path.exists(dfn):
os.remove(dfn)
os.rename(sfn, dfn)
dfn = self.baseFilename + ".1.gz"
if os.path.exists(dfn):
os.remove(dfn)
with open(self.baseFilename, 'rb') as f_in, gzip.open(dfn, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
self.mode = 'w'
self.stream = self._open()
def init_logger(level, logfile=None, max_bytes=10000000, backup_count=10, compression=True, quiet=False):
if logfile and len(logfile) > 0:
stream_handler = logging.FileHandler(logfile)
if compression:
stream_handler = CompressedRotatingFileHandler(logfile, maxBytes=max_bytes, backupCount=backup_count)
else:
stream_handler = RotatingFileHandler(logfile, maxBytes=max_bytes, backupCount=backup_count)
stream_handler.formatter = ColouredFormatter("{asctime} {levelname} {filename}:{lineno} {message}", "%Y-%m-%d %H:%M:%S", "{")
elif sys.platform.startswith("win"):
stream_handler = WinStreamHandler(sys.stdout)
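A minimal, self-contained sketch of the same rotation behaviour using only the standard-library handler that init_logger falls back to when compression is disabled (the log path and limits are hypothetical):

import logging
from logging.handlers import RotatingFileHandler

handler = RotatingFileHandler("gns3_server.log",  # hypothetical log path
                              maxBytes=10000000,  # rotate after ~10 MB, mirroring the default above
                              backupCount=10)     # keep ten historical files
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

logger = logging.getLogger("gns3server.example")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info("logging to a rotating file")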
scripts/docker_dev_server.sh Normal file → Executable file
@@ -99,7 +99,7 @@ def test_create(loop, project, manager):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True
},
@@ -138,7 +138,7 @@ def test_create_with_tag(loop, project, manager):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True
},
@@ -180,7 +180,7 @@ def test_create_vnc(loop, project, manager):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network")),
'/tmp/.X11-unix/:/tmp/.X11-unix/'
],
"Privileged": True
@@ -296,7 +296,7 @@ def test_create_start_cmd(loop, project, manager):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True
},
@@ -396,7 +396,7 @@ def test_create_image_not_available(loop, project, manager):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True
},
@@ -439,7 +439,7 @@ def test_create_with_user(loop, project, manager):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True
},
@@ -509,8 +509,35 @@ def test_create_with_extra_volumes_duplicate_1_image(loop, project, manager):
with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", extra_volumes=["/vol/1"])
with pytest.raises(DockerError):
loop.run_until_complete(asyncio.ensure_future(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/vol/1".format(os.path.join(vm.working_dir, "vol", "1")),
],
"Privileged": True
},
"Volumes": {},
"NetworkDisabled": True,
"Name": "test",
"Hostname": "test",
"Image": "ubuntu:latest",
"Env": [
"container=docker",
"GNS3_MAX_ETHERNET=eth0",
"GNS3_VOLUMES=/etc/network:/vol/1"
],
"Entrypoint": ["/gns3/init.sh"],
"Cmd": ["/bin/sh"]
})
assert vm._cid == "e90e34656806"
def test_create_with_extra_volumes_duplicate_2_user(loop, project, manager):
@@ -521,8 +548,35 @@ def test_create_with_extra_volumes_duplicate_2_user(loop, project, manager):
with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", extra_volumes=["/vol/1", "/vol/1"])
with pytest.raises(DockerError):
loop.run_until_complete(asyncio.ensure_future(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/vol/1".format(os.path.join(vm.working_dir, "vol", "1")),
],
"Privileged": True
},
"Volumes": {},
"NetworkDisabled": True,
"Name": "test",
"Hostname": "test",
"Image": "ubuntu:latest",
"Env": [
"container=docker",
"GNS3_MAX_ETHERNET=eth0",
"GNS3_VOLUMES=/etc/network:/vol/1"
],
"Entrypoint": ["/gns3/init.sh"],
"Cmd": ["/bin/sh"]
})
assert vm._cid == "e90e34656806"
def test_create_with_extra_volumes_duplicate_3_subdir(loop, project, manager):
@@ -533,8 +587,35 @@ def test_create_with_extra_volumes_duplicate_3_subdir(loop, project, manager):
with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", extra_volumes=["/vol/1/", "/vol"])
with pytest.raises(DockerError):
loop.run_until_complete(asyncio.ensure_future(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/vol".format(os.path.join(vm.working_dir, "vol")),
],
"Privileged": True
},
"Volumes": {},
"NetworkDisabled": True,
"Name": "test",
"Hostname": "test",
"Image": "ubuntu:latest",
"Env": [
"container=docker",
"GNS3_MAX_ETHERNET=eth0",
"GNS3_VOLUMES=/etc/network:/vol"
],
"Entrypoint": ["/gns3/init.sh"],
"Cmd": ["/bin/sh"]
})
assert vm._cid == "e90e34656806"
def test_create_with_extra_volumes_duplicate_4_backslash(loop, project, manager):
@@ -545,8 +626,111 @@ def test_create_with_extra_volumes_duplicate_4_backslash(loop, project, manager)
with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", extra_volumes=["/vol//", "/vol"])
with pytest.raises(DockerError):
loop.run_until_complete(asyncio.ensure_future(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/vol".format(os.path.join(vm.working_dir, "vol")),
],
"Privileged": True
},
"Volumes": {},
"NetworkDisabled": True,
"Name": "test",
"Hostname": "test",
"Image": "ubuntu:latest",
"Env": [
"container=docker",
"GNS3_MAX_ETHERNET=eth0",
"GNS3_VOLUMES=/etc/network:/vol"
],
"Entrypoint": ["/gns3/init.sh"],
"Cmd": ["/bin/sh"]
})
assert vm._cid == "e90e34656806"
def test_create_with_extra_volumes_duplicate_5_subdir_issue_1595(loop, project, manager):
response = {
"Id": "e90e34656806",
"Warnings": [],
}
with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", extra_volumes=["/etc"])
loop.run_until_complete(asyncio.ensure_future(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc".format(os.path.join(vm.working_dir, "etc")),
],
"Privileged": True
},
"Volumes": {},
"NetworkDisabled": True,
"Name": "test",
"Hostname": "test",
"Image": "ubuntu:latest",
"Env": [
"container=docker",
"GNS3_MAX_ETHERNET=eth0",
"GNS3_VOLUMES=/etc"
],
"Entrypoint": ["/gns3/init.sh"],
"Cmd": ["/bin/sh"]
})
assert vm._cid == "e90e34656806"
def test_create_with_extra_volumes_duplicate_6_subdir_issue_1595(loop, project, manager):
response = {
"Id": "e90e34656806",
"Warnings": [],
}
with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]) as mock_list_images:
with asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response) as mock:
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu:latest", extra_volumes=["/etc/test", "/etc"])
loop.run_until_complete(asyncio.ensure_future(vm.create()))
mock.assert_called_with("POST", "containers/create", data={
"Tty": True,
"OpenStdin": True,
"StdinOnce": False,
"HostConfig":
{
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc".format(os.path.join(vm.working_dir, "etc")),
],
"Privileged": True
},
"Volumes": {},
"NetworkDisabled": True,
"Name": "test",
"Hostname": "test",
"Image": "ubuntu:latest",
"Env": [
"container=docker",
"GNS3_MAX_ETHERNET=eth0",
"GNS3_VOLUMES=/etc"
],
"Entrypoint": ["/gns3/init.sh"],
"Cmd": ["/bin/sh"]
})
assert vm._cid == "e90e34656806"
def test_create_with_extra_volumes(loop, project, manager):
@@ -572,7 +756,7 @@ def test_create_with_extra_volumes(loop, project, manager):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/vol/1".format(os.path.join(vm.working_dir, "vol", "1")),
"{}:/gns3volumes/vol/2".format(os.path.join(vm.working_dir, "vol", "2")),
],
@@ -795,7 +979,7 @@ def test_update(loop, vm):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True
},
@@ -864,7 +1048,7 @@ def test_update_running(loop, vm):
"CapAdd": ["ALL"],
"Binds": [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network"))
],
"Privileged": True
},
@@ -1138,7 +1322,7 @@ def test_mount_binds(vm, tmpdir):
dst = os.path.join(vm.working_dir, "test/experimental")
assert vm._mount_binds(image_infos) == [
"{}:/gns3:ro".format(get_resource("compute/docker/resources")),
"{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes/etc/network".format(os.path.join(vm.working_dir, "etc", "network")),
"{}:/gns3volumes{}".format(dst, "/test/experimental")
]
@@ -20,7 +20,7 @@ import pytest
import shutil
import asyncio
from gns3server.compute.qemu.qcow2 import Qcow2, Qcow2Error
from gns3server.compute.qemu.utils.qcow2 import Qcow2, Qcow2Error
def qemu_img():
@@ -106,97 +106,6 @@ def test_import_computes_1_x(controller, controller_config_path, async_run):
assert compute.password is None
def test_import_gns3vm_1_x(controller, controller_config_path, async_run):
"""
At first start the server should import the
gns3vm settings from the gns3_gui 1.X
"""
gns3_gui_conf = {
"Servers": {
"vm": {
"adjust_local_server_ip": True,
"auto_start": True,
"auto_stop": False,
"headless": True,
"remote_vm_host": "",
"remote_vm_password": "",
"remote_vm_port": 3080,
"remote_vm_protocol": "http",
"remote_vm_url": "",
"remote_vm_user": "",
"virtualization": "VMware",
"vmname": "GNS3 VM",
"vmx_path": "/Users/joe/Documents/Virtual Machines.localized/GNS3 VM.vmwarevm/GNS3 VM.vmx"
}
}
}
config_dir = os.path.dirname(controller_config_path)
os.makedirs(config_dir, exist_ok=True)
with open(os.path.join(config_dir, "gns3_gui.conf"), "w+") as f:
json.dump(gns3_gui_conf, f)
controller.gns3vm.settings["engine"] = None
async_run(controller._load_controller_settings())
assert controller.gns3vm.settings["engine"] == "vmware"
assert controller.gns3vm.settings["enable"]
assert controller.gns3vm.settings["headless"]
assert controller.gns3vm.settings["when_exit"] == "keep"
assert controller.gns3vm.settings["vmname"] == "GNS3 VM"
def test_import_remote_gns3vm_1_x(controller, controller_config_path, async_run):
"""
At first start the server should import the
computes and remote GNS3 VM from the gns3_gui 1.X
"""
gns3_gui_conf = {
"Servers": {
"remote_servers": [
{
"host": "127.0.0.1",
"password": "",
"port": 3080,
"protocol": "http",
"url": "http://127.0.0.1:3080",
"user": ""
},
{
"host": "127.0.0.1",
"password": "",
"port": 3081,
"protocol": "http",
"url": "http://127.0.0.1:3081",
"user": ""
}
],
"vm": {
"adjust_local_server_ip": True,
"auto_start": True,
"auto_stop": False,
"headless": True,
"remote_vm_host": "127.0.0.1",
"remote_vm_password": "",
"remote_vm_port": 3081,
"remote_vm_protocol": "http",
"remote_vm_url": "http://127.0.0.1:3081",
"remote_vm_user": "",
"virtualization": "remote",
"vmname": "GNS3 VM",
"vmx_path": "/Users/joe/Documents/Virtual Machines.localized/GNS3 VM.vmwarevm/GNS3 VM.vmx"
}
}
}
config_dir = os.path.dirname(controller_config_path)
os.makedirs(config_dir, exist_ok=True)
with open(os.path.join(config_dir, "gns3_gui.conf"), "w+") as f:
json.dump(gns3_gui_conf, f)
with asyncio_patch("gns3server.controller.compute.Compute.connect"):
async_run(controller._load_controller_settings())
assert controller.gns3vm.settings["engine"] == "remote"
assert controller.gns3vm.settings["vmname"] == "http://127.0.0.1:3081"
def test_load_projects(controller, projects_dir, async_run):
controller.save()
@@ -28,6 +28,7 @@ import shutil
from gns3server.controller.topology import load_topology, GNS3_FILE_FORMAT_REVISION
from gns3server.version import __version__
topologies_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "topologies")
@@ -104,7 +105,7 @@ def compare_dict(path, source, reference):
"""
assert isinstance(source, dict), "Source is not a dict in {}".format(path)
for key in source:
assert key in reference, "Unexcepted {} in {} it should be {}".format(key, source, reference)
assert key in reference, "Unexpected {} in {} it should be {}".format(key, source, reference)
for key in sorted(reference.keys()):
val = reference[key]
assert key in source, "{} is missing in {}".format(key, source)
@@ -113,10 +114,12 @@ def compare_dict(path, source, reference):
pass
elif val == "ANYUUID" and len(source[key]) == 36:
pass
# We test that the revision number has been bumpd to last version. This avoid modifying all the tests
# We test that the revision number has been bumped to the last version. This avoids modifying all the tests
# at each new revision bump.
elif key == "revision":
assert source[key] == GNS3_FILE_FORMAT_REVISION
elif key == "version":
assert source[key] == __version__
else:
assert val == source[key], "Wrong value for {}: \n{}\nit should be\n{}".format(key, source[key], val)
elif isinstance(val, dict):