Mirror of https://github.com/GNS3/gns3-server, synced 2024-11-19 14:58:07 +00:00

Commit b179ca88a7: Merge branch 'master' into 2.3

# Conflicts:
#	gns3server/version.py

22	CHANGELOG
@@ -1,5 +1,27 @@
 # Change Log
 
+## 2.2.6 26/03/2020
+
+* Remove --local when starting the Docker dev server.
+* Release 2020.1.0-alpha.1
+* Monitor ubridge processes.
+* Add Xvnc command to the VNC servers list. Fixes #172
+* Allow controller to reconnect to compute if communication is lost. Ref #1634
+* Improve support for the Docker USER directive. Fixes #1727
+* Fix inability to delete the content of the "usage" field for a Dynamips router. Fixes https://github.com/GNS3/gns3-gui/issues/2947
+* Prevent locked drawings from being deleted. Fixes https://github.com/GNS3/gns3-gui/issues/2948
+* Fix issues with empty project variables. Fixes https://github.com/GNS3/gns3-gui/issues/2941
+* Upgrade psutil to version 5.6.6 due to CVE-2019-18874 https://github.com/advisories/GHSA-qfc5-mcwq-26q8
+* Remove 'format=raw' from the Qemu options of the disk interfaces. Ref #1699
+* Allocate application IDs for IOU nodes on the controller. An application ID is used by IOU to generate its interface MAC addresses. IDs must be unique across all opened projects sharing the same computes to avoid MAC address collisions.
+* Require VirtualBox >= 6.0 on AMD and >= 6.1 on Intel processors (for GNS3 VM only). Fixes #1610
+* Add nvme disk interface and fix scsi disk interface for Qemu VMs.
+* Disallow using "legacy networking mode" with Qemu >= 2.9.0
+* Add latest Qemu NIC models.
+* Attempt to fix error when loading the wmi module. Fixes #1712
+* Handle "aborted" state for VirtualBox VMs. Fixes #1702
+* Change how Hyper-V VMs are found. Ref #1612
+
 ## 2.2.5 09/01/2020
 
 * No changes
@@ -16,7 +16,6 @@ RUN apt-get update && apt-get install -y \
     python3-pip \
     python3-dev \
     qemu-system-x86 \
-    qemu-system-arm \
     qemu-kvm \
    libvirt-bin \
     x11vnc
@@ -33,4 +32,4 @@ RUN pip3 install -r /server/requirements.txt
 
 EXPOSE 3080
 
-CMD python3 -m gns3server --local
+CMD python3 -m gns3server
@@ -11,7 +11,7 @@
     "maintainer": "GNS3 Team",
     "maintainer_email": "developers@gns3.net",
     "dynamips": {
-        "platform": "c3600",
+        "platform": "c2691",
         "ram": 192,
         "nvram": 256,
         "startup_config": "ios_base_startup-config.txt",
@@ -7,7 +7,7 @@
     "documentation_url": "http://www.cisco.com/c/en/us/support/security/identity-services-engine/tsd-products-support-series-home.html",
     "product_name": "Identity Services Engine",
     "product_url": "http://www.cisco.com/c/en/us/products/security/identity-services-engine/index.html",
-    "registry_version": 3,
+    "registry_version": 4,
     "status": "experimental",
     "maintainer": "GNS3 Team",
     "maintainer_email": "developers@gns3.net",
@@ -15,9 +15,10 @@
     "symbol": "cisco-ise.svg",
     "port_name_format": "GigabitEthernet{0}",
     "qemu": {
+        "cpus": 2,
         "adapter_type": "e1000",
-        "adapters": 2,
-        "ram": 4096,
+        "adapters": 6,
+        "ram": 8192,
         "hda_disk_interface": "ide",
         "arch": "x86_64",
         "console_type": "vnc",
@@ -26,12 +27,33 @@
         "options": "-smp 2 -smbios type=1,product=KVM"
     },
     "images": [
+        {
+            "filename": "ise-2.7.0.356.SPA.x86_64.iso",
+            "version": "2.7.0.356",
+            "md5sum": "efbc831bf05513e4df8695eb3a362921",
+            "filesize": 9184415744,
+            "download_url": "https://software.cisco.com/download/home/283801620/type/283802505/release/2.7.0"
+        },
+        {
+            "filename": "ise-2.6.0.156.SPA.x86_64.iso",
+            "version": "2.6.0.156",
+            "md5sum": "296e65b662821269ad67dd3dea8804d9",
+            "filesize": 8618913792,
+            "download_url": "https://software.cisco.com/download/home/283801620/type/283802505/release/2.6.0"
+        },
         {
             "filename": "ise-2.4.0.357.SPA.x86_64.iso",
             "version": "2.4.0.357",
-            "md5sum": "766945618a0ff35f6c720b3bc4b46bfb",
+            "md5sum": "7f32a28f8d95c7525885786a6556913e",
             "filesize": 8326062080,
             "download_url": "https://software.cisco.com/download/home/283801620/type/283802505/release/2.4.0"
+        },
+        {
+            "filename": "ise-2.3.0.298.SPA.x86_64.iso",
+            "version": "2.3.0.298",
+            "md5sum": "da98d1a34f6b11d63da0f29bd5ef9caf",
+            "filesize": 8174278656,
+            "download_url": "https://software.cisco.com/download/home/283801620/type/283802505/release/2.3.0"
         },
         {
             "filename": "ise-2.2.0.470.SPA.x86_64.iso",
@@ -71,12 +93,33 @@
         }
     ],
     "versions": [
+        {
+            "name": "2.7.0.356",
+            "images": {
+                "hda_disk_image": "empty200G.qcow2",
+                "cdrom_image": "ise-2.7.0.356.SPA.x86_64.iso"
+            }
+        },
+        {
+            "name": "2.6.0.156",
+            "images": {
+                "hda_disk_image": "empty200G.qcow2",
+                "cdrom_image": "ise-2.6.0.156.SPA.x86_64.iso"
+            }
+        },
         {
             "name": "2.4.0.357",
             "images": {
                 "hda_disk_image": "empty200G.qcow2",
                 "cdrom_image": "ise-2.4.0.357.SPA.x86_64.iso"
            }
+        },
+        {
+            "name": "2.3.0.298",
+            "images": {
+                "hda_disk_image": "empty200G.qcow2",
+                "cdrom_image": "ise-2.3.0.298.SPA.x86_64.iso"
+            }
         },
         {
             "name": "2.2.0.470",
@@ -25,6 +25,20 @@
         "kvm": "require"
     },
     "images": [
+        {
+            "filename": "nexus9500v.9.3.3.qcow2",
+            "version": "9500v 9.3.3",
+            "md5sum": "7230c944041fdaa0e1b18cecccbc9a32",
+            "filesize": 1714159616,
+            "download_url": "https://software.cisco.com/download/home/286312239/type/282088129/release/9.3(3)"
+        },
+        {
+            "filename": "nexus9300v.9.3.3.qcow2",
+            "version": "9300v 9.3.3",
+            "md5sum": "8e9a7c4815907ef47d850623f77042e2",
+            "filesize": 1714225152,
+            "download_url": "https://software.cisco.com/download/home/286312239/type/282088129/release/9.3(3)"
+        },
         {
             "filename": "nxosv.9.3.1.qcow2",
             "version": "9.3.1",
@@ -134,6 +148,20 @@
         }
     ],
     "versions": [
+        {
+            "name": "9500v 9.3.3",
+            "images": {
+                "bios_image": "OVMF-20160813.fd",
+                "hda_disk_image": "nexus9500v.9.3.3.qcow2"
+            }
+        },
+        {
+            "name": "9300v 9.3.3",
+            "images": {
+                "bios_image": "OVMF-20160813.fd",
+                "hda_disk_image": "nexus9300v.9.3.3.qcow2"
+            }
+        },
         {
             "name": "9.3.1",
             "images": {
@@ -27,6 +27,13 @@
         "options": "-smp 2 -cpu host"
     },
     "images": [
+        {
+            "filename": "BIGIP-14.1.2.3-0.0.5.qcow2",
+            "version": "14.1.2.3",
+            "md5sum": "356520eedb615c93e985474f2b2ec603",
+            "filesize": 5036834816,
+            "download_url": "https://downloads.f5.com"
+        },
         {
             "filename": "BIGIP-14.0.0.3-0.0.4.qcow2",
             "version": "14.0.0 HF3",
@@ -142,6 +149,13 @@
         }
     ],
     "versions": [
+        {
+            "name": "14.1.2.3",
+            "images": {
+                "hda_disk_image": "BIGIP-14.1.2.3-0.0.5.qcow2",
+                "hdb_disk_image": "empty100G.qcow2"
+            }
+        },
         {
             "name": "14.0.0 HF3",
             "images": {
@@ -27,6 +27,15 @@
     },
     "images": [
         {
+            "filename": "chr-7.0beta5.img",
+            "version": "7.0beta5",
+            "md5sum": "3c9855f0efdc23df29511c76aee52c95",
+            "filesize": 67108864,
+            "download_url": "http://www.mikrotik.com/download",
+            "direct_download_url": "https://download.mikrotik.com/routeros/7.0beta5/chr-7.0beta5.img.zip",
+            "compression": "zip"
+        },
+        {
             "filename": "chr-7.0beta3.img",
             "version": "7.0beta3",
             "md5sum": "938c59989df039cb9f33e0da96c22174",
@@ -34,6 +43,24 @@
             "download_url": "http://www.mikrotik.com/download",
             "direct_download_url": "https://download.mikrotik.com/routeros/7.0beta3/chr-7.0beta3.img.zip",
             "compression": "zip"
+        },
+        {
+            "filename": "chr-6.46.3.img",
+            "version": "6.46.3",
+            "md5sum": "72d72c4a585a04eb9ed24ec9e4678486",
+            "filesize": 67108864,
+            "download_url": "http://www.mikrotik.com/download",
+            "direct_download_url": "https://download.mikrotik.com/routeros/6.46.3/chr-6.46.3.img.zip",
+            "compression": "zip"
+        },
+        {
+            "filename": "chr-6.45.8.img",
+            "version": "6.45.8",
+            "md5sum": "73cc01e22e0b301dc29416f59ced8a7d",
+            "filesize": 67108864,
+            "download_url": "http://www.mikrotik.com/download",
+            "direct_download_url": "https://download.mikrotik.com/routeros/6.45.8/chr-6.45.8.img.zip",
+            "compression": "zip"
         },
         {
             "filename": "chr-6.45.6.img",
@@ -154,11 +181,29 @@
         }
     ],
     "versions": [
+        {
+            "name": "7.0beta5",
+            "images": {
+                "hda_disk_image": "chr-7.0beta5.img"
+            }
+        },
         {
             "name": "7.0beta3",
             "images": {
                 "hda_disk_image": "chr-7.0beta3.img"
             }
+        },
+        {
+            "name": "6.46.3",
+            "images": {
+                "hda_disk_image": "chr-6.46.3.img"
+            }
+        },
+        {
+            "name": "6.45.8",
+            "images": {
+                "hda_disk_image": "chr-6.45.8.img"
+            }
         },
         {
             "name": "6.45.6",
@@ -21,6 +21,42 @@
         "kvm": "allow"
     },
     "images": [
+        {
+            "filename": "openwrt-19.07.1-x86-64-combined-ext4.img",
+            "version": "19.07.1",
+            "md5sum": "2d36f48bcc37edf3c5cfc28fed44b533",
+            "filesize": 285736960,
+            "download_url": "https://downloads.openwrt.org/releases/19.07.1/targets/x86/64/",
+            "direct_download_url": "https://downloads.openwrt.org/releases/19.07.1/targets/x86/64/openwrt-19.07.1-x86-64-combined-ext4.img.gz",
+            "compression": "gzip"
+        },
+        {
+            "filename": "openwrt-18.06.7-x86-64-combined-ext4.img",
+            "version": "18.06.7",
+            "md5sum": "f463f38cccf89e1a2229f74a4c42d3ef",
+            "filesize": 285736960,
+            "download_url": "https://downloads.openwrt.org/releases/18.06.7/targets/x86/64/",
+            "direct_download_url": "https://downloads.openwrt.org/releases/18.06.7/targets/x86/64/openwrt-18.06.7-x86-64-combined-ext4.img.gz",
+            "compression": "gzip"
+        },
+        {
+            "filename": "openwrt-19.07.0-x86-64-combined-ext4.img",
+            "version": "19.07.0",
+            "md5sum": "cf2a353d10e017b9e71cd3761f7aa724",
+            "filesize": 285736960,
+            "download_url": "https://downloads.openwrt.org/releases/19.07.0/targets/x86/64/",
+            "direct_download_url": "https://downloads.openwrt.org/releases/19.07.0/targets/x86/64/openwrt-19.07.0-x86-64-combined-ext4.img.gz",
+            "compression": "gzip"
+        },
+        {
+            "filename": "openwrt-18.06.6-x86-64-combined-ext4.img",
+            "version": "18.06.6",
+            "md5sum": "0afeec80ad7e5035f739e0ed0a07fb83",
+            "filesize": 285736960,
+            "download_url": "https://downloads.openwrt.org/releases/18.06.6/targets/x86/64/",
+            "direct_download_url": "https://downloads.openwrt.org/releases/18.06.6/targets/x86/64/openwrt-18.06.6-x86-64-combined-ext4.img.gz",
+            "compression": "gzip"
+        },
         {
             "filename": "openwrt-18.06.5-x86-64-combined-ext4.img",
             "version": "18.06.5",
@@ -68,6 +104,30 @@
         }
     ],
     "versions": [
+        {
+            "name": "19.07.1",
+            "images": {
+                "hda_disk_image": "openwrt-19.07.1-x86-64-combined-ext4.img"
+            }
+        },
+        {
+            "name": "18.06.7",
+            "images": {
+                "hda_disk_image": "openwrt-18.06.7-x86-64-combined-ext4.img"
+            }
+        },
+        {
+            "name": "19.07.0",
+            "images": {
+                "hda_disk_image": "openwrt-19.07.0-x86-64-combined-ext4.img"
+            }
+        },
+        {
+            "name": "18.06.6",
+            "images": {
+                "hda_disk_image": "openwrt-18.06.6-x86-64-combined-ext4.img"
+            }
+        },
         {
             "name": "18.06.5",
             "images": {
@@ -564,12 +564,14 @@ class DockerVM(BaseNode):
         """
 
         self._display = self._get_free_display_port()
-        if not (shutil.which("Xtigervnc") or shutil.which("Xvfb") and shutil.which("x11vnc")):
-            raise DockerError("Please install tigervnc-standalone-server (recommended) or Xvfb + x11vnc before using VNC support")
+        tigervnc_path = shutil.which("Xtigervnc") or shutil.which("Xvnc")
 
-        if shutil.which("Xtigervnc"):
+        if not (tigervnc_path or shutil.which("Xvfb") and shutil.which("x11vnc")):
+            raise DockerError("Please install TigerVNC (recommended) or Xvfb + x11vnc before using VNC support")
+
+        if tigervnc_path:
             with open(os.path.join(self.working_dir, "vnc.log"), "w") as fd:
-                self._vnc_process = await asyncio.create_subprocess_exec("Xtigervnc",
+                self._vnc_process = await asyncio.create_subprocess_exec(tigervnc_path,
                                                                          "-geometry", self._console_resolution,
                                                                          "-depth", "16",
                                                                          "-interface", self._manager.port_manager.console_host,
@@ -606,8 +608,9 @@ class DockerVM(BaseNode):
         """
 
         self._display = self._get_free_display_port()
-        if not (shutil.which("Xtigervnc") or shutil.which("Xvfb") and shutil.which("x11vnc")):
-            raise DockerError("Please install tigervnc-standalone-server (recommended) or Xvfb + x11vnc before using VNC support")
+        tigervnc_path = shutil.which("Xtigervnc") or shutil.which("Xvnc")
+        if not (tigervnc_path or shutil.which("Xvfb") and shutil.which("x11vnc")):
+            raise DockerError("Please install TigerVNC server (recommended) or Xvfb + x11vnc before using VNC support")
         await self._start_vnc_process()
         x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
         await wait_for_file_creation(x11_socket)
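The change above corresponds to the "Add Xvnc command to the VNC servers list" entry: it remembers which TigerVNC binary was found (Xtigervnc on Debian-style installs, Xvnc elsewhere) and later launches that exact path. A minimal standalone sketch of the same detection pattern, assuming nothing about the rest of the DockerVM class:

# Minimal sketch of the VNC server detection used above (illustrative only).
import shutil

def find_vnc_server():
    """Return the path of a usable VNC X server binary, or None."""
    # Prefer TigerVNC; it ships as Xtigervnc on some distributions and Xvnc on others.
    tigervnc_path = shutil.which("Xtigervnc") or shutil.which("Xvnc")
    if tigervnc_path:
        return tigervnc_path
    # Fall back to the Xvfb + x11vnc combination only if both tools exist.
    if shutil.which("Xvfb") and shutil.which("x11vnc"):
        return shutil.which("Xvfb")
    return None

print(find_vnc_server())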
@@ -87,9 +87,5 @@ done
 ifup -a -f
 
 # continue normal docker startup
-GNS3_CMD="PATH=$OLD_PATH exec"
-while test "$#" -gt 0 ; do
-    GNS3_CMD="${GNS3_CMD} \"${1//\"/\\\"}\""
-    shift
-done
-exec su ${GNS3_USER-root} -p -c "$GNS3_CMD"
+eval HOME=$(echo ~${GNS3_USER-root})
+exec su ${GNS3_USER-root} -p -- /gns3/run-cmd.sh "$OLD_PATH" "$@"
6	gns3server/compute/docker/resources/run-cmd.sh (new executable file)
@@ -0,0 +1,6 @@
+#!/bin/sh
+# run docker startup, first arg is new PATH, remainder is command
+
+PATH="$1"
+shift
+exec "$@"
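The new run-cmd.sh helper receives the user command as separate arguments and hands them straight to exec "$@", which sidesteps the quote-escaping that the removed GNS3_CMD string building had to do. A small Python sketch of the same idea, argv list versus shell string; the sample command is purely illustrative:

# Sketch: passing a command as an argument vector instead of a quoted shell string,
# the same principle as run-cmd.sh doing `exec "$@"` (not GNS3 code).
import subprocess

argv = ["echo", 'hello "quoted" world', "$HOME is not expanded"]

# Safe: each element reaches the program unchanged, no re-quoting needed.
subprocess.run(argv, check=True)

# Fragile: building one shell string requires escaping every quote yourself,
# which is what the removed GNS3_CMD loop tried to do; words get re-split here.
subprocess.run(" ".join(argv), shell=True, check=True)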
@@ -493,7 +493,7 @@ class Dynamips(BaseManager):
             await vm.set_sparsemem(False)
 
         usage = settings.get("usage")
-        if usage and usage != vm.usage:
+        if usage is not None and usage != vm.usage:
             vm.usage = usage
 
         # update the configs if needed
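The `is not None` change matters because an empty string is falsy in Python: with the old check, clearing the "usage" field sent an empty string that was silently ignored, so its content could never be deleted (the gns3-gui issue 2947 referenced in the changelog). A minimal illustration:

# Why `if usage:` cannot clear a field: the empty string is falsy.
current_usage = "old text"
new_usage = ""  # the user cleared the field

if new_usage and new_usage != current_usage:               # old check: never runs for ""
    current_usage = new_usage

if new_usage is not None and new_usage != current_usage:   # new check: runs for ""
    current_usage = new_usage

print(repr(current_usage))  # '' -> the field is actually cleared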
@@ -25,7 +25,6 @@ import asyncio
 from ..base_manager import BaseManager
 from .iou_error import IOUError
 from .iou_vm import IOUVM
-from .utils.application_id import get_next_application_id
 
 import logging
 log = logging.getLogger(__name__)
@@ -48,12 +47,7 @@ class IOU(BaseManager):
         :returns: IOUVM instance
         """
 
-        async with self._iou_id_lock:
-            # wait for a node to be completely created before adding a new one
-            # this is important otherwise we allocate the same application ID
-            # when creating multiple IOU node at the same time
-            application_id = get_next_application_id(self.nodes)
-            node = await super().create_node(*args, application_id=application_id, **kwargs)
+        node = await super().create_node(*args, **kwargs)
         return node
 
     @staticmethod
@@ -70,6 +70,10 @@ class IOUVM(BaseNode):
 
         super().__init__(name, node_id, project, manager, console=console, console_type=console_type)
 
+        log.info('IOU "{name}" [{id}]: assigned with application ID {application_id}'.format(name=self._name,
+                                                                                             id=self._id,
+                                                                                             application_id=application_id))
+
         self._iou_process = None
         self._telnet_server = None
         self._iou_stdout_file = ""
@@ -1647,7 +1647,16 @@ class QemuVM(BaseNode):
                 options.extend(["-device", 'ahci,id=ahci{}'.format(disk_index)])
                 options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk'.format(disk, disk_index, disk_index)])
                 options.extend(["-device", 'ide-drive,drive=drive{},bus=ahci{}.0,id=drive{}'.format(disk_index, disk_index, disk_index)])
+            elif interface == "nvme":
+                options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk'.format(disk, disk_index, disk_index)])
+                options.extend(["-device", 'nvme,drive=drive{},serial={}'.format(disk_index, disk_index)])
+            elif interface == "scsi":
+                options.extend(["-device", 'virtio-scsi-pci,id=scsi{}'.format(disk_index)])
+                options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk'.format(disk, disk_index, disk_index)])
+                options.extend(["-device", 'scsi-hd,drive=drive{}'.format(disk_index)])
+            #elif interface == "sd":
+            #    options.extend(["-drive", 'file={},id=drive{},index={}'.format(disk, disk_index, disk_index)])
+            #    options.extend(["-device", 'sd-card,drive=drive{},id=drive{}'.format(disk_index, disk_index, disk_index)])
             else:
                 options.extend(["-drive", 'file={},if={},index={},media=disk,id=drive{}'.format(disk, interface, disk_index, disk_index)])
 
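For a node whose disk interface is "nvme", the new branch emits a -drive/-device pair per disk. A hand-worked example of what those format strings produce for the first disk; the image name disk0.qcow2 is only illustrative, not server output:

# Hand-worked example of the nvme branch above for disk_index == 0.
disk, disk_index = "disk0.qcow2", 0
options = []
options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk'.format(disk, disk_index, disk_index)])
options.extend(["-device", 'nvme,drive=drive{},serial={}'.format(disk_index, disk_index)])
print(" ".join(options))
# -drive file=disk0.qcow2,if=none,id=drive0,index=0,media=disk -device nvme,drive=drive0,serial=0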
@@ -1723,8 +1732,11 @@ class QemuVM(BaseNode):
 
         patched_qemu = False
         if self._legacy_networking:
-            version = await self.manager.get_qemu_version(self.qemu_path)
-            if version and parse_version(version) < parse_version("1.1.0"):
+            qemu_version = await self.manager.get_qemu_version(self.qemu_path)
+            if qemu_version:
+                if parse_version(qemu_version) >= parse_version("2.9.0"):
+                    raise QemuError("Qemu version 2.9.0 and later doesn't support legacy networking mode")
+                if parse_version(qemu_version) < parse_version("1.1.0"):
                     # this is a patched Qemu if version is below 1.1.0
                     patched_qemu = True
 
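The guard above compares parsed version objects rather than raw strings, which is what makes "2.10.0" sort after "2.9.0". A quick standalone illustration; it uses pkg_resources.parse_version for self-containment, on the assumption that the server's own parse_version helper behaves the same way for these simple version strings:

# Why a parsed-version comparison is needed (standalone sketch, not GNS3 code).
from pkg_resources import parse_version

qemu_version = "2.10.0"

print(qemu_version >= "2.9.0")                                # False: string comparison is lexicographic
print(parse_version(qemu_version) >= parse_version("2.9.0"))  # True: 2.10.0 is newer than 2.9.0

if parse_version(qemu_version) >= parse_version("2.9.0"):
    print("legacy networking mode must be rejected for this QEMU")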
@@ -279,7 +279,7 @@ class VirtualBoxVM(BaseNode):
             await self._set_network_options()
             await self._set_serial_console()
         else:
-            raise VirtualBoxError("VirtualBox VM not powered off")
+            raise VirtualBoxError("VirtualBox VM '{}' is not powered off (current state is '{}')".format(self.name, vm_state))
 
         # check if there is enough RAM to run
         self.check_available_ram(self.ram)
@@ -320,7 +320,8 @@ class VirtualBoxVM(BaseNode):
         await self._stop_ubridge()
         await self._stop_remote_console()
         vm_state = await self._get_vm_state()
-        if vm_state in ("running", "paused", "stuck"):
+        log.info("Stopping VirtualBox VM '{name}' [{id}] (current state is {vm_state})".format(name=self.name, id=self.id, vm_state=vm_state))
+        if vm_state in ("running", "paused"):
 
             if self.on_close == "save_vm_state":
                 # add a guest property to know the VM has been saved
@@ -348,7 +349,10 @@ class VirtualBoxVM(BaseNode):
                 result = await self._control_vm("poweroff")
                 self.status = "stopped"
                 log.debug("Stop result: {}".format(result))
+        elif vm_state == "aborted":
+            self.status = "stopped"
+
+        if self.status == "stopped":
             log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
             await asyncio.sleep(0.5)  # give some time for VirtualBox to unlock the VM
             if self.on_close != "save_vm_state":
@@ -481,7 +481,7 @@ class Controller:
     @property
     def projects(self):
         """
-        :returns: The dictionary of projects managed by GNS3
+        :returns: The dictionary of projects managed by the controller
        """
 
         return self._projects
@@ -448,11 +448,11 @@ class Compute:
                     log.error("Error received on compute WebSocket '{}': {}".format(ws_url, ws.exception()))
                 elif response.type == aiohttp.WSMsgType.CLOSED:
                     pass
-                self._connected = False
                 break
         except aiohttp.client_exceptions.ClientResponseError as e:
             log.error("Client response error received on compute WebSocket '{}': {}".format(ws_url,e))
         finally:
+            self._connected = False
             log.info("Connection closed to compute WebSocket '{}'".format(ws_url))
 
             # Try to reconnect after 1 second if server unavailable only if not during tests (otherwise we create a ressources usage bomb)
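Moving self._connected = False into the finally block guarantees the flag is cleared on every exit from the WebSocket loop (clean close, error message, or exception), so the reconnection logic mentioned in the comment always sees the compute as disconnected. A self-contained sketch of that connect/monitor/reconnect shape; class and attribute names are illustrative, not the Compute API:

# Sketch of the pattern: reset connection state in `finally`, then retry.
import asyncio

class ComputeLink:
    def __init__(self):
        self.connected = False

    async def _listen(self):
        self.connected = True
        try:
            while True:                      # read messages until the peer closes
                await asyncio.sleep(1)       # placeholder for ws.receive()
                break                        # pretend the connection closed
        finally:
            # Runs on clean close *and* on exceptions, so the flag is never stale.
            self.connected = False

    async def run(self):
        while True:
            await self._listen()
            await asyncio.sleep(1)           # wait before trying to reconnect
            break                            # stop here so the sketch terminates

asyncio.run(ComputeLink().run())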
@@ -16,7 +16,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import hashlib
-import asyncio
 import base64
 import uuid
 import re
@@ -21,9 +21,6 @@ import asyncio
 import psutil
 import ipaddress
 
-if sys.platform.startswith("win"):
-    import wmi
-
 from .base_gns3_vm import BaseGNS3VM
 from .gns3_vm_error import GNS3VMError
 log = logging.getLogger(__name__)
@@ -47,6 +44,7 @@ class HyperVGNS3VM(BaseGNS3VM):
         self._conn = None
         self._vm = None
         self._management = None
+        self._wmi = None
 
     def _check_requirements(self):
         """
@@ -64,8 +62,12 @@ class HyperVGNS3VM(BaseGNS3VM):
             raise GNS3VMError("Hyper-V with nested virtualization is only supported on Windows 10 Anniversary Update (build 10.0.14393) or later")
 
         try:
-            conn = wmi.WMI()
-        except wmi.x_wmi as e:
+            import pythoncom
+            pythoncom.CoInitialize()
+            import wmi
+            self._wmi = wmi
+            conn = self._wmi.WMI()
+        except self._wmi.x_wmi as e:
             raise GNS3VMError("Could not connect to WMI: {}".format(e))
 
         if not conn.Win32_ComputerSystem()[0].HypervisorPresent:
@@ -86,8 +88,8 @@ class HyperVGNS3VM(BaseGNS3VM):
         self._check_requirements()
 
         try:
-            self._conn = wmi.WMI(namespace=r"root\virtualization\v2")
-        except wmi.x_wmi as e:
+            self._conn = self._wmi.WMI(namespace=r"root\virtualization\v2")
+        except self._wmi.x_wmi as e:
             raise GNS3VMError("Could not connect to WMI: {}".format(e))
 
         if not self._conn.Msvm_VirtualSystemManagementService():
@@ -182,10 +184,10 @@ class HyperVGNS3VM(BaseGNS3VM):
 
         vms = []
         try:
-            for vm in self._conn.Msvm_VirtualSystemSettingData():
-                if vm.VirtualSystemType == "Microsoft:Hyper-V:System:Realized":
+            for vm in self._conn.Msvm_ComputerSystem():
+                if vm.ElementName != self._management.SystemName:
                     vms.append({"vmname": vm.ElementName})
-        except wmi.x_wmi as e:
+        except self._wmi.x_wmi as e:
             raise GNS3VMError("Could not list Hyper-V VMs: {}".format(e))
         return vms
 
@@ -194,7 +196,7 @@ class HyperVGNS3VM(BaseGNS3VM):
         Gets the WMI object.
         """
 
-        return wmi.WMI(moniker=path.replace('\\', '/'))
+        return self._wmi.WMI(moniker=path.replace('\\', '/'))
 
     async def _set_state(self, state):
         """
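This change replaces the module-level import of wmi (which could fail at server start, issue #1712) with a lazy import inside the Hyper-V code path, after pythoncom.CoInitialize() prepares COM for the current thread; the module object is then kept on self._wmi so later calls and exception handlers reuse the same reference. A minimal sketch of that lazy-import pattern; it assumes the Windows-only pywin32 and WMI packages and is not runnable elsewhere:

# Sketch: defer a platform-specific import until it is actually needed.
class HyperVBackend:
    def __init__(self):
        self._wmi = None  # module reference, filled in lazily

    def _load_wmi(self):
        if self._wmi is None:
            # Both imports only exist on Windows, so they must not run at
            # module import time on Linux or macOS.
            import pythoncom
            pythoncom.CoInitialize()   # initialize COM for this thread
            import wmi
            self._wmi = wmi
        return self._wmi

    def connect(self):
        wmi = self._load_wmi()
        try:
            return wmi.WMI(namespace=r"root\virtualization\v2")
        except wmi.x_wmi as e:
            raise RuntimeError("Could not connect to WMI: {}".format(e))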
@@ -23,6 +23,8 @@ import socket
 
 from .base_gns3_vm import BaseGNS3VM
 from .gns3_vm_error import GNS3VMError
+from gns3server.utils import parse_version
+from gns3server.utils.asyncio import wait_run_in_executor
 
 from ...compute.virtualbox import (
     VirtualBox,
@@ -38,6 +40,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
 
         self._engine = "virtualbox"
         super().__init__(controller)
+        self._system_properties = {}
         self._virtualbox_manager = VirtualBox()
 
     async def _execute(self, subcommand, args, timeout=60):
@@ -63,6 +66,42 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
                     return value.strip('"')
         return "unknown"
 
+    async def _get_system_properties(self):
+        """
+        Returns the VM state (e.g. running, paused etc.)
+
+        :returns: state (string)
+        """
+
+        properties = await self._execute("list", ["systemproperties"])
+        for prop in properties.splitlines():
+            try:
+                name, value = prop.split(':', 1)
+            except ValueError:
+                continue
+            self._system_properties[name.strip()] = value.strip()
+
+    async def _check_requirements(self):
+        """
+        Checks if the GNS3 VM can run on VirtualBox
+        """
+
+        if not self._system_properties:
+            await self._get_system_properties()
+        if "API version" not in self._system_properties:
+            raise VirtualBoxError("Can't access to VirtualBox API version:\n{}".format(self._system_properties))
+        from cpuinfo import get_cpu_info
+        cpu_info = await wait_run_in_executor(get_cpu_info)
+        vendor_id = cpu_info['vendor_id']
+        if vendor_id == "GenuineIntel":
+            if parse_version(self._system_properties["API version"]) < parse_version("6_1"):
+                raise VirtualBoxError("VirtualBox version 6.1 or above is required to run the GNS3 VM with nested virtualization enabled on Intel processors")
+        elif vendor_id == "AuthenticAMD":
+            if parse_version(self._system_properties["API version"]) < parse_version("6_0"):
+                raise VirtualBoxError("VirtualBox version 6.0 or above is required to run the GNS3 VM with nested virtualization enabled on AMD processors")
+        else:
+            log.warning("Could not determine CPU vendor: {}".format(vendor_id))
+
     async def _look_for_interface(self, network_backend):
         """
         Look for an interface with a specific network backend.
@@ -173,13 +212,16 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
         List all VirtualBox VMs
         """
 
-        return (await self._virtualbox_manager.list_vms())
+        await self._check_requirements()
+        return await self._virtualbox_manager.list_vms()
 
     async def start(self):
         """
         Start the GNS3 VM.
         """
 
+        await self._check_requirements()
+
         # get a NAT interface number
         nat_interface_number = await self._look_for_interface("nat")
         if nat_interface_number < 0:
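The new _check_requirements works by parsing the key/value output of "VBoxManage list systemproperties" (obtained through self._execute) and comparing the reported "API version" against 6_1 on Intel or 6_0 on AMD, matching the "Require VirtualBox >= 6.0/6.1" changelog entry. A small standalone sketch of that parse-and-compare step on canned text; the sample output lines are illustrative, and a plain tuple comparison stands in for the server's parse_version helper:

# Sketch: parse `VBoxManage list systemproperties` output and check the API version.
sample_output = """API version:             6_1
Minimum guest RAM size:   4 Megabytes
Default machine folder:   /home/user/VirtualBox VMs"""

system_properties = {}
for prop in sample_output.splitlines():
    try:
        name, value = prop.split(':', 1)   # split only on the first colon
    except ValueError:
        continue
    system_properties[name.strip()] = value.strip()

api_version = system_properties.get("API version", "0_0")
major, minor = (int(x) for x in api_version.split('_'))
required = (6, 1)   # 6_1 on Intel, 6_0 on AMD in the change above
if (major, minor) < required:
    raise RuntimeError("VirtualBox {}.{} or above is required".format(*required))
print("VirtualBox API version {} is sufficient".format(api_version))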
@@ -38,6 +38,7 @@ from .topology import project_to_topology, load_topology
 from .udp_link import UDPLink
 from ..config import Config
 from ..utils.path import check_path_allowed, get_default_project_directory
+from ..utils.application_id import get_next_application_id
 from ..utils.asyncio.pool import Pool
 from ..utils.asyncio import locking
 from ..utils.asyncio import aiozipstream
@@ -126,6 +127,8 @@ class Project:
         assert self._status != "closed"
         self.dump()
 
+        self._iou_id_lock = asyncio.Lock()
+
     def emit_notification(self, action, event):
         """
         Emit a notification to all clients using this project.
@@ -516,17 +519,7 @@ class Project:
         node = await self.add_node(compute, name, node_id, node_type=node_type, **template)
         return node
 
-    @open_required
-    async def add_node(self, compute, name, node_id, dump=True, node_type=None, **kwargs):
-        """
-        Create a node or return an existing node
-
-        :param dump: Dump topology to disk
-        :param kwargs: See the documentation of node
-        """
-
-        if node_id in self._nodes:
-            return self._nodes[node_id]
-
+    async def _create_node(self, compute, name, node_id, node_type=None, **kwargs):
         node = Node(self, compute, name, node_id=node_id, node_type=node_type, **kwargs)
         if compute not in self._project_created_on_compute:
@@ -547,10 +540,39 @@ class Project:
             data["variables"] = self._variables
 
         await compute.post("/projects", data=data)
 
         self._project_created_on_compute.add(compute)
 
         await node.create()
         self._nodes[node.id] = node
+        return node
+
+    @open_required
+    async def add_node(self, compute, name, node_id, dump=True, node_type=None, **kwargs):
+        """
+        Create a node or return an existing node
+
+        :param dump: Dump topology to disk
+        :param kwargs: See the documentation of node
+        """
+
+        if node_id in self._nodes:
+            return self._nodes[node_id]
+
+        if node_type == "iou":
+            async with self._iou_id_lock:
+                # wait for a IOU node to be completely created before adding a new one
+                # this is important otherwise we allocate the same application ID (used
+                # to generate MAC addresses) when creating multiple IOU node at the same time
+                if "properties" in kwargs.keys():
+                    # allocate a new application id for nodes loaded from the project
+                    kwargs.get("properties")["application_id"] = get_next_application_id(self._controller.projects, compute)
+                elif "application_id" not in kwargs.keys() and not kwargs.get("properties"):
+                    # allocate a new application id for nodes added to the project
+                    kwargs["application_id"] = get_next_application_id(self._controller.projects, compute)
+                node = await self._create_node(compute, name, node_id, node_type, **kwargs)
+        else:
+            node = await self._create_node(compute, name, node_id, node_type, **kwargs)
         self.emit_notification("node.created", node.__json__())
         if dump:
             self.dump()
@@ -662,6 +684,8 @@ class Project:
     @open_required
     async def delete_drawing(self, drawing_id):
         drawing = self.get_drawing(drawing_id)
+        if drawing.locked:
+            raise aiohttp.web.HTTPConflict(text="Drawing ID {} cannot be deleted because it is locked".format(drawing_id))
         del self._drawings[drawing.id]
         self.dump()
         self.emit_notification("drawing.deleted", drawing.__json__())
@@ -1102,8 +1126,7 @@ class Project:
         data['z'] = z
         data['locked'] = False  # duplicated node must not be locked
         new_node_uuid = str(uuid.uuid4())
-        new_node = await self.add_node(
-            node.compute,
+        new_node = await self.add_node(node.compute,
             node.name,
             new_node_uuid,
             node_type=node_type,
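The add_node rewrite serializes IOU node creation with an asyncio.Lock held per project, so two concurrent "create IOU node" requests cannot both read the set of used application IDs before either has registered its own. A compact sketch of why the lock matters; the names and the in-memory ID pool are illustrative, not the Project API:

# Sketch: serializing ID allocation with asyncio.Lock to avoid duplicate IDs.
import asyncio

used_ids = set()
iou_id_lock = asyncio.Lock()

def next_free_id():
    return min(set(range(1, 512)) - used_ids)

async def create_iou_node(name):
    async with iou_id_lock:              # without the lock, two coroutines could
        application_id = next_free_id()  # both pick the same free ID here
        await asyncio.sleep(0)           # stand-in for the real compute round-trip
        used_ids.add(application_id)
    return name, application_id

async def main():
    nodes = await asyncio.gather(*(create_iou_node("IOU{}".format(i)) for i in range(3)))
    print(nodes)   # three distinct application IDs

asyncio.run(main())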
@@ -171,6 +171,11 @@ def load_topology(path):
         if "console_type" in node and node["console_type"] is None:
             node["console_type"] = "none"
 
+    # make sure we can open a project with empty variable name
+    variables = topo.get("variables")
+    if variables:
+        topo["variables"] = [var for var in variables if var.get("name")]
+
     try:
         _check_topology_schema(topo)
     except aiohttp.web.HTTPConflict as e:
@@ -58,7 +58,7 @@ class CrashReport:
     Report crash to a third party service
     """
 
-    DSN = "https://19f9932414a74c79b74d5db1ee90af54:550055575371472ab0e71f202cdf196d@sentry.io/38482"
+    DSN = "https://03a62c6395a54334b0818302138c6555:d7977024b6314ffbaddae057f9bb53b8@sentry.io/38482"
     if hasattr(sys, "frozen"):
         cacert = get_resource("cacert.pem")
         if cacert is not None and os.path.isfile(cacert):
@@ -60,6 +60,7 @@ class IOUHandler:
         vm = await iou.create_node(request.json.pop("name"),
                                    request.match_info["project_id"],
                                    request.json.get("node_id"),
+                                   application_id=request.json.get("application_id"),
                                    path=request.json.get("path"),
                                    console=request.json.get("console"),
                                    console_type=request.json.get("console_type", "telnet"))
@@ -67,7 +68,7 @@ class IOUHandler:
         for name, value in request.json.items():
             if hasattr(vm, name) and getattr(vm, name) != value:
                 if name == "application_id":
-                    continue  # we must ignore this to avoid overwriting the application_id allocated by the IOU manager
+                    continue  # we must ignore this to avoid overwriting the application_id allocated by the controller
                 if name == "startup_config_content" and (vm.startup_config_content and len(vm.startup_config_content) > 0):
                     continue
                 if name == "private_config_content" and (vm.private_config_content and len(vm.private_config_content) > 0):
@@ -96,7 +96,7 @@ IOU_CREATE_SCHEMA = {
         },
     },
     "additionalProperties": False,
-    "required": ["name", "path"]
+    "required": ["application_id", "name", "path"]
 }
 
 
@@ -38,7 +38,8 @@ VARIABLES_OBJECT_SCHEMA = {
     "properties": {
         "name": {
             "type": "string",
-            "description": "Variable name"
+            "description": "Variable name",
+            "minLength": 1
         },
         "value": {
             "type": "string",
@@ -64,9 +64,9 @@ QEMU_TEMPLATE_PROPERTIES = {
     "adapter_type": {
         "description": "QEMU adapter type",
         "type": "string",
-        "enum": ["e1000", "i82550", "i82551", "i82557a", "i82557b", "i82557c", "i82558a","i82558b", "i82559a",
-                 "i82559b", "i82559c", "i82559er", "i82562", "i82801", "ne2k_pci", "pcnet", "rtl8139", "virtio",
-                 "virtio-net-pci", "vmxnet3"],
+        "enum": ["e1000", "e1000-82544gc", "e1000-82545em", "e1000e", "i82550", "i82551", "i82557a", "i82557b", "i82557c", "i82558a",
+                 "i82558b", "i82559a", "i82559b", "i82559c", "i82559er", "i82562", "i82801", "ne2k_pci", "pcnet", "rocker", "rtl8139",
+                 "virtio", "virtio-net-pci", "vmxnet3"],
         "default": "e1000"
     },
     "mac_address": {
@@ -115,7 +115,7 @@ QEMU_TEMPLATE_PROPERTIES = {
     },
     "hda_disk_interface": {
         "description": "QEMU hda interface",
-        "enum": ["ide", "sata", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
+        "enum": ["ide", "sata", "nvme", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
         "default": "ide"
     },
     "hdb_disk_image": {
@@ -125,7 +125,7 @@ QEMU_TEMPLATE_PROPERTIES = {
     },
     "hdb_disk_interface": {
         "description": "QEMU hdb interface",
-        "enum": ["ide", "sata", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
+        "enum": ["ide", "sata", "nvme", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
         "default": "ide"
     },
     "hdc_disk_image": {
@@ -135,7 +135,7 @@ QEMU_TEMPLATE_PROPERTIES = {
     },
     "hdc_disk_interface": {
         "description": "QEMU hdc interface",
-        "enum": ["ide", "sata", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
+        "enum": ["ide", "sata", "nvme", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
         "default": "ide"
     },
     "hdd_disk_image": {
@@ -145,7 +145,7 @@ QEMU_TEMPLATE_PROPERTIES = {
     },
     "hdd_disk_interface": {
         "description": "QEMU hdd interface",
-        "enum": ["ide", "sata", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
+        "enum": ["ide", "sata", "nvme", "scsi", "sd", "mtd", "floppy", "pflash", "virtio", "none"],
         "default": "ide"
     },
     "cdrom_image": {
@@ -93,6 +93,8 @@ THE SOFTWARE.
 
 @angular/material/checkbox
 
+@angular/material/chips
+
 @angular/material/core
 
 @angular/material/dialog
@@ -1792,8 +1794,14 @@ the License, but only in their entirety and only with respect to the Combined
 Software.
 
 
+ng-circle-progress
+MIT
+
 ng2-file-upload
 
+ngx-childprocess
+MIT
+
 ngx-device-detector
 MIT
 The MIT License
@@ -2674,30 +2682,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
 
-webpack
-MIT
-Copyright JS Foundation and other contributors
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
 zone.js
 MIT
 The MIT License
@@ -33,9 +33,19 @@
     }
   })();
   </script>
-<link rel="stylesheet" href="styles.06678052914fe71fbfae.css"></head>
+<link rel="stylesheet" href="styles.b9b50fa6f0efb42b5d70.css"></head>
 <!-- <body class="mat-app-background" oncontextmenu="return false;"> -->
 <body class="mat-app-background" oncontextmenu="return false;">
   <app-root></app-root>
-<script src="runtime.3b1df53b846aa25ed7e6.js" defer></script><script src="polyfills-es5.8928d61b0c56a8998c1c.js" nomodule defer></script><script src="polyfills.b9cf08f4f6defdfd6c4f.js" defer></script><script src="main.89a8d561ddbdc1d9fa28.js" defer></script></body>
+<script async src="https://www.googletagmanager.com/gtag/js?id=G-5D6FZL9923"></script>
+<script>
+  window.dataLayer = window.dataLayer || [];
+  function gtag(){dataLayer.push(arguments);}
+  gtag('js', new Date());
+
+  gtag('config', 'G-5D6FZL9923');
+</script>
+<script src="runtime.a8ef3a8272419c2e2c66.js" defer></script><script src="polyfills-es5.adfecedb3e8b94a475d4.js" nomodule defer></script><script src="polyfills.312ad1f991887572d9bc.js" defer></script><script src="main.c31c0c40fa0fcdc2cdcf.js" defer></script></body>
 </html>
1	gns3server/static/web-ui/main.c31c0c40fa0fcdc2cdcf.js (new file)
6	gns3server/static/web-ui/styles.b9b50fa6f0efb42b5d70.css (new file)
Diffs for these and the other web-ui bundle files are suppressed because one or more lines are too long.
@@ -28,6 +28,7 @@ import re
 
 from gns3server.utils import parse_version
 from gns3server.utils.asyncio import wait_for_process_termination
+from gns3server.utils.asyncio import monitor_process
 from gns3server.utils.asyncio import subprocess_check_output
 from .ubridge_hypervisor import UBridgeHypervisor
 from .ubridge_error import UbridgeError
@@ -176,6 +177,7 @@ class Hypervisor(UBridgeHypervisor):
                                                                        env=env)
 
             log.info("ubridge started PID={}".format(self._process.pid))
+            monitor_process(self._process, self._termination_callback)
         except (OSError, subprocess.SubprocessError) as e:
             ubridge_stdout = self.read_stdout()
             log.error("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
@@ -188,9 +190,12 @@ class Hypervisor(UBridgeHypervisor):
         :param returncode: Process returncode
         """
 
-        log.info("uBridge process has stopped, return code: %d", returncode)
         if returncode != 0:
-            self._project.emit("log.error", {"message": "uBridge process has stopped, return code: {}\n{}".format(returncode, self.read_stdout())})
+            error_msg = "uBridge process has stopped, return code: {}\n{}\n".format(returncode, self.read_stdout())
+            log.error(error_msg)
+            self._project.emit("log.error", {"message": error_msg})
+        else:
+            log.info("uBridge process has stopped, return code: %d", returncode)
 
     async def stop(self):
         """
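monitor_process attaches a watcher to the freshly started uBridge subprocess and invokes _termination_callback with the return code when it exits, which is what the "Monitor ubridge processes" changelog entry refers to. A self-contained asyncio sketch of such a watcher; the helper name and signature come from the diff, but the body is an assumption about how it could work, not the server's implementation:

# Sketch: watch an asyncio subprocess and report its return code to a callback.
import asyncio

def monitor_process(process, termination_callback):
    """Schedule a task that waits for `process` and then calls the callback."""
    async def _watch():
        returncode = await process.wait()
        termination_callback(returncode)
    asyncio.ensure_future(_watch())

def on_terminated(returncode):
    if returncode != 0:
        print("process stopped with an error, return code:", returncode)
    else:
        print("process stopped normally")

async def main():
    process = await asyncio.create_subprocess_exec("true")   # stand-in for ubridge
    monitor_process(process, on_terminated)
    await asyncio.sleep(0.5)    # give the watcher time to fire

asyncio.run(main())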
@@ -15,24 +15,32 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from ..iou_error import IOUError
+import aiohttp
 
 import logging
 log = logging.getLogger(__name__)
 
 
-def get_next_application_id(nodes):
+def get_next_application_id(projects, compute):
     """
     Calculates free application_id from given nodes
 
-    :param nodes:
-    :raises IOUError when exceeds number
+    :param projects: all projects managed by controller
+    :param compute: Compute instance
+    :raises HTTPConflict when exceeds number
     :return: integer first free id
     """
 
-    used = set([n.application_id for n in nodes])
+    nodes = []
+
+    # look for application id for in all nodes across all opened projects that share the same compute
+    for project in projects.values():
+        if project.status == "opened" and compute in project.computes:
+            nodes.extend(list(project.nodes.values()))
+
+    used = set([n.properties["application_id"] for n in nodes if n.node_type == "iou"])
     pool = set(range(1, 512))
     try:
         return (pool - used).pop()
     except KeyError:
-        raise IOUError("Cannot create a new IOU VM (limit of 512 VMs on one host reached)")
+        raise aiohttp.web.HTTPConflict(text="Cannot create a new IOU node (limit of 512 nodes across all opened projects using compute {} reached".format(compute.name))
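The allocator treats application IDs as a pool of 1 to 511 and picks a member of the set difference between that pool and the IDs already used by IOU nodes across all opened projects on the same compute; note that set.pop() returns an arbitrary free ID, not necessarily the lowest one. A small worked example of the set arithmetic:

# Worked example of the pool/used set arithmetic behind get_next_application_id().
used = {1, 2, 5}                 # application IDs already taken by IOU nodes
pool = set(range(1, 512))        # valid IDs are 1..511

free = pool - used
print(len(free))                           # 508 IDs still available
print((pool - used).pop() not in used)     # True: the picked ID is never a duplicate

# When every ID is taken the difference is empty and .pop() raises KeyError,
# which the server turns into an HTTP 409 Conflict.
try:
    (pool - pool).pop()
except KeyError:
    print("no free application ID left")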
@@ -6,6 +6,7 @@ aiofiles==0.4.0
 async_generator>=1.10
 Jinja2>=2.7.3
 raven>=5.23.0
-psutil==5.6.3
+psutil==5.6.6
 async-timeout==3.0.1
 distro>=1.3.0
+py-cpuinfo==5.0.0
@@ -18,6 +18,6 @@
 # A docker server use for localy test a remote GNS3 server
 
 docker build -t gns3-server .
-docker run -i -h gns3vm -p 8001:8001/tcp -t gns3-server python3 -m gns3server --local --port 8001
+docker run -i -h gns3vm -p 8001:8001/tcp -t gns3-server python3 -m gns3server --port 8001
 
 
@@ -1,79 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2015 GNS3 Technologies Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-import pytest
-from unittest.mock import patch
-import uuid
-import sys
-
-pytestmark = pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
-
-if not sys.platform.startswith("win"):
-    from gns3server.compute.iou import IOU
-    from gns3server.compute.iou.iou_error import IOUError
-
-from gns3server.compute.project_manager import ProjectManager
-
-
-@pytest.fixture(scope="function")
-def iou(port_manager):
-    # Cleanup the IOU object
-    IOU._instance = None
-    iou = IOU.instance()
-    iou.port_manager = port_manager
-    return iou
-
-
-def test_application_id(loop, project, iou):
-    vm1_id = str(uuid.uuid4())
-    vm2_id = str(uuid.uuid4())
-    vm3_id = str(uuid.uuid4())
-    vm1 = loop.run_until_complete(iou.create_node("PC 1", project.id, vm1_id))
-    vm2 = loop.run_until_complete(iou.create_node("PC 2", project.id, vm2_id))
-    assert vm1.application_id == 1
-    assert vm2.application_id == 2
-    loop.run_until_complete(iou.delete_node(vm1_id))
-    vm3 = loop.run_until_complete(iou.create_node("PC 3", project.id, vm3_id))
-    assert vm3.application_id == 1
-
-
-def test_get_application_id_multiple_project(loop, iou):
-    vm1_id = str(uuid.uuid4())
-    vm2_id = str(uuid.uuid4())
-    vm3_id = str(uuid.uuid4())
-    project1 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
-    project2 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
-    vm1 = loop.run_until_complete(iou.create_node("PC 1", project1.id, vm1_id))
-    vm2 = loop.run_until_complete(iou.create_node("PC 2", project1.id, vm2_id))
-    vm3 = loop.run_until_complete(iou.create_node("PC 2", project2.id, vm3_id))
-    assert vm1.application_id == 1
-    assert vm2.application_id == 2
-    assert vm3.application_id == 3
-
-
-def test_get_application_id_no_id_available(loop, project, iou):
-    with pytest.raises(IOUError):
-        for i in range(1, 513):
-            node_id = str(uuid.uuid4())
-            vm = loop.run_until_complete(iou.create_node("PC {}".format(i), project.id, node_id))
-            assert vm.application_id == i
-
-
-def test_get_images_directory(iou, tmpdir):
-    with patch("gns3server.config.Config.get_section_config", return_value={"images_path": str(tmpdir)}):
-        assert iou.get_images_directory() == str(tmpdir / "IOU")
@@ -28,6 +28,7 @@ from uuid import uuid4
 
 from gns3server.controller.project import Project
 from gns3server.controller.template import Template
+from gns3server.controller.node import Node
 from gns3server.controller.ports.ethernet_port import EthernetPort
 from gns3server.config import Config
 
@@ -204,6 +205,131 @@ def test_add_node_non_local(async_run, controller):
     project.emit_notification.assert_any_call("node.created", node.__json__())
 
 
+def test_add_node_iou(async_run, controller):
+    """
+    Test if an application ID is allocated for IOU nodes
+    """
+    compute = MagicMock()
+    compute.id = "local"
+    project = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test1"))
+    project.emit_notification = MagicMock()
+
+    response = MagicMock()
+    compute.post = AsyncioMagicMock(return_value=response)
+
+    node1 = async_run(project.add_node(compute, "test1", None, node_type="iou"))
+    node2 = async_run(project.add_node(compute, "test2", None, node_type="iou"))
+    node3 = async_run(project.add_node(compute, "test3", None, node_type="iou"))
+    assert node1.properties["application_id"] == 1
+    assert node2.properties["application_id"] == 2
+    assert node3.properties["application_id"] == 3
+
+
+def test_add_node_iou_with_multiple_projects(async_run, controller):
+    """
+    Test if an application ID is allocated for IOU nodes with different projects already opened
+    """
+    compute = MagicMock()
+    compute.id = "local"
+    project1 = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test1"))
+    project1.emit_notification = MagicMock()
+    project2 = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test2"))
+    project2.emit_notification = MagicMock()
+    project3 = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test3"))
+    project3.emit_notification = MagicMock()
+    response = MagicMock()
+    compute.post = AsyncioMagicMock(return_value=response)
+
+    node1 = async_run(project1.add_node(compute, "test1", None, node_type="iou"))
+    node2 = async_run(project1.add_node(compute, "test2", None, node_type="iou"))
+    node3 = async_run(project1.add_node(compute, "test3", None, node_type="iou"))
+
+    node4 = async_run(project2.add_node(compute, "test4", None, node_type="iou"))
+    node5 = async_run(project2.add_node(compute, "test5", None, node_type="iou"))
+    node6 = async_run(project2.add_node(compute, "test6", None, node_type="iou"))
+
+    node7 = async_run(project3.add_node(compute, "test7", None, node_type="iou"))
+    node8 = async_run(project3.add_node(compute, "test8", None, node_type="iou"))
+    node9 = async_run(project3.add_node(compute, "test9", None, node_type="iou"))
+
+    assert node1.properties["application_id"] == 1
+    assert node2.properties["application_id"] == 2
+    assert node3.properties["application_id"] == 3
+
+    assert node4.properties["application_id"] == 4
+    assert node5.properties["application_id"] == 5
+    assert node6.properties["application_id"] == 6
+
+    assert node7.properties["application_id"] == 7
+    assert node8.properties["application_id"] == 8
+    assert node9.properties["application_id"] == 9
+
+    controller.remove_project(project1)
+    project4 = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test4"))
+    project4.emit_notification = MagicMock()
+
+    node10 = async_run(project3.add_node(compute, "test10", None, node_type="iou"))
+    node11 = async_run(project3.add_node(compute, "test11", None, node_type="iou"))
+    node12 = async_run(project3.add_node(compute, "test12", None, node_type="iou"))
+
+    assert node10.properties["application_id"] == 1
+    assert node11.properties["application_id"] == 2
+    assert node12.properties["application_id"] == 3
+
+
+def test_add_node_iou_with_multiple_projects_different_computes(async_run, controller):
+    """
+    Test if an application ID is allocated for IOU nodes with different projects already opened
+    """
+    compute1 = MagicMock()
+    compute1.id = "remote1"
+    compute2 = MagicMock()
+    compute2.id = "remote2"
+    project1 = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test1"))
+    project1.emit_notification = MagicMock()
+    project2 = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test2"))
+    project2.emit_notification = MagicMock()
+    response = MagicMock()
+    compute1.post = AsyncioMagicMock(return_value=response)
+    compute2.post = AsyncioMagicMock(return_value=response)
+
+    node1 = async_run(project1.add_node(compute1, "test1", None, node_type="iou"))
+    node2 = async_run(project1.add_node(compute1, "test2", None, node_type="iou"))
+
+    node3 = async_run(project2.add_node(compute2, "test3", None, node_type="iou"))
+    node4 = async_run(project2.add_node(compute2, "test4", None, node_type="iou"))
+
+    assert node1.properties["application_id"] == 1
+    assert node2.properties["application_id"] == 2
+
+    assert node3.properties["application_id"] == 1
+    assert node4.properties["application_id"] == 2
+
+    node5 = async_run(project1.add_node(compute2, "test5", None, node_type="iou"))
+    node6 = async_run(project2.add_node(compute1, "test6", None, node_type="iou"))
+
+    assert node5.properties["application_id"] == 3
+    assert node6.properties["application_id"] == 4
+
+
+def test_add_node_iou_no_id_available(async_run, controller):
+    """
+    Test if an application ID is allocated for IOU nodes
+    """
+    compute = MagicMock()
+    compute.id = "local"
+    project = async_run(controller.add_project(project_id=str(uuid.uuid4()), name="test"))
+    project.emit_notification = MagicMock()
+    response = MagicMock()
+    compute.post = AsyncioMagicMock(return_value=response)
+
+    with pytest.raises(aiohttp.web.HTTPConflict):
+        for i in range(1, 513):
+            prop = {"properties": {"application_id": i}}
+            project._nodes[i] = Node(project, compute, "Node{}".format(i), node_id=i, node_type="iou", **prop)
+        async_run(project.add_node(compute, "test1", None, node_type="iou"))
+
+
 def test_add_node_from_template(async_run, controller):
     """
     For a local server we send the project path
@@ -42,7 +42,7 @@ def fake_iou_bin(images_dir):
 @pytest.fixture
 def base_params(tmpdir, fake_iou_bin):
     """Return standard parameters"""
-    return {"name": "PC TEST 1", "path": "iou.bin"}
+    return {"application_id": 42, "name": "PC TEST 1", "path": "iou.bin"}
 
 
 @pytest.fixture