1
0
mirror of https://github.com/GNS3/gns3-server synced 2025-06-27 18:32:39 +00:00

Merge remote-tracking branch 'origin/2.2' into 3.0

# Conflicts:
#	CHANGELOG
#	Dockerfile
#	README.md
#	gns3server/appliances/infix.gns3a
#	gns3server/compute/docker/docker_vm.py
#	gns3server/compute/qemu/qemu_vm.py
#	gns3server/controller/gns3vm/virtualbox_gns3_vm.py
#	gns3server/crash_report.py
#	gns3server/static/web-ui/index.html
#	gns3server/static/web-ui/main.9bcf455e62558dedfd48.js
#	gns3server/version.py
This commit is contained in:
grossmj 2025-04-21 20:13:31 +07:00
commit d9dcc2738d
No known key found for this signature in database
GPG Key ID: 1E7DD6DBB53FF3D7
25 changed files with 650 additions and 171 deletions

View File

@ -1,5 +1,19 @@
# Change Log
## 2.2.54 21/04/2025
* Bundle web-ui v2.2.54
* Add new method to find the IP address of a VBox GNS3 VM + allow NAT Network
* Add edk2-stable202502 UEFI firmwares and try to first use firmwares from the ovmf package if installed. Fixes #2494
* Try to detect swtpm and AppArmor issue. Ref https://github.com/GNS3/gns3-gui/issues/3725
* Fix Docker logs decoding. Ref #2522
* Add delay after starting a Docker container and adding connections in uBridge. Ref #2522
* Fix TypeError when reading Docker container logs. Ref #2522
* Replace "Docker hub" by "Docker repository" because it is possible to use different repositories
* Upgrade dependencies
* Improvements for remote-install.sh
## 3.0.4 25/02/2025
* Require minimum 8 characters for passwords

View File

@ -99,6 +99,12 @@ Alternatively, you can run the GNS3 server in a container
bash scripts/docker_dev_server.sh
```
#### use Docker Compose
``` {.bash}
docker compose up -d
```
### Running tests
First, install the development dependencies:

7
compose.yaml Normal file
View File

@ -0,0 +1,7 @@
services:
gns3-server:
build:
context: .
dockerfile: Dockerfile
ports:
- "8001:3080"

View File

@ -25,6 +25,14 @@
"options": "-cpu host -nographic"
},
"images": [
{
"filename": "AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2",
"version": "9.4",
"md5sum": "7c5040c044a989c524d40824cebb4a4d",
"filesize": 591724544,
"download_url": "https://vault.almalinux.org/9.4/cloud/x86_64/images/",
"direct_download_url": "https://vault.almalinux.org/9.4/cloud/x86_64/images/AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2"
},
{
"filename": "AlmaLinux-9-GenericCloud-9.2-20230513.x86_64.qcow2",
"version": "9.2",
@ -33,6 +41,14 @@
"download_url": "https://vault.almalinux.org/9.2/cloud/x86_64/images/",
"direct_download_url": "https://vault.almalinux.org/9.2/cloud/x86_64/images/AlmaLinux-9-GenericCloud-9.2-20230513.x86_64.qcow2"
},
{
"filename": "AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2",
"version": "8.9",
"md5sum": "1afc48c798960f0c6ebb65428c0ea973",
"filesize": 697434112,
"download_url": "https://vault.almalinux.org/8.9/cloud/x86_64/images/",
"direct_download_url": "https://vault.almalinux.org/8.9/cloud/x86_64/images/AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2"
},
{
"filename": "AlmaLinux-8-GenericCloud-8.8-20230524.x86_64.qcow2",
"version": "8.8",
@ -59,6 +75,13 @@
}
],
"versions": [
{
"name": "9.4",
"images": {
"hda_disk_image": "AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2",
"cdrom_image": "almalinux-cloud-init-data.iso"
}
},
{
"name": "9.2",
"images": {
@ -66,6 +89,13 @@
"cdrom_image": "almalinux-cloud-init-data.iso"
}
},
{
"name": "8.9",
"images": {
"hda_disk_image": "AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2",
"cdrom_image": "almalinux-cloud-init-data.iso"
}
},
{
"name": "8.8",
"images": {

View File

@ -29,23 +29,23 @@
},
"images": [
{
"filename": "vEOS-lab-4.33.1F.qcow2",
"version": "4.33.1F",
"md5sum": "8f662409c0732ed9f682edce63601e8a",
"filesize": 611909632,
"filename": "vEOS64-lab-4.33.2F.qcow2",
"version": "4.33.2F",
"md5sum": "fbe629a8342cd0b3b19566b9d7ef4f4f",
"filesize": 610992128,
"download_url": "https://www.arista.com/en/support/software-download"
},
{
"filename": "vEOS-lab-4.32.3M.qcow2",
"version": "4.32.3M",
"md5sum": "46fc46f5ed1da8752eed8396f08862f8",
"filesize": 605683712,
"filename": "vEOS64-lab-4.32.4.1M.qcow2",
"version": "4.32.4.1M",
"md5sum": "cd369b5ccfd87ccd83a34538681ba35f",
"filesize": 605159424,
"download_url": "https://www.arista.com/en/support/software-download"
},
{
"filename": "vEOS-lab-4.31.6M.qcow2",
"filename": "vEOS64-lab-4.31.6M.qcow2",
"version": "4.31.6M",
"md5sum": "7410110b77472f058322ec4681f8a356",
"md5sum": "02fbd929de9416e1096cd2454507d6ce",
"filesize": 590479360,
"download_url": "https://www.arista.com/en/support/software-download"
},
@ -59,24 +59,24 @@
],
"versions": [
{
"name": "4.33.1F",
"name": "4.33.2F",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
"hdb_disk_image": "vEOS-lab-4.33.1F.qcow2"
"hdb_disk_image": "vEOS64-lab-4.33.2F.qcow2"
}
},
{
"name": "4.32.3M",
"name": "4.32.4.1M",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
"hdb_disk_image": "vEOS-lab-4.32.3M.qcow2"
"hdb_disk_image": "vEOS64-lab-4.32.4.1M.qcow2"
}
},
{
"name": "4.31.6M",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
"hdb_disk_image": "vEOS-lab-4.31.6M.qcow2"
"hdb_disk_image": "vEOS64-lab-4.31.6M.qcow2"
}
}
]

View File

@ -32,6 +32,27 @@
"process_priority": "normal"
},
"images": [
{
"filename": "arubaoscx-disk-image-genericx86-p4-20241115202521.vmdk",
"version": "10.15.0005",
"md5sum": "69b3675391c04c1a6e0fd0bf8d8bc2d9",
"filesize": 420049408,
"download_url": "https://networkingsupport.hpe.com"
},
{
"filename": "arubaoscx-disk-image-genericx86-p4-20240731173624.vmdk",
"version": "10.14.1000",
"md5sum": "01d6932fbc9c46180a4f41aee9e98301",
"filesize": 412140544,
"download_url": "https://networkingsupport.hpe.com"
},
{
"filename": "arubaoscx-disk-image-genericx86-p4-20240528190307.vmdk",
"version": "10.14.0001",
"md5sum": "83902dbaa74c37cdda3f066f79431933",
"filesize": 401023488,
"download_url": "https://networkingsupport.hpe.com"
},
{
"filename": "arubaoscx-disk-image-genericx86-p4-20240129204649.vmdk",
"version": "10.13.1000",
@ -139,6 +160,24 @@
}
],
"versions": [
{
"name": "10.15.0005",
"images": {
"hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20241115202521.vmdk"
}
},
{
"name": "10.14.1000",
"images": {
"hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20240731173624.vmdk"
}
},
{
"name": "10.14.0001",
"images": {
"hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20240528190307.vmdk"
}
},
{
"name": "10.13.1000",
"images": {

View File

@ -0,0 +1,63 @@
{
"appliance_id": "9e934470-d898-4289-a5ed-50af094e629e",
"name": "Asterfusion vAsterNOS campus",
"category": "multilayer_switch",
"description": "AsterNOS is the core technology of Asterfusion's one-stop SONiC turnkey solution designed for cloud, enterprise, and AI-driven scenarios. AsterNOS v5.2 Campus is specifically designed for traditional campus networks, offering comprehensive L2/L3 capabilities suitable for various campus scenarios such as schools, office buildings, and hospitals. This version supports a fully cloud-integrated Layer 3 network architecture, providing rich routing and switching functionalities to ensure high-performance operation and ease of maintenance. It can also be deployed in the GNS3 simulation environment to experience a complete All-Layer 3 Cloud-Campus network. AsterNOS v6.0 Campus builds upon the L2/L3 features of v5.2 (including ACL, MSTP, QinQ, IGMP Snooping, OSPF/BGP, etc.) and further enhances support for advanced technologies such as MPLS L2VPN/L3VPN and PTP. This version is ideal for enterprises and campus networks requiring high-performance multi-service transport, supporting cross-domain connectivity and providing nanosecond-level time synchronization. It is well-suited for applications with stringent time accuracy requirements, such as financial trading, industrial automation, and smart manufacturing. NOTICE: This appliance file is a virtualized version of AsterNOS and is intended to be used only to experience the basic functionality and industry standard CLI (Klish), not for official software testing. For more information about AsterNOS commercial version, please feel free to contact us via Email: bd@cloudswit.ch",
"vendor_name": "Asterfusion",
"vendor_url": "https://cloudswit.ch/",
"vendor_logo_url": "https://raw.githubusercontent.com/GNS3/gns3-registry/master/vendor-logos/asterfusion.png",
"documentation_url": "https://help.cloudswit.ch/portal/en/kb/articles/vasternos",
"product_name": "vAsterNOS",
"product_url": "https://cloudswit.ch/",
"registry_version": 4,
"status": "experimental",
"maintainer": "Asterfusion",
"maintainer_email": "bd@cloudswit.ch",
"usage": "The login is admin and the password is asteros",
"symbol": "asterfusion-vAsterNOS.svg",
"first_port_name": "eth0",
"port_name_format": "Ethernet{0}",
"qemu": {
"adapter_type": "e1000",
"adapters": 10,
"ram": 4096,
"cpus": 4,
"hda_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "telnet",
"boot_priority": "d",
"kvm": "require"
},
"images": [
{
"filename": "vAsterNOS-V6.1R002.img",
"version": "6.1-2",
"md5sum": "003e6329489a617fbab5783504559d26",
"filesize": 2106851328,
"download_url": "https://drive.cloudswitch.io/external/c224501f36e6003767b30112bd44d92476f81f442cf47f8027a6f4f7e4227995"
},
{
"filename": "vAsterNOS-V5.2R012P01.img",
"version": "5.2-12-1",
"md5sum": "d18c0cfd786607ccc6dc1069a8f40465",
"filesize": 2823290880,
"download_url": "https://drive.cloudswitch.io/external/d29f6d0a6c8322fea42b3c08e95113d026b8ec6aafbe29193c338333077f3da7"
}
],
"versions": [
{
"name": "6.1-2",
"images": {
"hda_disk_image": "vAsterNOS-V6.1R002.img"
}
},
{
"name": "5.2-12-1",
"images": {
"hda_disk_image": "vAsterNOS-V5.2R012P01.img"
}
}
]
}

View File

@ -13,7 +13,7 @@
"status": "experimental",
"maintainer": "Asterfusion",
"maintainer_email": "bd@cloudswit.ch",
"usage": "The login is admin, passwd asteros",
"usage": "The login is admin and the password is asteros",
"symbol": "asterfusion-vAsterNOS.svg",
"first_port_name": "eth0",
"port_name_format": "Ethernet{0}",

View File

@ -27,44 +27,28 @@
},
"images": [
{
"filename": "CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2",
"version": "Stream-9 (20230704.1)",
"md5sum": "e04511e019325a97837edd9eafe02b48",
"filesize": 1087868416,
"filename": "CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2",
"version": "Stream-10 (20250331.0)",
"md5sum": "776033371ca346001dd6390f0cbaf0d0",
"filesize": 952041472,
"download_url": "https://cloud.centos.org/centos/10-stream/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2"
},
{
"filename": "CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2",
"version": "Stream-9 (20250331.0)",
"md5sum": "4aaeddc6ca497065522c75a7471f9bfd",
"filesize": 1250625536,
"download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2"
"direct_download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2"
},
{
"filename": "CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2",
"version": "Stream-8 (20230710.0)",
"md5sum": "83e02ce98c29753c86fb7be7d802aa75",
"filesize": 1676164096,
"filename": "CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2",
"version": "Stream-8 (20240603.0)",
"md5sum": "77f3c9650785b8e977209796e09ee33e",
"filesize": 2003698688,
"download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2"
},
{
"filename": "CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2",
"version": "8.4 (2105)",
"md5sum": "032eed270415526546eac07628905a62",
"filesize": 1309652992,
"download_url": "https://cloud.centos.org/centos/8/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2"
},
{
"filename": "CentOS-7-x86_64-GenericCloud-2111.qcow2",
"version": "7 (2111)",
"md5sum": "730b8662695831670721c8245be61dac",
"filesize": 897384448,
"download_url": "https://cloud.centos.org/centos/7/images",
"direct_download_url": "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2111.qcow2"
},
{
"filename": "CentOS-7-x86_64-GenericCloud-1809.qcow2",
"version": "7 (1809)",
"md5sum": "da79108d1324b27bd1759362b82fbe40",
"filesize": 914948096,
"download_url": "https://cloud.centos.org/centos/7/images",
"direct_download_url": "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2"
"direct_download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2"
},
{
"filename": "centos-cloud-init-data.iso",
@ -77,37 +61,23 @@
],
"versions": [
{
"name": "Stream-9 (20230704.1)",
"name": "Stream-10 (20250331.0)",
"images": {
"hda_disk_image": "CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2",
"hda_disk_image": "CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "Stream-8 (20230710.0)",
"name": "Stream-9 (20250331.0)",
"images": {
"hda_disk_image": "CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2",
"hda_disk_image": "CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "8.4 (2105)",
"name": "Stream-8 (20240603.0)",
"images": {
"hda_disk_image": "CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "7 (2111)",
"images": {
"hda_disk_image": "CentOS-7-x86_64-GenericCloud-2111.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "7 (1809)",
"images": {
"hda_disk_image": "CentOS-7-x86_64-GenericCloud-1809.qcow2",
"hda_disk_image": "CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
}

View File

@ -30,6 +30,13 @@
"images": [
{
"filename": "EXOS-VM_32.7.2.19.qcow2",
"version": "32.7.2.19",
"md5sum": "eba580a2e18d2a9cc972c9ece8917ea8",
"filesize": 236847104,
"direct_download_url": "https://akamai-ep.extremenetworks.com/Extreme_P/github-en/Virtual_EXOS/EXOS-VM_32.7.2.19.qcow2"
},
{
"filename": "EXOS-VM_v32.6.3.126.qcow2",
"version": "32.6.3.126",
"md5sum": "5856b6c427bd605fe1c7adb6ee6b2659",
@ -41,6 +48,12 @@
"versions": [
{
"name": "32.7.2.19",
"images": {
"hda_disk_image": "EXOS-VM_32.7.2.19.qcow2"
}
},
{
"name": "32.6.3.126",
"images": {

View File

@ -26,6 +26,22 @@
"options": "-nographic"
},
"images": [
{
"filename": "Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2",
"version": "41-1.4",
"md5sum": "8efc9edc04f38775de72ce067166b2a1",
"filesize": 491716608,
"download_url": "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images",
"direct_download_url": "https://fedora.mirrorservice.org/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
},
{
"filename": "Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2",
"version": "40-1.14",
"md5sum": "3eed4b1a9de35208ed30d9bb72c1522d",
"filesize": 397475840,
"download_url": "https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images",
"direct_download_url": "https://fedora.mirrorservice.org/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2"
},
{
"filename": "Fedora-Cloud-Base-39-1.5.x86_64.qcow2",
"version": "39-1.5",
@ -52,6 +68,20 @@
}
],
"versions": [
{
"name": "41-1.4",
"images": {
"hda_disk_image": "Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2",
"cdrom_image": "fedora-cloud-init-data.iso"
}
},
{
"name": "40-1.14",
"images": {
"hda_disk_image": "Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2",
"cdrom_image": "fedora-cloud-init-data.iso"
}
},
{
"name": "39-1.5",
"images": {

View File

@ -53,10 +53,39 @@
"version": "25.01.0",
"download_url": "https://github.com/kernelkit/infix/releases/download/v25.01.0/infix-x86_64-25.01.0.tar.gz",
"compression": "gzip"
},
{
"filename": "infix-x86_64-disk-25.02.0.img",
"filesize": 536870912,
"md5sum": "8e29474c97df3486eb063a8af5043f50",
"version": "25.02.0",
"download_url": "https://github.com/kernelkit/infix/releases/download/v25.02.0/infix-x86_64-25.02.0.tar.gz",
"compression": "gzip"
},
{
"filename": "infix-x86_64-disk-25.03.0.img",
"filesize": 536870912,
"md5sum": "5e1ed1081cd1673bfed4a9b5b1c58e08",
"version": "25.03.0",
"download_url": "https://github.com/kernelkit/infix/releases/download/v25.03.0/infix-x86_64-25.03.0.tar.gz",
"compression": "gzip"
}
],
"versions": [
{
"name": "25.03.0",
"images": {
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "infix-x86_64-disk-25.03.0.img"
}
},
{
"name": "25.02.0",
"images": {
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "infix-x86_64-disk-25.02.0.img"
}
},
{
"name": "25.01.0",
"images": {

View File

@ -0,0 +1,75 @@
{
"appliance_id": "12394e0d-9ac5-4da5-8e91-94a462536b61",
"name": "vJunos-router",
"category": "router",
"description": "vJunos-router",
"vendor_name": "Juniper",
"vendor_url": "https://www.juniper.net",
"documentation_url": "https://www.juniper.net/documentation/product/us/en/vjunos-router/",
"product_name": "vJunos Router",
"registry_version": 6,
"status": "stable",
"availability": "free",
"maintainer": "AAm-kun",
"maintainer_email": "github@sugarpapa.mozmail.com",
"usage": "GNS3 SHOULD be a baremetal installation. Using the GNS3 VM MIGHT result in unwanted issues. Default user is root. No password is needed.",
"symbol": "juniper-vmx.svg",
"first_port_name": "ge-0/0/0",
"port_name_format": "ge-0/0/{port0}",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 17,
"ram": 5120,
"cpus": 4,
"hda_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "telnet",
"kvm": "require",
"options": "-serial mon:stdio -nographic -smbios type=1,product=VM-VMX,family=lab -cpu host",
"on_close": "power_off"
},
"images": [
{
"filename": "vJunos-router-24.2R1-S2.qcow2",
"version": "24.2R1-S2",
"md5sum": "dd906b4d19463e22f3e1a297ff1a7464",
"filesize": 3675783168,
"download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
},
{
"filename": "vJunos-router-23.4R2-S2.1.qcow2",
"version": "23.4R2-S2.1",
"md5sum": "e25f5acdfc6c076d0023fd8289bcdd89",
"filesize": 3644063744,
"download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
},
{
"filename": "vJunos-router-23.2R1.15.qcow2",
"version": "23.2R1.15",
"md5sum": "18670fb67633822697fdd3cf982e7eb1",
"filesize": 3653566464,
"download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
}
],
"versions": [
{
"images": {
"hda_disk_image": "vJunos-router-24.2R1-S2.qcow2"
},
"name": "24.2R1-S2"
},
{
"images": {
"hda_disk_image": "vJunos-router-23.4R2-S2.1.qcow2"
},
"name": "23.4R2-S2.1"
},
{
"images": {
"hda_disk_image": "vJunos-router-23.2R1.15.qcow2"
},
"name": "23.2R1.15"
}
]
}

View File

@ -0,0 +1,44 @@
{
"appliance_id": "bb9ff73a-701e-40e8-b68a-6a6efeb04e99",
"name": "NethSecurity",
"category": "firewall",
"description": "NethSecurity is an Unified Threat Management (UTM) solution that provides a comprehensive suite of security features, including firewall, content filtering, deep packet inspection (DPI) using Netifyd, Dedalo hotspot, OpenVPN, and an optional remote controller. It is designed to be easy to install and configure, making it a good choice for both small and medium-sized businesses (SMBs) as well as enterprise organizations.",
"vendor_name": "Nethesis",
"vendor_url": "https://www.nethesis.it/",
"documentation_url": "https://docs.nethsecurity.org/en/latest/",
"product_name": "NethSecurity",
"product_url": "https://nethsecurity.org/",
"registry_version": 4,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Ethernet0 is the LAN link, Ethernet1 the WAN link. The default username is root and the password is Nethesis,1234",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 2,
"ram": 1024,
"hda_disk_interface": "scsi",
"arch": "x86_64",
"console_type": "telnet",
"kvm": "allow"
},
"images": [
{
"filename": "nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img",
"version": "8-24.10.0-ns.1.5.1",
"md5sum": "be670218effca1b86dac6b8d95012791",
"filesize": 331644416,
"download_url": "https://nethsecurity.org/download",
"direct_download_url": "https://updates.nethsecurity.nethserver.org/stable/8-24.10.0-ns.1.5.1/targets/x86/64/nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img.gz",
"compression": "gzip"
}
],
"versions": [
{
"name": "8-24.10.0-ns.1.5.1",
"images": {
"hda_disk_image": "nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img"
}
}
]
}

View File

@ -26,6 +26,14 @@
"options": "-cpu host -nographic"
},
"images": [
{
"filename": "OL9U5_x86_64-kvm-b259.qcow2",
"version": "9.5",
"md5sum": "05e9b62c408ab49a02d6833fc683d1ad",
"filesize": 652935168,
"download_url": "https://yum.oracle.com/oracle-linux-templates.html",
"direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL9/u5/x86_64/OL9U5_x86_64-kvm-b259.qcow2"
},
{
"filename": "OL9U2_x86_64-kvm-b197.qcow",
"version": "9.2",
@ -42,6 +50,14 @@
"download_url": "https://yum.oracle.com/oracle-linux-templates.html",
"direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL9/u1/x86_64/OL9U1_x86_64-kvm-b158.qcow"
},
{
"filename": "OL8U10_x86_64-kvm-b258.qcow2",
"version": "8.10",
"md5sum": "bb07581af5122515b6822595ded5deef",
"filesize": 1251672064,
"download_url": "https://yum.oracle.com/oracle-linux-templates.html",
"direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL8/u10/x86_64/OL8U10_x86_64-kvm-b258.qcow2"
},
{
"filename": "OL8U8_x86_64-kvm-b198.qcow",
"version": "8.8",
@ -76,7 +92,14 @@
}
],
"versions": [
{
{
"name": "9.5",
"images": {
"hda_disk_image": "OL9U5_x86_64-kvm-b259.qcow2",
"cdrom_image": "oracle-cloud-init-data.iso"
}
},
{
"name": "9.2",
"images": {
"hda_disk_image": "OL9U2_x86_64-kvm-b197.qcow",
@ -90,6 +113,13 @@
"cdrom_image": "oracle-cloud-init-data.iso"
}
},
{
"name": "8.10",
"images": {
"hda_disk_image": "OL8U10_x86_64-kvm-b258.qcow2",
"cdrom_image": "oracle-cloud-init-data.iso"
}
},
{
"name": "8.8",
"images": {

View File

@ -13,7 +13,7 @@
"availability": "service-contract",
"maintainer": "Da-Geek",
"maintainer_email": "dageek@dageeks-geeks.gg",
"usage": "You should download Red Hat Enterprise Linux KVM Guest Image from https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.3/x86_64/product-software attach/customize rhel-cloud-init.iso and start.\nusername: cloud-user\npassword: redhat",
"usage": "You should download Red Hat Enterprise Linux KVM Guest Image from https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.5/x86_64/product-software attach/customize rhel-cloud-init.iso and start.\nusername: cloud-user\npassword: redhat",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 1,
@ -26,6 +26,20 @@
"options": "-cpu host -nographic"
},
"images": [
{
"filename": "rhel-9.5-x86_64-kvm.qcow2",
"version": "9.5",
"md5sum": "8174396d5cb47727c59dd04dd9a05418",
"filesize": 974389248,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.5/x86_64/product-software"
},
{
"filename": "rhel-9.4-x86_64-kvm.qcow2",
"version": "9.4",
"md5sum": "77a2ca9a4cb0448260e04f0d2ebf9807",
"filesize": 957218816,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.4/x86_64/product-software"
},
{
"filename": "rhel-9.3-x86_64-kvm.qcow2",
"version": "9.3",
@ -54,6 +68,20 @@
"filesize": 696582144,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/9.0/x86_64/product-software"
},
{
"filename": "rhel-8.10-x86_64-kvm.qcow2",
"version": "8.10",
"md5sum": "5fda99fcab47e3b235c6ccdb6e80d362",
"filesize": 1065091072,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.10/x86_64/product-software"
},
{
"filename": "rhel-8.9-x86_64-kvm.qcow2",
"version": "8.9",
"md5sum": "23295fe508678cbdebfbdbd41ef6e6e2",
"filesize": 971833344,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.9/x86_64/product-software"
},
{
"filename": "rhel-8.8-x86_64-kvm.qcow2",
"version": "8.8",
@ -119,6 +147,20 @@
}
],
"versions": [
{
"name": "9.5",
"images": {
"hda_disk_image": "rhel-9.5-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "9.4",
"images": {
"hda_disk_image": "rhel-9.4-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "9.3",
"images": {
@ -147,6 +189,20 @@
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "8.10",
"images": {
"hda_disk_image": "rhel-8.10-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "8.9",
"images": {
"hda_disk_image": "rhel-8.9-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "8.8",
"images": {

View File

@ -26,6 +26,14 @@
"options": "-nographic -cpu host"
},
"images": [
{
"filename": "Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2",
"version": "9.5",
"md5sum": "880eccf788301bb9f34669faebe09276",
"filesize": 609812480,
"download_url": "https://download.rockylinux.org/pub/rocky/9/images/x86_64/",
"direct_download_url": "https://download.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2"
},
{
"filename": "Rocky-9-GenericCloud-Base-9.3-20231113.0.x86_64.qcow2",
"version": "9.3",
@ -68,6 +76,13 @@
}
],
"versions": [
{
"name": "9.5",
"images": {
"hda_disk_image": "Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2",
"cdrom_image": "rocky-cloud-init-data.iso"
}
},
{
"name": "9.3",
"images": {

View File

@ -175,11 +175,10 @@ class Docker(BaseManager):
response = await self.http_query(method, path, data=data, params=params)
body = await response.read()
response.close()
if body and len(body):
if response.headers.get('CONTENT-TYPE') == 'application/json':
body = json.loads(body.decode("utf-8"))
else:
body = body.decode("utf-8")
if response.headers.get('CONTENT-TYPE') == 'application/json':
body = json.loads(body.decode("utf-8", errors="ignore"))
else:
body = body.decode("utf-8", errors="ignore")
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
return body

View File

@ -617,6 +617,7 @@ class DockerVM(BaseNode):
await self._clean_servers()
await self.manager.query("POST", f"containers/{self._cid}/start")
await asyncio.sleep(0.5) # give the Docker container some time to start
self._namespace = await self._get_namespace()
await self._start_ubridge(require_privileged_access=True)

View File

@ -32,6 +32,7 @@ import subprocess
import time
import json
import shlex
import psutil
from gns3server.utils import parse_version
from gns3server.utils.asyncio import subprocess_check_output, cancellable_wait_run_in_executor
@ -1225,6 +1226,21 @@ class QemuVM(BaseNode):
except OSError as e:
raise QemuError(f"Could not start Telnet QEMU console {e}\n")
def _find_partition_for_path(self, path):
    """
    Find the disk partition a given path resides on.

    :param path: filesystem path to look up (made absolute before matching)
    :returns: the psutil partition object with the longest matching
              mountpoint, or None if no partition contains the path
    """
    path = os.path.abspath(path)
    matching_partition = None
    for partition in psutil.disk_partitions():
        mountpoint = partition.mountpoint
        # Match only on a full path-component boundary: a bare prefix test
        # would wrongly match e.g. path "/data2/x" against mountpoint "/data".
        # os.path.join(mountpoint, "") appends a trailing separator ("/" stays "/").
        if path == mountpoint or path.startswith(os.path.join(mountpoint, "")):
            # keep the partition with the longest (most specific) mountpoint
            if matching_partition is None or len(mountpoint) > len(matching_partition.mountpoint):
                matching_partition = partition
    return matching_partition
async def _termination_callback(self, returncode):
"""
Called when the process has stopped.
@ -1236,9 +1252,19 @@ class QemuVM(BaseNode):
log.info("QEMU process has stopped, return code: %d", returncode)
await self.stop()
if returncode != 0:
qemu_stdout = self.read_stdout()
# additional permissions need to be configured for swtpm in AppArmor if the working dir
# is located on a different partition than the partition for the root directory
if "TPM result for CMD_INIT" in qemu_stdout:
partition = self._find_partition_for_path(self.project.path)
if partition and partition.mountpoint != "/":
qemu_stdout += "\nTPM error: the project directory is not on the same partition as the root directory which can be a problem when using AppArmor.\n" \
"Please try to execute the following commands on the server:\n\n" \
"echo 'owner {}/** rwk,' | sudo tee /etc/apparmor.d/local/usr.bin.swtpm > /dev/null\n" \
"sudo service apparmor restart".format(os.path.dirname(self.project.path))
self.project.emit(
"log.error",
{"message": f"QEMU process has stopped, return code: {returncode}\n{self.read_stdout()}"},
{"message": f"QEMU process has stopped, return code: {returncode}\n{qemu_stdout}"},
)
async def stop(self):
@ -2287,19 +2313,42 @@ class QemuVM(BaseNode):
else:
raise QemuError(f"bios image '{self._bios_image}' is not accessible")
options.extend(["-bios", self._bios_image.replace(",", ",,")])
elif self._uefi:
# get the OVMF firmware from the images directory
ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE.fd")
old_ovmf_vars_path = os.path.join(self.working_dir, "OVMF_VARS.fd")
if os.path.exists(old_ovmf_vars_path):
# the node has its own UEFI variables store already, we must also use the old UEFI firmware
ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE.fd")
else:
system_ovmf_firmware_path = "/usr/share/OVMF/OVMF_CODE_4M.fd"
if os.path.exists(system_ovmf_firmware_path):
ovmf_firmware_path = system_ovmf_firmware_path
else:
# otherwise, get the UEFI firmware from the images directory
ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE_4M.fd")
log.info("Configuring UEFI boot mode using OVMF file: '{}'".format(ovmf_firmware_path))
options.extend(["-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_firmware_path)])
# try to use the UEFI variables store from the system first
system_ovmf_vars_path = "/usr/share/OVMF/OVMF_VARS_4M.fd"
if os.path.exists(system_ovmf_vars_path):
ovmf_vars_path = system_ovmf_vars_path
else:
# otherwise, get the UEFI variables store from the images directory
ovmf_vars_path = self.manager.get_abs_image_path("OVMF_VARS_4M.fd")
# the node should have its own copy of OVMF_VARS.fd (the UEFI variables store)
ovmf_vars_node_path = os.path.join(self.working_dir, "OVMF_VARS.fd")
if not os.path.exists(ovmf_vars_node_path):
try:
shutil.copyfile(self.manager.get_abs_image_path("OVMF_VARS.fd"), ovmf_vars_node_path)
except OSError as e:
raise QemuError("Cannot copy OVMF_VARS.fd file to the node working directory: {}".format(e))
if os.path.exists(old_ovmf_vars_path):
ovmf_vars_node_path = old_ovmf_vars_path
else:
ovmf_vars_node_path = os.path.join(self.working_dir, "OVMF_VARS_4M.fd")
if not os.path.exists(ovmf_vars_node_path):
try:
shutil.copyfile(ovmf_vars_path, ovmf_vars_node_path)
except OSError as e:
raise QemuError("Cannot copy OVMF_VARS_4M.fd file to the node working directory: {}".format(e))
options.extend(["-drive", "if=pflash,format=raw,file={}".format(ovmf_vars_node_path)])
return options

View File

@ -249,6 +249,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
return True
return False
async def list(self):
"""
List all VirtualBox VMs
@ -269,8 +270,8 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
# get a NAT interface number
nat_interface_number = await self._look_for_interface("nat")
if nat_interface_number < 0:
raise GNS3VMError(f'VM "{self.vmname}" must have a NAT interface configured in order to start')
if nat_interface_number < 0 and await self._look_for_interface("natnetwork") < 0:
raise GNS3VMError(f'VM "{self.vmname}" must have a NAT interface or NAT Network configured in order to start')
if sys.platform.startswith("darwin") and parse_version(self._system_properties["API version"]) >= parse_version("7_0"):
# VirtualBox 7.0+ on macOS requires a host-only network interface
@ -339,42 +340,68 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
elif vm_state == "paused":
args = [self._vmname, "resume"]
await self._execute("controlvm", args)
ip_address = "127.0.0.1"
try:
# get a random port on localhost
with socket.socket() as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((ip_address, 0))
api_port = s.getsockname()[1]
except OSError as e:
raise GNS3VMError(f"Error while getting random port: {e}")
if await self._check_vbox_port_forwarding():
# delete the GNS3VM NAT port forwarding rule if it exists
log.info(f"Removing GNS3VM NAT port forwarding rule from interface {nat_interface_number}")
await self._execute("controlvm", [self._vmname, f"natpf{nat_interface_number}", "delete", "GNS3VM"])
log.info("Retrieving IP address from GNS3 VM...")
ip = await self._get_ip_from_guest_property()
if ip:
self.ip_address = ip
else:
# if we can't get the IP address from the guest property, we try to get it from the GNS3 server (a NAT interface is required)
if nat_interface_number < 0:
raise GNS3VMError("Could not find guest IP address for {}".format(self.vmname))
log.warning("Could not find IP address from guest property, trying to get it from GNS3 server")
ip_address = "127.0.0.1"
try:
# get a random port on localhost
with socket.socket() as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((ip_address, 0))
api_port = s.getsockname()[1]
except OSError as e:
raise GNS3VMError("Error while getting random port: {}".format(e))
# add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to the port in the VM
log.info(f"Adding GNS3VM NAT port forwarding rule with port {api_port} to interface {nat_interface_number}")
await self._execute(
"controlvm",
[self._vmname, f"natpf{nat_interface_number}", f"GNS3VM,tcp,{ip_address},{api_port},,{self.port}"],
)
if await self._check_vbox_port_forwarding():
# delete the GNS3VM NAT port forwarding rule if it exists
log.info("Removing GNS3VM NAT port forwarding rule from interface {}".format(nat_interface_number))
await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"])
self.ip_address = await self._get_ip(interface_number, api_port)
log.info("GNS3 VM has been started with IP {}".format(self.ip_address))
# add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to the port in the VM
log.info("Adding GNS3VM NAT port forwarding rule with port {} to interface {}".format(api_port, nat_interface_number))
await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number),
"GNS3VM,tcp,{},{},,{}".format(ip_address, api_port, self.port)])
self.ip_address = await self._get_ip_from_server(interface_number, api_port)
log.info("GNS3 VM has been started with IP '{}'".format(self.ip_address))
self.running = True
async def _get_ip(self, hostonly_interface_number, api_port):
async def _get_ip_from_guest_property(self):
"""
Get the IP from VirtualBox.
Get the IP from VirtualBox by retrieving the guest property (Guest Additions must be installed).
"""
remaining_try = 180 # try for 3 minutes
while remaining_try > 0:
result = await self._execute("guestproperty", ["get", self._vmname, "/VirtualBox/GuestInfo/Net/0/V4/IP"])
for info in result.splitlines():
if ':' in info:
name, value = info.split(':', 1)
if name == "Value":
return value.strip()
remaining_try -= 1
await asyncio.sleep(1)
return None
async def _get_ip_from_server(self, hostonly_interface_number, api_port):
"""
Get the IP from VirtualBox by sending a request to the GNS3 server.
Due to VirtualBox limitation the only way is to send request each
second to a GNS3 endpoint in order to get the list of the interfaces and
their IP and after that match it with VirtualBox host only.
"""
remaining_try = 300
remaining_try = 180 # try for 3 minutes
while remaining_try > 0:
try:
async with HTTPClient.get(f"http://127.0.0.1:{api_port}/v3/compute/network/interfaces") as resp:

Binary file not shown.

Binary file not shown.

View File

@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (C) 2024 GNS3 Technologies Inc.
# Copyright (C) 2025 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -28,6 +28,7 @@ function help {
echo "--with-i386-repository: Add the i386 repositories required by IOU i386 images. This is not needed for recent x86_64 IOU images." >&2
echo "--with-welcome: Install GNS3-VM welcome.py script" >&2
echo "--without-kvm: Disable KVM, required if system do not support it (limitation in some hypervisors and cloud providers). Warning: only disable KVM if strictly necessary as this will degrade performance" >&2
echo "--without-system-upgrade: Do not upgrade the system" >&2
echo "--unstable: Use the GNS3 unstable repository" >&2
echo "--custom-repository <repository>: Use a custom repository" >&2
echo "--help: This help" >&2
@ -38,6 +39,13 @@ function log {
}
lsb_release -d | grep "LTS" > /dev/null
if [ "$EUID" -ne 0 ]
then
echo "This script must be run as root"
exit 1
fi
if [ $? != 0 ]
then
echo "This script can only be run on a Linux Ubuntu LTS release"
@ -52,6 +60,7 @@ USE_VPN=0
USE_IOU=0
I386_REPO=0
DISABLE_KVM=0
NO_SYSTEM_UPGRADE=0
WELCOME_SETUP=0
TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,with-welcome,without-kvm,unstable,custom-repository:,help -n 'gns3-remote-install.sh' -- "$@"`
@ -85,6 +94,10 @@ while true ; do
DISABLE_KVM=1
shift
;;
--without-system-upgrade)
NO_SYSTEM_UPGRADE=1
shift
;;
--unstable)
REPOSITORY="unstable"
shift
@ -102,68 +115,33 @@ while true ; do
esac
done
if [ "$REPOSITORY" == "ppa-v3" ]
then
if ! python3 -c 'import sys; assert sys.version_info >= (3,9)' > /dev/null 2>&1; then
echo "GNS3 version >= 3.0 requires Python 3.9 or later"
exit 1
fi
fi
# Exit in case of error
set -e
export DEBIAN_FRONTEND="noninteractive"
UBUNTU_CODENAME=`lsb_release -c -s`
log "Add GNS3 repository"
log "Updating system packages, installing curl and software-properties-common"
apt update
apt install -y curl software-properties-common
if [ ! -f "/etc/apt/sources.list.d/ubuntu.sources" ]
if [ $NO_SYSTEM_UPGRADE == 0 ]
then
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B83AAABFFBD82D21B543C8EA86C22C2EC6A24D7F
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
deb http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
EOFLIST
else
cat <<EOFLIST > /etc/apt/sources.list.d/gns3-ppa.sources
Types: deb
URIs: https://ppa.launchpadcontent.net/gns3/$REPOSITORY/ubuntu/
Suites: $UBUNTU_CODENAME
Components: main
Signed-By:
-----BEGIN PGP PUBLIC KEY BLOCK-----
.
mQINBGY0jSYBEADMH5CvX8ZVX4XzAxdQ2CmF7t86IjFnQgtI18Q19nVnpKEGNyB5
pgotDMzkhGnxuhvz2zE9PZhd8VgkodB81V607d/Dy8FfI7t1BVQhLvJDx0H/q6RE
n2y9WxiuBzTHitoQTCTY3hjcr7AUNFFI64gUqwbkQmYbCWWsYOlDpRSkWKg8P8WK
08RetwTI0Iwoz8j+BkbPlubuImiVfh1TeH23FBuGIwL1r1Cps0wel6JAi+jaU9WG
j8MX3mQYFTAtk7f1lRubqWosB/A4xIu609pF1e1tAkWAGltYAeoFhDn+PfA9KgmV
fvxfVR7zmxp31imTJgXgUFCz+H0Xb3vpve8XsrsHZUP6StJ3+6cFXjNBV6PuO1FT
JWp86a+AYHg7+sUWcoJRZPCTbb/pOcCa0q1ch5qcLkiYEOGK+pYhbPptq6y8IsJW
N6EDNCVvVqVyTJy14FZWoOqxcpUiDOQ+su28j8++V+PMo+FO3SQqwEZwJXk7LF/4
wUipDCUh/WNjDqqgmYLoO+ttiiJPbEw3jtbO+zopbzYpyEC1f06Nz7uz1daOIN3J
etFPzSqWCE7Eq+hoVmAAm8gVmQir3rFJbIGBAvAaOLQEOkUlOlS7AezqUhdyhGER
Zrvc3eNqxY7G61SEHipEJ7/hpcDq0RRWCXHsoQqyHaPje826n2pGkJYt4QARAQAB
tBZMYXVuY2hwYWQgUFBBIGZvciBHTlMziQJOBBMBCgA4FiEEuDqqv/vYLSG1Q8jq
hsIsLsaiTX8FAmY0jSYCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQhsIs
LsaiTX9z9xAAq1uHmRgfYmELS0cr2YEnTWHPVE6s95Qx+0cr5zzNeWfmoAS9uSyl
z8bCm+Q2ZapzU/nOtkozU+RGjgcRRTKMVTyS0PjFX22965xHCRWnw79fPyrYouUw
H2cAT8WSGYEeVAbqhJSns0RnDpXuaxmWE1wT+iitY/QAjeXo22Z2mjv2bFTitKbY
hZbE5Eu8Olc5YHCVI0ofq84/Ii921iMibU6EDMmm/iOnMK2uHGbC59t0YG8Rm7mK
uk6+TpxOULjFeCWSkF2Dr33m8JQmtYZuFUnmqWPuSdBo3J0O1b0qTg+EP9FbDAtj
CoEKT/V1ccMBd3r77o23CGsvpV7bzEU60A+NsU8vb/AkOmouYiF+qaYDFGZDfWhK
p1HFmd1kt7YdgxsmoKoFJkbt1bBdcFJLV0Jcad5sfArg2aFDYf2giMxAw4iQ+9jc
MCuwWxiqWicPqJ5erNTzVfayBkjuZqBDVTO9wmG3DL4QmNosIBS7kq+NGrT8Ql22
FqYfdIZJDlKVtJKHK8eKJSB0dbFawV2h5p/CvQlIm6nthg5FzOyjvCkPkvxvveq+
SuNxFEscumFCgo7j7RMWHW9HWK3TUvMmYLMVjxL8kXyCwknp9GklBQHA/IPxRa/2
eFqqkmVbmNAoMzzw5wqa/BPcFEbgn+E+TFyZqbzp0F4QzPJZFkz16SA=
=xnj5
-----END PGP PUBLIC KEY BLOCK-----
EOFLIST
log "Upgrading system packages"
apt upgrade --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
fi
log "Updating system packages and installing curl"
apt update
apt install -y curl
log "Upgrading packages"
apt upgrade --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
log "Adding GNS3 repository ppa:gns3/$REPOSITORY"
# use sudo -E to preserve proxy config
sudo -E apt-add-repository -y "ppa:gns3/$REPOSITORY"
log "Installing the GNS3 server and its dependencies"
apt install -y gns3-server

View File

@ -401,16 +401,20 @@ async def test_uefi_boot_mode_option(vm, tmpdir, images_dir, fake_qemu_img_binar
vm._uefi = True
# create fake OVMF files
ovmf_code_path = os.path.join(images_dir, "OVMF_CODE.fd")
with open(ovmf_code_path, "w+") as f:
f.write('1')
ovmf_vars_path = os.path.join(images_dir, "OVMF_VARS.fd")
system_ovmf_firmware_path = "/usr/share/OVMF/OVMF_CODE_4M.fd"
if os.path.exists(system_ovmf_firmware_path):
ovmf_code_path = system_ovmf_firmware_path
else:
ovmf_code_path = os.path.join(images_dir, "OVMF_CODE_4M.fd")
with open(ovmf_code_path, "w+") as f:
f.write('1')
ovmf_vars_path = os.path.join(images_dir, "OVMF_VARS_4M.fd")
with open(ovmf_vars_path, "w+") as f:
f.write('1')
options = await vm._build_command()
assert ' '.join(["-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_code_path)]) in ' '.join(options)
assert ' '.join(["-drive", "if=pflash,format=raw,file={}".format(os.path.join(vm.working_dir, "OVMF_VARS.fd"))]) in ' '.join(options)
assert ' '.join(["-drive", "if=pflash,format=raw,file={}".format(os.path.join(vm.working_dir, "OVMF_VARS_4M.fd"))]) in ' '.join(options)
@pytest.mark.asyncio