diff --git a/CHANGELOG b/CHANGELOG
index 89562ef4..b3cbbd9c 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,5 +1,44 @@
# Change Log
+## 3.0.5 14/05/2025
+
+* Bundle web-ui v3.0.5
+* Sync appliances
+* Use Ubuntu 24.04 LTS in Dockerfile. Ref #2523
+* Fix tests and require Qemu version >= 2.4
+* Fix adding pci_bridges to qemu vms
+* Resolve deprecation warnings of regex library
+* Remove OVMF_CODE_4M.secboot.fd and associated code
+* Add edk2-stable202502 UEFI firmwares and try to first use firmwares from the ovmf package if installed. Fixes #2494
+* Prettify TPM error message
+* Fix bug in qemu_stdout message
+* Try to detect swtpm and AppArmor issue. Ref https://github.com/GNS3/gns3-gui/issues/3725
+* Fix Docker logs decoding. Ref #2522
+* Add delay after starting a Docker container and adding connections in uBridge. Ref #2522
+* Fix TypeError when reading Docker container logs. Ref #2522
+* Allow an image to be uploaded to the controller again even if it is already in the database
+* Fix controller not reporting an error if invalid options are passed to Qemu command line. Fixes #2517
+* Replace "Docker hub" by "Docker repository" because it is possible to use different repositories
+* Fix unable to add NAT cloud after configuring “allowed_interfaces” in configuration file. Fixes #2508
+* Fix interface information API endpoint for Cloud/NAT devices
+* Upgrade Jinja2 to v3.1.6. Fixes #2515
+* Fix packet capture for links connected to a cloud node. Fixes #2513
+* fix: check if remote-install.sh is being run as root
+
+## 2.2.54 21/04/2025
+
+* Bundle web-ui v2.2.54
+* Add new method to find the IP address of a VBox GNS3 VM + allow NAT Network
+* Add edk2-stable202502 UEFI firmwares and try to first use firmwares from the ovmf package if installed. Fixes #2494
+* Try to detect swtpm and AppArmor issue. Ref https://github.com/GNS3/gns3-gui/issues/3725
+* Fix Docker logs decoding. Ref #2522
+* Add delay after starting a Docker container and adding connections in uBridge. Ref #2522
+* Fix TypeError when reading Docker container logs. Ref #2522
+* Replace "Docker hub" by "Docker repository" because it is possible to use different repositories
+* Upgrade dependencies
+* Improvements for remote-install.sh
+
+
## 3.0.4 25/02/2025
* Require minimum 8 characters for passwords
diff --git a/Dockerfile b/Dockerfile
index 4ced87dc..49555754 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM ubuntu:focal
+FROM ubuntu:noble
WORKDIR /gns3server
@@ -34,4 +34,4 @@ COPY . /gns3server
RUN mkdir -p ~/.config/GNS3/3.0/
RUN cp scripts/gns3_server.conf ~/.config/GNS3/3.0/
-RUN python3 -m pip install .
+RUN python3 -m pip install --break-system-packages .
diff --git a/README.md b/README.md
index 99c168bd..0bfb33bd 100644
--- a/README.md
+++ b/README.md
@@ -99,6 +99,12 @@ Alternatively, you can run the GNS3 server in a container
bash scripts/docker_dev_server.sh
```
+#### use Docker Compose
+
+``` {.bash}
+docker compose up -d
+```
+
### Running tests
First, install the development dependencies:
diff --git a/compose.yaml b/compose.yaml
new file mode 100644
index 00000000..cb0343af
--- /dev/null
+++ b/compose.yaml
@@ -0,0 +1,7 @@
+services:
+  gns3-server:
+ build:
+ context: .
+ dockerfile: Dockerfile
+ ports:
+ - "8001:3080"
diff --git a/gns3server/api/routes/compute/cloud_nodes.py b/gns3server/api/routes/compute/cloud_nodes.py
index fd333ab3..c220ea1c 100644
--- a/gns3server/api/routes/compute/cloud_nodes.py
+++ b/gns3server/api/routes/compute/cloud_nodes.py
@@ -232,7 +232,7 @@ async def stop_cloud_capture(
await node.stop_capture(port_number)
-@router.get("/{node_id}/adapters/{adapter_number}/ports/{port_number}/pcap")
+@router.get("/{node_id}/adapters/{adapter_number}/ports/{port_number}/capture/stream")
async def stream_pcap_file(
*,
adapter_number: int = Path(..., ge=0, le=0),
diff --git a/gns3server/api/routes/controller/images.py b/gns3server/api/routes/controller/images.py
index c4a06a4f..736c44c3 100644
--- a/gns3server/api/routes/controller/images.py
+++ b/gns3server/api/routes/controller/images.py
@@ -148,11 +148,6 @@ async def upload_image(
if os.path.commonprefix([base_images_directory, full_path]) != base_images_directory:
raise ControllerForbiddenError(f"Cannot write image, '{image_path}' is forbidden")
- image = await images_repo.get_image(image_path)
- if image:
- log.warning(f"Image '{image_path}' already exists")
- return image
-
try:
allow_raw_image = Config.instance().settings.Server.allow_raw_images
image = await write_image(image_path, full_path, request.stream(), images_repo, allow_raw_image=allow_raw_image)
diff --git a/gns3server/api/routes/controller/links.py b/gns3server/api/routes/controller/links.py
index 7264a941..67041eee 100644
--- a/gns3server/api/routes/controller/links.py
+++ b/gns3server/api/routes/controller/links.py
@@ -24,7 +24,7 @@ import aiohttp
from fastapi import APIRouter, Depends, Request, status
from fastapi.responses import StreamingResponse
from fastapi.encoders import jsonable_encoder
-from typing import List
+from typing import List, Union
from uuid import UUID
from gns3server.controller import Controller
@@ -285,3 +285,54 @@ async def stream_pcap(request: Request, link: Link = Depends(dep_link)) -> Strea
raise ControllerError(f"Client error received when receiving pcap stream from compute: {e}")
return StreamingResponse(compute_pcap_stream(), media_type="application/vnd.tcpdump.pcap")
+
+
+@router.get(
+ "/{link_id}/iface",
+ response_model=Union[schemas.UDPPortInfo, schemas.EthernetPortInfo],
+ dependencies=[Depends(has_privilege("Link.Audit"))]
+)
+async def get_iface(link: Link = Depends(dep_link)) -> Union[schemas.UDPPortInfo, schemas.EthernetPortInfo]:
+ """
+ Return iface info for links to Cloud or NAT devices.
+
+ Required privilege: Link.Audit
+ """
+
+ ifaces_info = {}
+ for node_data in link._nodes:
+ node = node_data["node"]
+ if node.node_type not in ("cloud", "nat"):
+ continue
+
+ port_number = node_data["port_number"]
+ compute = node.compute
+ project_id = link.project.id
+ response = await compute.get(f"/projects/{project_id}/{node.node_type}/nodes/{node.id}")
+ if "ports_mapping" not in response.json:
+ continue
+ ports_mapping = response.json["ports_mapping"]
+
+ for port in ports_mapping:
+ port_num = port.get("port_number")
+
+ if port_num and int(port_num) == int(port_number):
+ port_type = port.get("type", "")
+ if "udp" in port_type.lower():
+ ifaces_info = {
+ "node_id": node.id,
+ "type": f"{port_type}",
+ "lport": port["lport"],
+ "rhost": port["rhost"],
+ "rport": port["rport"]
+ }
+ else:
+ ifaces_info = {
+ "node_id": node.id,
+ "type": f"{port_type}",
+ "interface": port["interface"],
+ }
+
+ if not ifaces_info:
+ raise ControllerError("Link not connected to Cloud/NAT")
+ return ifaces_info
diff --git a/gns3server/appliances/almalinux.gns3a b/gns3server/appliances/almalinux.gns3a
index 6e39d7d2..a8a2091b 100644
--- a/gns3server/appliances/almalinux.gns3a
+++ b/gns3server/appliances/almalinux.gns3a
@@ -25,6 +25,14 @@
"options": "-cpu host -nographic"
},
"images": [
+ {
+ "filename": "AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2",
+ "version": "9.4",
+ "md5sum": "7c5040c044a989c524d40824cebb4a4d",
+ "filesize": 591724544,
+ "download_url": "https://vault.almalinux.org/9.4/cloud/x86_64/images/",
+ "direct_download_url": "https://vault.almalinux.org/9.4/cloud/x86_64/images/AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2"
+ },
{
"filename": "AlmaLinux-9-GenericCloud-9.2-20230513.x86_64.qcow2",
"version": "9.2",
@@ -33,6 +41,14 @@
"download_url": "https://vault.almalinux.org/9.2/cloud/x86_64/images/",
"direct_download_url": "https://vault.almalinux.org/9.2/cloud/x86_64/images/AlmaLinux-9-GenericCloud-9.2-20230513.x86_64.qcow2"
},
+ {
+ "filename": "AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2",
+ "version": "8.9",
+ "md5sum": "1afc48c798960f0c6ebb65428c0ea973",
+ "filesize": 697434112,
+ "download_url": "https://vault.almalinux.org/8.9/cloud/x86_64/images/",
+ "direct_download_url": "https://vault.almalinux.org/8.9/cloud/x86_64/images/AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2"
+ },
{
"filename": "AlmaLinux-8-GenericCloud-8.8-20230524.x86_64.qcow2",
"version": "8.8",
@@ -59,6 +75,13 @@
}
],
"versions": [
+ {
+ "name": "9.4",
+ "images": {
+ "hda_disk_image": "AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2",
+ "cdrom_image": "almalinux-cloud-init-data.iso"
+ }
+ },
{
"name": "9.2",
"images": {
@@ -66,6 +89,13 @@
"cdrom_image": "almalinux-cloud-init-data.iso"
}
},
+ {
+ "name": "8.9",
+ "images": {
+ "hda_disk_image": "AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2",
+ "cdrom_image": "almalinux-cloud-init-data.iso"
+ }
+ },
{
"name": "8.8",
"images": {
diff --git a/gns3server/appliances/arista-veos.gns3a b/gns3server/appliances/arista-veos.gns3a
index 9a01ce25..83555608 100644
--- a/gns3server/appliances/arista-veos.gns3a
+++ b/gns3server/appliances/arista-veos.gns3a
@@ -29,23 +29,23 @@
},
"images": [
{
- "filename": "vEOS-lab-4.33.1F.qcow2",
- "version": "4.33.1F",
- "md5sum": "8f662409c0732ed9f682edce63601e8a",
- "filesize": 611909632,
+ "filename": "vEOS64-lab-4.33.2F.qcow2",
+ "version": "4.33.2F",
+ "md5sum": "fbe629a8342cd0b3b19566b9d7ef4f4f",
+ "filesize": 610992128,
"download_url": "https://www.arista.com/en/support/software-download"
},
{
- "filename": "vEOS-lab-4.32.3M.qcow2",
- "version": "4.32.3M",
- "md5sum": "46fc46f5ed1da8752eed8396f08862f8",
- "filesize": 605683712,
+ "filename": "vEOS64-lab-4.32.4.1M.qcow2",
+ "version": "4.32.4.1M",
+ "md5sum": "cd369b5ccfd87ccd83a34538681ba35f",
+ "filesize": 605159424,
"download_url": "https://www.arista.com/en/support/software-download"
},
{
- "filename": "vEOS-lab-4.31.6M.qcow2",
+ "filename": "vEOS64-lab-4.31.6M.qcow2",
"version": "4.31.6M",
- "md5sum": "7410110b77472f058322ec4681f8a356",
+ "md5sum": "02fbd929de9416e1096cd2454507d6ce",
"filesize": 590479360,
"download_url": "https://www.arista.com/en/support/software-download"
},
@@ -59,24 +59,24 @@
],
"versions": [
{
- "name": "4.33.1F",
+ "name": "4.33.2F",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
- "hdb_disk_image": "vEOS-lab-4.33.1F.qcow2"
+ "hdb_disk_image": "vEOS64-lab-4.33.2F.qcow2"
}
},
{
- "name": "4.32.3M",
+ "name": "4.32.4.1M",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
- "hdb_disk_image": "vEOS-lab-4.32.3M.qcow2"
+ "hdb_disk_image": "vEOS64-lab-4.32.4.1M.qcow2"
}
},
{
"name": "4.31.6M",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
- "hdb_disk_image": "vEOS-lab-4.31.6M.qcow2"
+ "hdb_disk_image": "vEOS64-lab-4.31.6M.qcow2"
}
}
]
diff --git a/gns3server/appliances/aruba-arubaoscx.gns3a b/gns3server/appliances/aruba-arubaoscx.gns3a
index 893f66ec..d9f8c841 100644
--- a/gns3server/appliances/aruba-arubaoscx.gns3a
+++ b/gns3server/appliances/aruba-arubaoscx.gns3a
@@ -32,6 +32,27 @@
"process_priority": "normal"
},
"images": [
+ {
+ "filename": "arubaoscx-disk-image-genericx86-p4-20241115202521.vmdk",
+ "version": "10.15.0005",
+ "md5sum": "69b3675391c04c1a6e0fd0bf8d8bc2d9",
+ "filesize": 420049408,
+ "download_url": "https://networkingsupport.hpe.com"
+ },
+ {
+ "filename": "arubaoscx-disk-image-genericx86-p4-20240731173624.vmdk",
+ "version": "10.14.1000",
+ "md5sum": "01d6932fbc9c46180a4f41aee9e98301",
+ "filesize": 412140544,
+ "download_url": "https://networkingsupport.hpe.com"
+ },
+ {
+ "filename": "arubaoscx-disk-image-genericx86-p4-20240528190307.vmdk",
+ "version": "10.14.0001",
+ "md5sum": "83902dbaa74c37cdda3f066f79431933",
+ "filesize": 401023488,
+ "download_url": "https://networkingsupport.hpe.com"
+ },
{
"filename": "arubaoscx-disk-image-genericx86-p4-20240129204649.vmdk",
"version": "10.13.1000",
@@ -139,6 +160,24 @@
}
],
"versions": [
+ {
+ "name": "10.15.0005",
+ "images": {
+ "hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20241115202521.vmdk"
+ }
+ },
+ {
+ "name": "10.14.1000",
+ "images": {
+ "hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20240731173624.vmdk"
+ }
+ },
+ {
+ "name": "10.14.0001",
+ "images": {
+ "hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20240528190307.vmdk"
+ }
+ },
{
"name": "10.13.1000",
"images": {
diff --git a/gns3server/appliances/asterfusion-vAsterNOS-campus.gns3a b/gns3server/appliances/asterfusion-vAsterNOS-campus.gns3a
new file mode 100644
index 00000000..c9bcc69a
--- /dev/null
+++ b/gns3server/appliances/asterfusion-vAsterNOS-campus.gns3a
@@ -0,0 +1,50 @@
+{
+ "appliance_id": "9e934470-d898-4289-a5ed-50af094e629e",
+ "name": "Asterfusion vAsterNOS campus",
+ "category": "multilayer_switch",
+ "description": "AsterNOS is the core technology of Asterfusion’s one-stop SONiC turnkey solution designed for cloud, enterprise, and AI-driven scenarios. AsterNOS v5.2 Campus is specifically designed for traditional campus networks, offering comprehensive L2/L3 capabilities suitable for various campus scenarios such as schools, office buildings, and hospitals. This version supports a fully cloud-integrated Layer 3 network architecture, providing rich routing and switching functionalities to ensure high-performance operation and ease of maintenance. It can also be deployed in the GNS3 simulation environment to experience a complete All-Layer 3 Cloud-Campus network. This version is ideal for enterprises and campus networks requiring high-performance multi-service transport, supporting cross-domain connectivity and providing nanosecond-level time synchronization. It is well-suited for applications with stringent time accuracy requirements, such as financial trading, industrial automation, and smart manufacturing. NOTICE: This appliance file is a virtualized version of AsterNOS and is intended to be used only to experience the basic functionality and industry standard CLI (Klish), not for official software testing. For more information about AsterNOS commercial version, please feel free to contact us via Email: bd@cloudswit.ch",
+ "vendor_name": "Asterfusion",
+ "vendor_url": "https://cloudswit.ch/",
+ "vendor_logo_url": "https://raw.githubusercontent.com/GNS3/gns3-registry/master/vendor-logos/asterfusion.png",
+ "documentation_url": "https://help.cloudswit.ch/portal/en/kb/articles/vasternos",
+ "product_name": "vAsterNOS",
+ "product_url": "https://cloudswit.ch/",
+ "registry_version": 4,
+ "status": "experimental",
+ "maintainer": "Asterfusion",
+ "maintainer_email": "bd@cloudswit.ch",
+ "usage": "The login is admin and the password is asteros",
+ "symbol": "asterfusion-vAsterNOS.svg",
+ "first_port_name": "eth0",
+ "port_name_format": "Ethernet{0}",
+ "qemu": {
+ "adapter_type": "e1000",
+ "adapters": 10,
+ "ram": 4096,
+ "cpus": 4,
+ "hda_disk_interface": "virtio",
+ "arch": "x86_64",
+ "console_type": "telnet",
+ "boot_priority": "d",
+ "kvm": "require"
+ },
+ "images": [
+ {
+ "filename": "vAsterNOS-V5.2R012P01.img",
+ "version": "5.2-12-1",
+ "md5sum": "d18c0cfd786607ccc6dc1069a8f40465",
+ "filesize": 2823290880,
+ "download_url": "https://drive.cloudswitch.io/external/d29f6d0a6c8322fea42b3c08e95113d026b8ec6aafbe29193c338333077f3da7"
+ }
+
+ ],
+ "versions": [
+ {
+ "name": "5.2-12-1",
+ "images": {
+ "hda_disk_image": "vAsterNOS-V5.2R012P01.img"
+ }
+ }
+ ]
+}
+
diff --git a/gns3server/appliances/asterfusion-vAsterNOS.gns3a b/gns3server/appliances/asterfusion-vAsterNOS.gns3a
index ee7e87c0..e6e392c3 100644
--- a/gns3server/appliances/asterfusion-vAsterNOS.gns3a
+++ b/gns3server/appliances/asterfusion-vAsterNOS.gns3a
@@ -13,7 +13,7 @@
"status": "experimental",
"maintainer": "Asterfusion",
"maintainer_email": "bd@cloudswit.ch",
- "usage": "The login is admin, passwd asteros",
+ "usage": "The login is admin and the password is asteros",
"symbol": "asterfusion-vAsterNOS.svg",
"first_port_name": "eth0",
"port_name_format": "Ethernet{0}",
diff --git a/gns3server/appliances/centos-cloud.gns3a b/gns3server/appliances/centos-cloud.gns3a
index 74c645ff..e8061267 100644
--- a/gns3server/appliances/centos-cloud.gns3a
+++ b/gns3server/appliances/centos-cloud.gns3a
@@ -27,44 +27,28 @@
},
"images": [
{
- "filename": "CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2",
- "version": "Stream-9 (20230704.1)",
- "md5sum": "e04511e019325a97837edd9eafe02b48",
- "filesize": 1087868416,
+ "filename": "CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2",
+ "version": "Stream-10 (20250331.0)",
+ "md5sum": "776033371ca346001dd6390f0cbaf0d0",
+ "filesize": 952041472,
+ "download_url": "https://cloud.centos.org/centos/10-stream/x86_64/images",
+ "direct_download_url": "https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2"
+ },
+ {
+ "filename": "CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2",
+ "version": "Stream-9 (20250331.0)",
+ "md5sum": "4aaeddc6ca497065522c75a7471f9bfd",
+ "filesize": 1250625536,
"download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images",
- "direct_download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2"
+ "direct_download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2"
},
{
- "filename": "CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2",
- "version": "Stream-8 (20230710.0)",
- "md5sum": "83e02ce98c29753c86fb7be7d802aa75",
- "filesize": 1676164096,
+ "filename": "CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2",
+ "version": "Stream-8 (20240603.0)",
+ "md5sum": "77f3c9650785b8e977209796e09ee33e",
+ "filesize": 2003698688,
"download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images",
- "direct_download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2"
- },
- {
- "filename": "CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2",
- "version": "8.4 (2105)",
- "md5sum": "032eed270415526546eac07628905a62",
- "filesize": 1309652992,
- "download_url": "https://cloud.centos.org/centos/8/x86_64/images",
- "direct_download_url": "https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2"
- },
- {
- "filename": "CentOS-7-x86_64-GenericCloud-2111.qcow2",
- "version": "7 (2111)",
- "md5sum": "730b8662695831670721c8245be61dac",
- "filesize": 897384448,
- "download_url": "https://cloud.centos.org/centos/7/images",
- "direct_download_url": "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2111.qcow2"
- },
- {
- "filename": "CentOS-7-x86_64-GenericCloud-1809.qcow2",
- "version": "7 (1809)",
- "md5sum": "da79108d1324b27bd1759362b82fbe40",
- "filesize": 914948096,
- "download_url": "https://cloud.centos.org/centos/7/images",
- "direct_download_url": "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2"
+ "direct_download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2"
},
{
"filename": "centos-cloud-init-data.iso",
@@ -77,37 +61,23 @@
],
"versions": [
{
- "name": "Stream-9 (20230704.1)",
+ "name": "Stream-10 (20250331.0)",
"images": {
- "hda_disk_image": "CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2",
+ "hda_disk_image": "CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
- "name": "Stream-8 (20230710.0)",
+ "name": "Stream-9 (20250331.0)",
"images": {
- "hda_disk_image": "CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2",
+ "hda_disk_image": "CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
- "name": "8.4 (2105)",
+ "name": "Stream-8 (20240603.0)",
"images": {
- "hda_disk_image": "CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2",
- "cdrom_image": "centos-cloud-init-data.iso"
- }
- },
- {
- "name": "7 (2111)",
- "images": {
- "hda_disk_image": "CentOS-7-x86_64-GenericCloud-2111.qcow2",
- "cdrom_image": "centos-cloud-init-data.iso"
- }
- },
- {
- "name": "7 (1809)",
- "images": {
- "hda_disk_image": "CentOS-7-x86_64-GenericCloud-1809.qcow2",
+ "hda_disk_image": "CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
}
diff --git a/gns3server/appliances/exos.gns3a b/gns3server/appliances/exos.gns3a
index 3d2f4366..2393b3e0 100644
--- a/gns3server/appliances/exos.gns3a
+++ b/gns3server/appliances/exos.gns3a
@@ -30,6 +30,13 @@
"images": [
{
+ "filename": "EXOS-VM_32.7.2.19.qcow2",
+ "version": "32.7.2.19",
+ "md5sum": "eba580a2e18d2a9cc972c9ece8917ea8",
+ "filesize": 236847104,
+ "direct_download_url": "https://akamai-ep.extremenetworks.com/Extreme_P/github-en/Virtual_EXOS/EXOS-VM_32.7.2.19.qcow2"
+ },
+ {
"filename": "EXOS-VM_v32.6.3.126.qcow2",
"version": "32.6.3.126",
"md5sum": "5856b6c427bd605fe1c7adb6ee6b2659",
@@ -41,6 +48,12 @@
"versions": [
+ {
+ "name": "32.7.2.19",
+ "images": {
+ "hda_disk_image": "EXOS-VM_32.7.2.19.qcow2"
+ }
+ },
{
"name": "32.6.3.126",
"images": {
diff --git a/gns3server/appliances/fedora-cloud.gns3a b/gns3server/appliances/fedora-cloud.gns3a
index 5024c31b..70c33400 100644
--- a/gns3server/appliances/fedora-cloud.gns3a
+++ b/gns3server/appliances/fedora-cloud.gns3a
@@ -26,6 +26,22 @@
"options": "-nographic"
},
"images": [
+ {
+ "filename": "Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2",
+ "version": "41-1.4",
+ "md5sum": "8efc9edc04f38775de72ce067166b2a1",
+ "filesize": 491716608,
+ "download_url": "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images",
+ "direct_download_url": "https://fedora.mirrorservice.org/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
+ },
+ {
+ "filename": "Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2",
+ "version": "40-1.14",
+ "md5sum": "3eed4b1a9de35208ed30d9bb72c1522d",
+ "filesize": 397475840,
+ "download_url": "https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images",
+ "direct_download_url": "https://fedora.mirrorservice.org/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2"
+ },
{
"filename": "Fedora-Cloud-Base-39-1.5.x86_64.qcow2",
"version": "39-1.5",
@@ -52,6 +68,20 @@
}
],
"versions": [
+ {
+ "name": "41-1.4",
+ "images": {
+ "hda_disk_image": "Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2",
+ "cdrom_image": "fedora-cloud-init-data.iso"
+ }
+ },
+ {
+ "name": "40-1.14",
+ "images": {
+ "hda_disk_image": "Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2",
+ "cdrom_image": "fedora-cloud-init-data.iso"
+ }
+ },
{
"name": "39-1.5",
"images": {
diff --git a/gns3server/appliances/infix.gns3a b/gns3server/appliances/infix.gns3a
index 1fb796d5..2f19f77d 100644
--- a/gns3server/appliances/infix.gns3a
+++ b/gns3server/appliances/infix.gns3a
@@ -37,26 +37,69 @@
"download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/OVMF-edk2-stable202305.fd.zip/download",
"compression": "zip"
- },
- {
+ },
+ {
"filename": "infix-x86_64-disk-24.11.1.img",
"filesize": 536870912,
"md5sum": "673a123fe122d1c2f5724baf9965a19d",
"version": "24.11.1",
"download_url": "https://github.com/kernelkit/infix/releases/download/v24.11.1/infix-x86_64-24.11.1.tar.gz",
"compression": "gzip"
- },
- {
+ },
+ {
"filename": "infix-x86_64-disk-25.01.0.img",
"filesize": 536870912,
"md5sum": "a814d93b385116b4a35712c445b5f830",
"version": "25.01.0",
"download_url": "https://github.com/kernelkit/infix/releases/download/v25.01.0/infix-x86_64-25.01.0.tar.gz",
"compression": "gzip"
+ },
+ {
+ "filename": "infix-x86_64-disk-25.02.0.img",
+ "filesize": 536870912,
+ "md5sum": "8e29474c97df3486eb063a8af5043f50",
+ "version": "25.02.0",
+ "download_url": "https://github.com/kernelkit/infix/releases/download/v25.02.0/infix-x86_64-25.02.0.tar.gz",
+ "compression": "gzip"
+ },
+ {
+ "filename": "infix-x86_64-disk-25.03.0.img",
+ "filesize": 536870912,
+ "md5sum": "5e1ed1081cd1673bfed4a9b5b1c58e08",
+ "version": "25.03.0",
+ "download_url": "https://github.com/kernelkit/infix/releases/download/v25.03.0/infix-x86_64-25.03.0.tar.gz",
+ "compression": "gzip"
+ },
+ {
+ "filename": "infix-x86_64-disk-25.04.0.qcow2",
+ "filesize": 259723776,
+ "md5sum": "84bd999513325d0007d0e6587abc6140",
+ "version": "25.04.0",
+ "direct_download_url": "https://github.com/kernelkit/infix/releases/download/v25.04.0/infix-x86_64-disk-25.04.0.qcow2"
}
-
],
"versions": [
+ {
+ "name": "25.04.0",
+ "images": {
+ "bios_image": "OVMF-edk2-stable202305.fd",
+ "hda_disk_image": "infix-x86_64-disk-25.04.0.qcow2"
+ }
+ },
+ {
+ "name": "25.03.0",
+ "images": {
+ "bios_image": "OVMF-edk2-stable202305.fd",
+ "hda_disk_image": "infix-x86_64-disk-25.03.0.img"
+ }
+ },
+ {
+ "name": "25.02.0",
+ "images": {
+ "bios_image": "OVMF-edk2-stable202305.fd",
+ "hda_disk_image": "infix-x86_64-disk-25.02.0.img"
+ }
+ },
{
"name": "25.01.0",
"images": {
diff --git a/gns3server/appliances/juniper-vJunos-router.gns3a b/gns3server/appliances/juniper-vJunos-router.gns3a
new file mode 100644
index 00000000..9669079a
--- /dev/null
+++ b/gns3server/appliances/juniper-vJunos-router.gns3a
@@ -0,0 +1,75 @@
+{
+ "appliance_id": "12394e0d-9ac5-4da5-8e91-94a462536b61",
+ "name": "vJunos-router",
+ "category": "router",
+ "description": "vJunos-router",
+ "vendor_name": "Juniper",
+ "vendor_url": "https://www.juniper.net",
+ "documentation_url": "https://www.juniper.net/documentation/product/us/en/vjunos-router/",
+ "product_name": "vJunos Router",
+ "registry_version": 6,
+ "status": "stable",
+ "availability": "free",
+ "maintainer": "AAm-kun",
+ "maintainer_email": "github@sugarpapa.mozmail.com",
+ "usage": "GNS3 SHOULD be a baremetal installation. Using the GNS3 VM MIGHT result in unwanted issues. Default user is root. No password is needed.",
+ "symbol": "juniper-vmx.svg",
+ "first_port_name": "ge-0/0/0",
+ "port_name_format": "ge-0/0/{port0}",
+ "qemu": {
+ "adapter_type": "virtio-net-pci",
+ "adapters": 17,
+ "ram": 5120,
+ "cpus": 4,
+ "hda_disk_interface": "virtio",
+ "arch": "x86_64",
+ "console_type": "telnet",
+ "kvm": "require",
+ "options": "-serial mon:stdio -nographic -smbios type=1,product=VM-VMX,family=lab -cpu host",
+ "on_close": "power_off"
+ },
+ "images": [
+ {
+ "filename": "vJunos-router-24.2R1-S2.qcow2",
+ "version": "24.2R1-S2",
+ "md5sum": "dd906b4d19463e22f3e1a297ff1a7464",
+ "filesize": 3675783168,
+ "download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
+ },
+ {
+ "filename": "vJunos-router-23.4R2-S2.1.qcow2",
+ "version": "23.4R2-S2.1",
+ "md5sum": "e25f5acdfc6c076d0023fd8289bcdd89",
+ "filesize": 3644063744,
+ "download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
+ },
+ {
+ "filename": "vJunos-router-23.2R1.15.qcow2",
+ "version": "23.2R1.15",
+ "md5sum": "18670fb67633822697fdd3cf982e7eb1",
+ "filesize": 3653566464,
+ "download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
+ }
+
+ ],
+ "versions": [
+ {
+ "images": {
+ "hda_disk_image": "vJunos-router-24.2R1-S2.qcow2"
+ },
+ "name": "24.2R1-S2"
+ },
+ {
+ "images": {
+ "hda_disk_image": "vJunos-router-23.4R2-S2.1.qcow2"
+ },
+ "name": "23.4R2-S2.1"
+ },
+ {
+ "images": {
+ "hda_disk_image": "vJunos-router-23.2R1.15.qcow2"
+ },
+ "name": "23.2R1.15"
+ }
+ ]
+}
\ No newline at end of file
diff --git a/gns3server/appliances/nethsecurity.gns3a b/gns3server/appliances/nethsecurity.gns3a
new file mode 100644
index 00000000..565e2392
--- /dev/null
+++ b/gns3server/appliances/nethsecurity.gns3a
@@ -0,0 +1,44 @@
+{
+ "appliance_id": "bb9ff73a-701e-40e8-b68a-6a6efeb04e99",
+ "name": "NethSecurity",
+ "category": "firewall",
+ "description": "NethSecurity is an Unified Threat Management (UTM) solution that provides a comprehensive suite of security features, including firewall, content filtering, deep packet inspection (DPI) using Netifyd, Dedalo hotspot, OpenVPN, and an optional remote controller. It is designed to be easy to install and configure, making it a good choice for both small and medium-sized businesses (SMBs) as well as enterprise organizations.",
+ "vendor_name": "Nethesis",
+ "vendor_url": "https://www.nethesis.it/",
+ "documentation_url": "https://docs.nethsecurity.org/en/latest/",
+ "product_name": "NethSecurity",
+ "product_url": "https://nethsecurity.org/",
+ "registry_version": 4,
+ "status": "stable",
+ "maintainer": "GNS3 Team",
+ "maintainer_email": "developers@gns3.net",
+ "usage": "Ethernet0 is the LAN link, Ethernet1 the WAN link. The default username is root and the password is Nethesis,1234",
+ "qemu": {
+ "adapter_type": "virtio-net-pci",
+ "adapters": 2,
+ "ram": 1024,
+ "hda_disk_interface": "scsi",
+ "arch": "x86_64",
+ "console_type": "telnet",
+ "kvm": "allow"
+ },
+ "images": [
+ {
+ "filename": "nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img",
+ "version": "8-24.10.0-ns.1.5.1",
+ "md5sum": "be670218effca1b86dac6b8d95012791",
+ "filesize": 331644416,
+ "download_url": "https://nethsecurity.org/download",
+ "direct_download_url": "https://updates.nethsecurity.nethserver.org/stable/8-24.10.0-ns.1.5.1/targets/x86/64/nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img.gz",
+ "compression": "gzip"
+ }
+ ],
+ "versions": [
+ {
+ "name": "8-24.10.0-ns.1.5.1",
+ "images": {
+ "hda_disk_image": "nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img"
+ }
+ }
+ ]
+}
diff --git a/gns3server/appliances/oracle-linux-cloud.gns3a b/gns3server/appliances/oracle-linux-cloud.gns3a
index f4517beb..1b53077f 100644
--- a/gns3server/appliances/oracle-linux-cloud.gns3a
+++ b/gns3server/appliances/oracle-linux-cloud.gns3a
@@ -26,6 +26,14 @@
"options": "-cpu host -nographic"
},
"images": [
+ {
+ "filename": "OL9U5_x86_64-kvm-b259.qcow2",
+ "version": "9.5",
+ "md5sum": "05e9b62c408ab49a02d6833fc683d1ad",
+ "filesize": 652935168,
+ "download_url": "https://yum.oracle.com/oracle-linux-templates.html",
+ "direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL9/u5/x86_64/OL9U5_x86_64-kvm-b259.qcow2"
+ },
{
"filename": "OL9U2_x86_64-kvm-b197.qcow",
"version": "9.2",
@@ -42,6 +50,14 @@
"download_url": "https://yum.oracle.com/oracle-linux-templates.html",
"direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL9/u1/x86_64/OL9U1_x86_64-kvm-b158.qcow"
},
+ {
+ "filename": "OL8U10_x86_64-kvm-b258.qcow2",
+ "version": "8.10",
+ "md5sum": "bb07581af5122515b6822595ded5deef",
+ "filesize": 1251672064,
+ "download_url": "https://yum.oracle.com/oracle-linux-templates.html",
+ "direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL8/u10/x86_64/OL8U10_x86_64-kvm-b258.qcow2"
+ },
{
"filename": "OL8U8_x86_64-kvm-b198.qcow",
"version": "8.8",
@@ -76,7 +92,14 @@
}
],
"versions": [
-{
+ {
+ "name": "9.5",
+ "images": {
+ "hda_disk_image": "OL9U5_x86_64-kvm-b259.qcow2",
+ "cdrom_image": "oracle-cloud-init-data.iso"
+ }
+ },
+ {
"name": "9.2",
"images": {
"hda_disk_image": "OL9U2_x86_64-kvm-b197.qcow",
@@ -90,6 +113,13 @@
"cdrom_image": "oracle-cloud-init-data.iso"
}
},
+ {
+ "name": "8.10",
+ "images": {
+ "hda_disk_image": "OL8U10_x86_64-kvm-b258.qcow2",
+ "cdrom_image": "oracle-cloud-init-data.iso"
+ }
+ },
{
"name": "8.8",
"images": {
diff --git a/gns3server/appliances/rhel.gns3a b/gns3server/appliances/rhel.gns3a
index 79dbb768..c383c4b4 100644
--- a/gns3server/appliances/rhel.gns3a
+++ b/gns3server/appliances/rhel.gns3a
@@ -13,7 +13,7 @@
"availability": "service-contract",
"maintainer": "Da-Geek",
"maintainer_email": "dageek@dageeks-geeks.gg",
- "usage": "You should download Red Hat Enterprise Linux KVM Guest Image from https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.3/x86_64/product-software attach/customize rhel-cloud-init.iso and start.\nusername: cloud-user\npassword: redhat",
+ "usage": "You should download Red Hat Enterprise Linux KVM Guest Image from https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.5/x86_64/product-software attach/customize rhel-cloud-init.iso and start.\nusername: cloud-user\npassword: redhat",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 1,
@@ -26,6 +26,20 @@
"options": "-cpu host -nographic"
},
"images": [
+ {
+ "filename": "rhel-9.5-x86_64-kvm.qcow2",
+ "version": "9.5",
+ "md5sum": "8174396d5cb47727c59dd04dd9a05418",
+ "filesize": 974389248,
+ "download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.5/x86_64/product-software"
+ },
+ {
+ "filename": "rhel-9.4-x86_64-kvm.qcow2",
+ "version": "9.4",
+ "md5sum": "77a2ca9a4cb0448260e04f0d2ebf9807",
+ "filesize": 957218816,
+ "download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.4/x86_64/product-software"
+ },
{
"filename": "rhel-9.3-x86_64-kvm.qcow2",
"version": "9.3",
@@ -54,6 +68,20 @@
"filesize": 696582144,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/9.0/x86_64/product-software"
},
+ {
+ "filename": "rhel-8.10-x86_64-kvm.qcow2",
+ "version": "8.10",
+ "md5sum": "5fda99fcab47e3b235c6ccdb6e80d362",
+ "filesize": 1065091072,
+ "download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.10/x86_64/product-software"
+ },
+ {
+ "filename": "rhel-8.9-x86_64-kvm.qcow2",
+ "version": "8.9",
+ "md5sum": "23295fe508678cbdebfbdbd41ef6e6e2",
+ "filesize": 971833344,
+ "download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.9/x86_64/product-software"
+ },
{
"filename": "rhel-8.8-x86_64-kvm.qcow2",
"version": "8.8",
@@ -119,6 +147,20 @@
}
],
"versions": [
+ {
+ "name": "9.5",
+ "images": {
+ "hda_disk_image": "rhel-9.5-x86_64-kvm.qcow2",
+ "cdrom_image": "rhel-cloud-init.iso"
+ }
+ },
+ {
+ "name": "9.4",
+ "images": {
+ "hda_disk_image": "rhel-9.4-x86_64-kvm.qcow2",
+ "cdrom_image": "rhel-cloud-init.iso"
+ }
+ },
{
"name": "9.3",
"images": {
@@ -147,6 +189,20 @@
"cdrom_image": "rhel-cloud-init.iso"
}
},
+ {
+ "name": "8.10",
+ "images": {
+ "hda_disk_image": "rhel-8.10-x86_64-kvm.qcow2",
+ "cdrom_image": "rhel-cloud-init.iso"
+ }
+ },
+ {
+ "name": "8.9",
+ "images": {
+ "hda_disk_image": "rhel-8.9-x86_64-kvm.qcow2",
+ "cdrom_image": "rhel-cloud-init.iso"
+ }
+ },
{
"name": "8.8",
"images": {
diff --git a/gns3server/appliances/rockylinux.gns3a b/gns3server/appliances/rockylinux.gns3a
index 65817a76..49f7a4bc 100644
--- a/gns3server/appliances/rockylinux.gns3a
+++ b/gns3server/appliances/rockylinux.gns3a
@@ -26,6 +26,14 @@
"options": "-nographic -cpu host"
},
"images": [
+ {
+ "filename": "Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2",
+ "version": "9.5",
+ "md5sum": "880eccf788301bb9f34669faebe09276",
+ "filesize": 609812480,
+ "download_url": "https://download.rockylinux.org/pub/rocky/9/images/x86_64/",
+ "direct_download_url": "https://download.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2"
+ },
{
"filename": "Rocky-9-GenericCloud-Base-9.3-20231113.0.x86_64.qcow2",
"version": "9.3",
@@ -68,6 +76,13 @@
}
],
"versions": [
+ {
+ "name": "9.5",
+ "images": {
+ "hda_disk_image": "Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2",
+ "cdrom_image": "rocky-cloud-init-data.iso"
+ }
+ },
{
"name": "9.3",
"images": {
diff --git a/gns3server/compute/builtin/nodes/nat.py b/gns3server/compute/builtin/nodes/nat.py
index 29d17959..f833f4e5 100644
--- a/gns3server/compute/builtin/nodes/nat.py
+++ b/gns3server/compute/builtin/nodes/nat.py
@@ -37,7 +37,7 @@ class Nat(Cloud):
def __init__(self, name, node_id, project, manager, ports=None):
allowed_interfaces = Config.instance().settings.Server.allowed_interfaces
- if allowed_interfaces:
+ if allowed_interfaces and isinstance(allowed_interfaces, str):
allowed_interfaces = allowed_interfaces.split(',')
if sys.platform.startswith("linux"):
nat_interface = Config.instance().settings.Server.default_nat_interface
diff --git a/gns3server/compute/docker/__init__.py b/gns3server/compute/docker/__init__.py
index b6c13448..1ab159de 100644
--- a/gns3server/compute/docker/__init__.py
+++ b/gns3server/compute/docker/__init__.py
@@ -175,11 +175,10 @@ class Docker(BaseManager):
response = await self.http_query(method, path, data=data, params=params)
body = await response.read()
response.close()
- if body and len(body):
- if response.headers.get('CONTENT-TYPE') == 'application/json':
- body = json.loads(body.decode("utf-8"))
- else:
- body = body.decode("utf-8")
+ if response.headers.get('CONTENT-TYPE') == 'application/json':
+ body = json.loads(body.decode("utf-8", errors="ignore"))
+ else:
+ body = body.decode("utf-8", errors="ignore")
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
return body
@@ -267,12 +266,12 @@ class Docker(BaseManager):
pass
if progress_callback:
- progress_callback(f"Pulling '{image}' from docker hub")
+ progress_callback(f"Pulling '{image}' from Docker repository")
try:
response = await self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
except DockerError as e:
raise DockerError(
- f"Could not pull the '{image}' image from Docker Hub, "
+ f"Could not pull the '{image}' image from Docker repository, "
f"please check your Internet connection (original error: {e})"
)
# The pull api will stream status via an HTTP JSON stream
@@ -281,10 +280,10 @@ class Docker(BaseManager):
try:
chunk = await response.content.read(CHUNK_SIZE)
except aiohttp.ServerDisconnectedError:
- log.error(f"Disconnected from server while pulling Docker image '{image}' from docker hub")
+ log.error(f"Disconnected from server while pulling Docker image '{image}' from Docker repository")
break
except asyncio.TimeoutError:
- log.error(f"Timeout while pulling Docker image '{image}' from docker hub")
+ log.error("Timeout while pulling Docker image '{}' from Docker repository".format(image))
break
if not chunk:
break
diff --git a/gns3server/compute/docker/docker_vm.py b/gns3server/compute/docker/docker_vm.py
index 4dbced0e..2360bc8f 100644
--- a/gns3server/compute/docker/docker_vm.py
+++ b/gns3server/compute/docker/docker_vm.py
@@ -437,7 +437,7 @@ class DockerVM(BaseNode):
try:
image_infos = await self._get_image_information()
except DockerHttp404Error:
- log.info(f"Image '{self._image}' is missing, pulling it from Docker hub...")
+ log.info("Image '{}' is missing, pulling it from Docker repository...".format(self._image))
await self.pull_image(self._image)
image_infos = await self._get_image_information()
@@ -617,6 +617,7 @@ class DockerVM(BaseNode):
await self._clean_servers()
await self.manager.query("POST", f"containers/{self._cid}/start")
+ await asyncio.sleep(0.5) # give the Docker container some time to start
self._namespace = await self._get_namespace()
await self._start_ubridge(require_privileged_access=True)
diff --git a/gns3server/compute/qemu/qemu_vm.py b/gns3server/compute/qemu/qemu_vm.py
index 10023233..cc666f03 100644
--- a/gns3server/compute/qemu/qemu_vm.py
+++ b/gns3server/compute/qemu/qemu_vm.py
@@ -32,6 +32,7 @@ import subprocess
import time
import json
import shlex
+import psutil
from gns3server.utils import parse_version
from gns3server.utils.asyncio import subprocess_check_output, cancellable_wait_run_in_executor
@@ -265,17 +266,10 @@ class QemuVM(BaseNode):
if qemu_bin == "qemu":
self._platform = "i386"
else:
- self._platform = re.sub(r'^qemu-system-(\w+).*$', r'\1', qemu_bin, re.IGNORECASE)
-
- try:
- QemuPlatform(self._platform.split(".")[0])
- except ValueError:
+ self._platform = re.sub(r'^qemu-system-(\w+).*$', r'\1', qemu_bin, flags=re.IGNORECASE)
+ if self._platform.split(".")[0] not in list(QemuPlatform):
raise QemuError(f"Platform {self._platform} is unknown")
- log.info(
- 'QEMU VM "{name}" [{id}] has set the QEMU path to {qemu_path}'.format(
- name=self._name, id=self._id, qemu_path=qemu_path
- )
- )
+        log.info(f'QEMU VM "{self._name}" [{self._id}] has set the QEMU path to {qemu_path}')
def _check_qemu_path(self, qemu_path):
@@ -1225,6 +1219,21 @@ class QemuVM(BaseNode):
except OSError as e:
raise QemuError(f"Could not start Telnet QEMU console {e}\n")
+ def _find_partition_for_path(self, path):
+ """
+ Finds the disk partition for a given path.
+ """
+
+ path = os.path.abspath(path)
+ partitions = psutil.disk_partitions()
+ # find the partition with the longest matching mount point
+ matching_partition = None
+ for partition in partitions:
+ if path.startswith(partition.mountpoint):
+ if matching_partition is None or len(partition.mountpoint) > len(matching_partition.mountpoint):
+ matching_partition = partition
+ return matching_partition
+
async def _termination_callback(self, returncode):
"""
Called when the process has stopped.
@@ -1236,9 +1245,19 @@ class QemuVM(BaseNode):
log.info("QEMU process has stopped, return code: %d", returncode)
await self.stop()
if returncode != 0:
+ qemu_stdout = self.read_stdout()
+ # additional permissions need to be configured for swtpm in AppArmor if the working dir
+ # is located on a different partition than the partition for the root directory
+ if "TPM result for CMD_INIT" in qemu_stdout:
+ partition = self._find_partition_for_path(self.project.path)
+ if partition and partition.mountpoint != "/":
+ qemu_stdout += "\nTPM error: the project directory is not on the same partition as the root directory which can be a problem when using AppArmor.\n" \
+ "Please try to execute the following commands on the server:\n\n" \
+ "echo 'owner {}/** rwk,' | sudo tee /etc/apparmor.d/local/usr.bin.swtpm > /dev/null\n" \
+ "sudo service apparmor restart".format(os.path.dirname(self.project.path))
self.project.emit(
"log.error",
- {"message": f"QEMU process has stopped, return code: {returncode}\n{self.read_stdout()}"},
+ {"message": f"QEMU process has stopped, return code: {returncode}\n{qemu_stdout}"},
)
async def stop(self):
@@ -2287,19 +2306,42 @@ class QemuVM(BaseNode):
else:
raise QemuError(f"bios image '{self._bios_image}' is not accessible")
options.extend(["-bios", self._bios_image.replace(",", ",,")])
+
elif self._uefi:
- # get the OVMF firmware from the images directory
- ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE.fd")
+
+ old_ovmf_vars_path = os.path.join(self.working_dir, "OVMF_VARS.fd")
+ if os.path.exists(old_ovmf_vars_path):
+ # the node has its own UEFI variables store already, we must also use the old UEFI firmware
+ ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE.fd")
+ else:
+ system_ovmf_firmware_path = "/usr/share/OVMF/OVMF_CODE_4M.fd"
+ if os.path.exists(system_ovmf_firmware_path):
+ ovmf_firmware_path = system_ovmf_firmware_path
+ else:
+ # otherwise, get the UEFI firmware from the images directory
+ ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE_4M.fd")
+
log.info("Configuring UEFI boot mode using OVMF file: '{}'".format(ovmf_firmware_path))
options.extend(["-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_firmware_path)])
+ # try to use the UEFI variables store from the system first
+ system_ovmf_vars_path = "/usr/share/OVMF/OVMF_VARS_4M.fd"
+ if os.path.exists(system_ovmf_vars_path):
+ ovmf_vars_path = system_ovmf_vars_path
+ else:
+ # otherwise, get the UEFI variables store from the images directory
+ ovmf_vars_path = self.manager.get_abs_image_path("OVMF_VARS_4M.fd")
+
# the node should have its own copy of OVMF_VARS.fd (the UEFI variables store)
- ovmf_vars_node_path = os.path.join(self.working_dir, "OVMF_VARS.fd")
- if not os.path.exists(ovmf_vars_node_path):
- try:
- shutil.copyfile(self.manager.get_abs_image_path("OVMF_VARS.fd"), ovmf_vars_node_path)
- except OSError as e:
- raise QemuError("Cannot copy OVMF_VARS.fd file to the node working directory: {}".format(e))
+ if os.path.exists(old_ovmf_vars_path):
+ ovmf_vars_node_path = old_ovmf_vars_path
+ else:
+ ovmf_vars_node_path = os.path.join(self.working_dir, "OVMF_VARS_4M.fd")
+ if not os.path.exists(ovmf_vars_node_path):
+ try:
+ shutil.copyfile(ovmf_vars_path, ovmf_vars_node_path)
+ except OSError as e:
+ raise QemuError("Cannot copy OVMF_VARS_4M.fd file to the node working directory: {}".format(e))
options.extend(["-drive", "if=pflash,format=raw,file={}".format(ovmf_vars_node_path)])
return options
@@ -2396,16 +2438,13 @@ class QemuVM(BaseNode):
) # we do not want any user networking back-end if no adapter is connected.
# Each 32 PCI device we need to add a PCI bridge with max 9 bridges
- pci_devices = 4 + len(self._ethernet_adapters) # 4 PCI devices are use by default by qemu
- pci_bridges = math.floor(pci_devices / 32)
+ # Reserve 32 devices on root pci_bridge,
+ # since the number of devices used by templates may differ significantly
+ # and pci_bridges also consume IDs.
+ # Move network devices to their own bridge
+ pci_devices_reserved = 32
pci_bridges_created = 0
- if pci_bridges >= 1:
- if self._qemu_version and parse_version(self._qemu_version) < parse_version("2.4.0"):
- raise QemuError(
- "Qemu version 2.4 or later is required to run this VM with a large number of network adapters"
- )
-
- pci_device_id = 4 + pci_bridges # Bridge consume PCI ports
+ pci_device_id = pci_devices_reserved
for adapter_number, adapter in enumerate(self._ethernet_adapters):
mac = int_to_macaddress(macaddress_to_int(self._mac_address) + adapter_number)
@@ -2596,6 +2635,8 @@ class QemuVM(BaseNode):
"""
self._qemu_version = await self.manager.get_qemu_version(self.qemu_path)
+ if self._qemu_version and parse_version(self._qemu_version) < parse_version("2.4.0"):
+ raise QemuError("Qemu version 2.4 or later is required to run Qemu VMs")
vm_name = self._name.replace(",", ",,")
project_path = self.project.path.replace(",", ",,")
additional_options = self._options.strip()
diff --git a/gns3server/controller/compute.py b/gns3server/controller/compute.py
index aabdd5af..c01fddae 100644
--- a/gns3server/controller/compute.py
+++ b/gns3server/controller/compute.py
@@ -458,10 +458,11 @@ class Compute:
# FIXME: slow down number of compute events
self._controller.notification.controller_emit("compute.updated", self.asdict())
else:
- if action == "log.error":
- log.error(event.pop("message"))
await self._controller.notification.dispatch(
- action, event, project_id=project_id, compute_id=self.id
+ action,
+ event,
+ project_id=project_id,
+ compute_id=self.id
)
else:
if response.type == aiohttp.WSMsgType.CLOSE:
diff --git a/gns3server/controller/gns3vm/virtualbox_gns3_vm.py b/gns3server/controller/gns3vm/virtualbox_gns3_vm.py
index 79d97d94..97f309a1 100644
--- a/gns3server/controller/gns3vm/virtualbox_gns3_vm.py
+++ b/gns3server/controller/gns3vm/virtualbox_gns3_vm.py
@@ -249,6 +249,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
return True
return False
+
async def list(self):
"""
List all VirtualBox VMs
@@ -269,8 +270,8 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
# get a NAT interface number
nat_interface_number = await self._look_for_interface("nat")
- if nat_interface_number < 0:
- raise GNS3VMError(f'VM "{self.vmname}" must have a NAT interface configured in order to start')
+ if nat_interface_number < 0 and await self._look_for_interface("natnetwork") < 0:
+ raise GNS3VMError(f'VM "{self.vmname}" must have a NAT interface or NAT Network configured in order to start')
if sys.platform.startswith("darwin") and parse_version(self._system_properties["API version"]) >= parse_version("7_0"):
# VirtualBox 7.0+ on macOS requires a host-only network interface
@@ -339,42 +340,68 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
elif vm_state == "paused":
args = [self._vmname, "resume"]
await self._execute("controlvm", args)
- ip_address = "127.0.0.1"
- try:
- # get a random port on localhost
- with socket.socket() as s:
- s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
- s.bind((ip_address, 0))
- api_port = s.getsockname()[1]
- except OSError as e:
- raise GNS3VMError(f"Error while getting random port: {e}")
- if await self._check_vbox_port_forwarding():
- # delete the GNS3VM NAT port forwarding rule if it exists
- log.info(f"Removing GNS3VM NAT port forwarding rule from interface {nat_interface_number}")
- await self._execute("controlvm", [self._vmname, f"natpf{nat_interface_number}", "delete", "GNS3VM"])
+ log.info("Retrieving IP address from GNS3 VM...")
+ ip = await self._get_ip_from_guest_property()
+ if ip:
+ self.ip_address = ip
+ else:
+ # if we can't get the IP address from the guest property, we try to get it from the GNS3 server (a NAT interface is required)
+ if nat_interface_number < 0:
+ raise GNS3VMError("Could not find guest IP address for {}".format(self.vmname))
+ log.warning("Could not find IP address from guest property, trying to get it from GNS3 server")
+ ip_address = "127.0.0.1"
+ try:
+ # get a random port on localhost
+ with socket.socket() as s:
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.bind((ip_address, 0))
+ api_port = s.getsockname()[1]
+ except OSError as e:
+ raise GNS3VMError("Error while getting random port: {}".format(e))
- # add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to the port in the VM
- log.info(f"Adding GNS3VM NAT port forwarding rule with port {api_port} to interface {nat_interface_number}")
- await self._execute(
- "controlvm",
- [self._vmname, f"natpf{nat_interface_number}", f"GNS3VM,tcp,{ip_address},{api_port},,{self.port}"],
- )
+ if await self._check_vbox_port_forwarding():
+ # delete the GNS3VM NAT port forwarding rule if it exists
+ log.info("Removing GNS3VM NAT port forwarding rule from interface {}".format(nat_interface_number))
+ await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"])
- self.ip_address = await self._get_ip(interface_number, api_port)
- log.info("GNS3 VM has been started with IP {}".format(self.ip_address))
+ # add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to the port in the VM
+ log.info("Adding GNS3VM NAT port forwarding rule with port {} to interface {}".format(api_port, nat_interface_number))
+ await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number),
+ "GNS3VM,tcp,{},{},,{}".format(ip_address, api_port, self.port)])
+
+ self.ip_address = await self._get_ip_from_server(interface_number, api_port)
+
+ log.info("GNS3 VM has been started with IP '{}'".format(self.ip_address))
self.running = True
- async def _get_ip(self, hostonly_interface_number, api_port):
+ async def _get_ip_from_guest_property(self):
"""
- Get the IP from VirtualBox.
+ Get the IP from VirtualBox by retrieving the guest property (Guest Additions must be installed).
+ """
+
+ remaining_try = 180 # try for 3 minutes
+ while remaining_try > 0:
+ result = await self._execute("guestproperty", ["get", self._vmname, "/VirtualBox/GuestInfo/Net/0/V4/IP"])
+ for info in result.splitlines():
+ if ':' in info:
+ name, value = info.split(':', 1)
+ if name == "Value":
+ return value.strip()
+ remaining_try -= 1
+ await asyncio.sleep(1)
+ return None
+
+ async def _get_ip_from_server(self, hostonly_interface_number, api_port):
+ """
+ Get the IP from VirtualBox by sending a request to the GNS3 server.
Due to VirtualBox limitation the only way is to send request each
second to a GNS3 endpoint in order to get the list of the interfaces and
their IP and after that match it with VirtualBox host only.
"""
- remaining_try = 300
+ remaining_try = 180 # try for 3 minutes
while remaining_try > 0:
try:
async with HTTPClient.get(f"http://127.0.0.1:{api_port}/v3/compute/network/interfaces") as resp:
diff --git a/gns3server/crash_report.py b/gns3server/crash_report.py
index cdb02766..17a207fd 100644
--- a/gns3server/crash_report.py
+++ b/gns3server/crash_report.py
@@ -58,7 +58,7 @@ class CrashReport:
Report crash to a third party service
"""
- DSN = "https://0d64280ffb5ae409d448f255b9956a88@o19455.ingest.us.sentry.io/38482"
+ DSN = "https://61bb46252cabeebd49ee1e09fb8ba72e@o19455.ingest.us.sentry.io/38482"
_instance = None
def __init__(self):
diff --git a/gns3server/disks/OVMF_CODE_4M.fd b/gns3server/disks/OVMF_CODE_4M.fd
new file mode 100644
index 00000000..9bbbdd99
Binary files /dev/null and b/gns3server/disks/OVMF_CODE_4M.fd differ
diff --git a/gns3server/disks/OVMF_VARS_4M.fd b/gns3server/disks/OVMF_VARS_4M.fd
new file mode 100644
index 00000000..efb4f46c
Binary files /dev/null and b/gns3server/disks/OVMF_VARS_4M.fd differ
diff --git a/gns3server/schemas/__init__.py b/gns3server/schemas/__init__.py
index d331de54..a23d61d9 100644
--- a/gns3server/schemas/__init__.py
+++ b/gns3server/schemas/__init__.py
@@ -20,7 +20,7 @@ from .common import ErrorMessage
from .version import Version
# Controller schemas
-from .controller.links import LinkCreate, LinkUpdate, Link
+from .controller.links import LinkCreate, LinkUpdate, Link, UDPPortInfo, EthernetPortInfo
from .controller.computes import ComputeCreate, ComputeUpdate, ComputeVirtualBoxVM, ComputeVMwareVM, ComputeDockerImage, AutoIdlePC, Compute
from .controller.templates import TemplateCreate, TemplateUpdate, TemplateUsage, Template
from .controller.images import Image, ImageType
diff --git a/gns3server/schemas/controller/links.py b/gns3server/schemas/controller/links.py
index fd853e2f..cdcf6047 100644
--- a/gns3server/schemas/controller/links.py
+++ b/gns3server/schemas/controller/links.py
@@ -92,3 +92,24 @@ class Link(LinkBase):
None,
description="Read only property. The compute identifier where a capture is running"
)
+
+
+class UDPPortInfo(BaseModel):
+ """
+ UDP port information.
+ """
+
+ node_id: UUID
+ lport: int
+ rhost: str
+ rport: int
+ type: str
+
+class EthernetPortInfo(BaseModel):
+ """
+ Ethernet port information.
+ """
+
+ node_id: UUID
+ interface: str
+ type: str
diff --git a/gns3server/static/web-ui/index.html b/gns3server/static/web-ui/index.html
index 1aecd766..6a270668 100644
--- a/gns3server/static/web-ui/index.html
+++ b/gns3server/static/web-ui/index.html
@@ -46,6 +46,6 @@
gtag('config', 'G-0BT7QQV1W1');
-
+