mirror of https://github.com/GNS3/gns3-server synced 2025-05-21 08:18:50 +00:00

Merge remote-tracking branch 'origin/3.0' into gh-pages

Commit 50dd9d0b0d by github-actions, 2025-05-14 17:12:45 +00:00
42 changed files with 927 additions and 290 deletions


@ -1,5 +1,44 @@
# Change Log
## 3.0.5 14/05/2025
* Bundle web-ui v3.0.5
* Sync appliances
* Use Ubuntu 24.04 LTS in Dockerfile. Ref #2523
* Fix tests and require Qemu version >= 2.4
* Fix adding pci_bridges to qemu vms
* Resolve deprecation warnings of regex library
* Remove OVMF_CODE_4M.secboot.fd and associated code
* Add edk2-stable202502 UEFI firmwares and try to first use firmwares from the ovmf package if installed. Fixes #2494
* Prettify TPM error message
* Fix bug in qemu_stdout message
* Try to detect swtpm and AppArmor issue. Ref https://github.com/GNS3/gns3-gui/issues/3725
* Fix Docker logs decoding. Ref #2522
* Add delay after starting a Docker container and adding connections in uBridge. Ref #2522
* Fix TypeError when reading Docker container logs. Ref #2522
* Allow an image to be uploaded to the controller again even if it is already in the database
* Fix controller not reporting an error if invalid options are passed to Qemu command line. Fixes #2517
* Replace "Docker hub" by "Docker repository" because it is possible to use different repositories
* Fix unable to add NAT cloud after configuring "allowed_interfaces" in configuration file. Fixes #2508
* Fix interface information API endpoint for Cloud/NAT devices
* Upgrade Jinja2 to v3.1.6. Fixes #2515
* Fix packet capture for links connected to a cloud node. Fixes #2513
* fix: check if remote-install.sh is being run as root
## 2.2.54 21/04/2025
* Bundle web-ui v2.2.54
* Add new method to find the IP address of a VBox GNS3 VM + allow NAT Network
* Add edk2-stable202502 UEFI firmwares and try to first use firmwares from the ovmf package if installed. Fixes #2494
* Try to detect swtpm and AppArmor issue. Ref https://github.com/GNS3/gns3-gui/issues/3725
* Fix Docker logs decoding. Ref #2522
* Add delay after starting a Docker container and adding connections in uBridge. Ref #2522
* Fix TypeError when reading Docker container logs. Ref #2522
* Replace "Docker hub" by "Docker repository" because it is possible to use different repositories
* Upgrade dependencies
* Improvements for remote-install.sh
## 3.0.4 25/02/2025
* Require minimum 8 characters for passwords


@ -1,4 +1,4 @@
FROM ubuntu:focal
FROM ubuntu:noble
WORKDIR /gns3server
@ -34,4 +34,4 @@ COPY . /gns3server
RUN mkdir -p ~/.config/GNS3/3.0/
RUN cp scripts/gns3_server.conf ~/.config/GNS3/3.0/
RUN python3 -m pip install .
RUN python3 -m pip install --break-system-packages .


@ -99,6 +99,12 @@ Alternatively, you can run the GNS3 server in a container
bash scripts/docker_dev_server.sh
```
#### Use Docker Compose
``` {.bash}
docker compose up -d
```
### Running tests
First, install the development dependencies:

compose.yaml (new file)

@ -0,0 +1,7 @@
services:
gns3-server:
build:
context: .
dockerfile: Dockerfile
ports:
- "8001:3080"


@ -232,7 +232,7 @@ async def stop_cloud_capture(
await node.stop_capture(port_number)
@router.get("/{node_id}/adapters/{adapter_number}/ports/{port_number}/pcap")
@router.get("/{node_id}/adapters/{adapter_number}/ports/{port_number}/capture/stream")
async def stream_pcap_file(
*,
adapter_number: int = Path(..., ge=0, le=0),
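
The new `/capture/stream` route is an alias for the pcap endpoint, so clients can read packets incrementally while the capture is running. A hedged client sketch; the full URL depends on how the compute API is mounted, and authentication is omitted:

``` {.python}
import urllib.request

def save_capture_stream(stream_url: str, out_path: str, chunk_size: int = 4096) -> None:
    """Append a live pcap stream to a file until the server closes it."""
    with urllib.request.urlopen(stream_url) as resp, open(out_path, "wb") as f:
        while True:
            chunk = resp.read(chunk_size)
            if not chunk:  # server closed the stream
                break
            f.write(chunk)
```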


@ -148,11 +148,6 @@ async def upload_image(
if os.path.commonprefix([base_images_directory, full_path]) != base_images_directory:
raise ControllerForbiddenError(f"Cannot write image, '{image_path}' is forbidden")
image = await images_repo.get_image(image_path)
if image:
log.warning(f"Image '{image_path}' already exists")
return image
try:
allow_raw_image = Config.instance().settings.Server.allow_raw_images
image = await write_image(image_path, full_path, request.stream(), images_repo, allow_raw_image=allow_raw_image)


@ -24,7 +24,7 @@ import aiohttp
from fastapi import APIRouter, Depends, Request, status
from fastapi.responses import StreamingResponse
from fastapi.encoders import jsonable_encoder
from typing import List
from typing import List, Union
from uuid import UUID
from gns3server.controller import Controller
@ -285,3 +285,54 @@ async def stream_pcap(request: Request, link: Link = Depends(dep_link)) -> Strea
raise ControllerError(f"Client error received when receiving pcap stream from compute: {e}")
return StreamingResponse(compute_pcap_stream(), media_type="application/vnd.tcpdump.pcap")
@router.get(
"/{link_id}/iface",
response_model=Union[schemas.UDPPortInfo, schemas.EthernetPortInfo],
dependencies=[Depends(has_privilege("Link.Audit"))]
)
async def get_iface(link: Link = Depends(dep_link)) -> Union[schemas.UDPPortInfo, schemas.EthernetPortInfo]:
"""
Return iface info for links to Cloud or NAT devices.
Required privilege: Link.Audit
"""
ifaces_info = {}
for node_data in link._nodes:
node = node_data["node"]
if node.node_type not in ("cloud", "nat"):
continue
port_number = node_data["port_number"]
compute = node.compute
project_id = link.project.id
response = await compute.get(f"/projects/{project_id}/{node.node_type}/nodes/{node.id}")
if "ports_mapping" not in response.json:
continue
ports_mapping = response.json["ports_mapping"]
for port in ports_mapping:
port_num = port.get("port_number")
if port_num and int(port_num) == int(port_number):
port_type = port.get("type", "")
if "udp" in port_type.lower():
ifaces_info = {
"node_id": node.id,
"type": f"{port_type}",
"lport": port["lport"],
"rhost": port["rhost"],
"rport": port["rport"]
}
else:
ifaces_info = {
"node_id": node.id,
"type": f"{port_type}",
"interface": port["interface"],
}
if not ifaces_info:
raise ControllerError("Link not connected to Cloud/NAT")
return ifaces_info
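
Consumers can branch on the `type` field to tell the two payloads apart. A client sketch, assuming the links router is mounted at `/v3/projects/{project_id}/links` as in this release's tests, with authentication omitted:

``` {.python}
import json
import urllib.request

def get_link_iface(host: str, project_id: str, link_id: str) -> dict:
    url = f"http://{host}:3080/v3/projects/{project_id}/links/{link_id}/iface"
    with urllib.request.urlopen(url) as resp:
        info = json.loads(resp.read().decode("utf-8"))
    if "udp" in info["type"].lower():
        print(f"UDP tunnel: local {info['lport']} -> {info['rhost']}:{info['rport']}")
    else:
        print(f"Ethernet interface: {info['interface']}")
    return info
```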


@ -25,6 +25,14 @@
"options": "-cpu host -nographic"
},
"images": [
{
"filename": "AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2",
"version": "9.4",
"md5sum": "7c5040c044a989c524d40824cebb4a4d",
"filesize": 591724544,
"download_url": "https://vault.almalinux.org/9.4/cloud/x86_64/images/",
"direct_download_url": "https://vault.almalinux.org/9.4/cloud/x86_64/images/AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2"
},
{
"filename": "AlmaLinux-9-GenericCloud-9.2-20230513.x86_64.qcow2",
"version": "9.2",
@ -33,6 +41,14 @@
"download_url": "https://vault.almalinux.org/9.2/cloud/x86_64/images/",
"direct_download_url": "https://vault.almalinux.org/9.2/cloud/x86_64/images/AlmaLinux-9-GenericCloud-9.2-20230513.x86_64.qcow2"
},
{
"filename": "AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2",
"version": "8.9",
"md5sum": "1afc48c798960f0c6ebb65428c0ea973",
"filesize": 697434112,
"download_url": "https://vault.almalinux.org/8.9/cloud/x86_64/images/",
"direct_download_url": "https://vault.almalinux.org/8.9/cloud/x86_64/images/AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2"
},
{
"filename": "AlmaLinux-8-GenericCloud-8.8-20230524.x86_64.qcow2",
"version": "8.8",
@ -59,6 +75,13 @@
}
],
"versions": [
{
"name": "9.4",
"images": {
"hda_disk_image": "AlmaLinux-9-GenericCloud-9.4-20240805.x86_64.qcow2",
"cdrom_image": "almalinux-cloud-init-data.iso"
}
},
{
"name": "9.2",
"images": {
@ -66,6 +89,13 @@
"cdrom_image": "almalinux-cloud-init-data.iso"
}
},
{
"name": "8.9",
"images": {
"hda_disk_image": "AlmaLinux-8-GenericCloud-8.9-20231128.x86_64.qcow2",
"cdrom_image": "almalinux-cloud-init-data.iso"
}
},
{
"name": "8.8",
"images": {


@ -29,23 +29,23 @@
},
"images": [
{
"filename": "vEOS-lab-4.33.1F.qcow2",
"version": "4.33.1F",
"md5sum": "8f662409c0732ed9f682edce63601e8a",
"filesize": 611909632,
"filename": "vEOS64-lab-4.33.2F.qcow2",
"version": "4.33.2F",
"md5sum": "fbe629a8342cd0b3b19566b9d7ef4f4f",
"filesize": 610992128,
"download_url": "https://www.arista.com/en/support/software-download"
},
{
"filename": "vEOS-lab-4.32.3M.qcow2",
"version": "4.32.3M",
"md5sum": "46fc46f5ed1da8752eed8396f08862f8",
"filesize": 605683712,
"filename": "vEOS64-lab-4.32.4.1M.qcow2",
"version": "4.32.4.1M",
"md5sum": "cd369b5ccfd87ccd83a34538681ba35f",
"filesize": 605159424,
"download_url": "https://www.arista.com/en/support/software-download"
},
{
"filename": "vEOS-lab-4.31.6M.qcow2",
"filename": "vEOS64-lab-4.31.6M.qcow2",
"version": "4.31.6M",
"md5sum": "7410110b77472f058322ec4681f8a356",
"md5sum": "02fbd929de9416e1096cd2454507d6ce",
"filesize": 590479360,
"download_url": "https://www.arista.com/en/support/software-download"
},
@ -59,24 +59,24 @@
],
"versions": [
{
"name": "4.33.1F",
"name": "4.33.2F",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
"hdb_disk_image": "vEOS-lab-4.33.1F.qcow2"
"hdb_disk_image": "vEOS64-lab-4.33.2F.qcow2"
}
},
{
"name": "4.32.3M",
"name": "4.32.4.1M",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
"hdb_disk_image": "vEOS-lab-4.32.3M.qcow2"
"hdb_disk_image": "vEOS64-lab-4.32.4.1M.qcow2"
}
},
{
"name": "4.31.6M",
"images": {
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
"hdb_disk_image": "vEOS-lab-4.31.6M.qcow2"
"hdb_disk_image": "vEOS64-lab-4.31.6M.qcow2"
}
}
]


@ -32,6 +32,27 @@
"process_priority": "normal"
},
"images": [
{
"filename": "arubaoscx-disk-image-genericx86-p4-20241115202521.vmdk",
"version": "10.15.0005",
"md5sum": "69b3675391c04c1a6e0fd0bf8d8bc2d9",
"filesize": 420049408,
"download_url": "https://networkingsupport.hpe.com"
},
{
"filename": "arubaoscx-disk-image-genericx86-p4-20240731173624.vmdk",
"version": "10.14.1000",
"md5sum": "01d6932fbc9c46180a4f41aee9e98301",
"filesize": 412140544,
"download_url": "https://networkingsupport.hpe.com"
},
{
"filename": "arubaoscx-disk-image-genericx86-p4-20240528190307.vmdk",
"version": "10.14.0001",
"md5sum": "83902dbaa74c37cdda3f066f79431933",
"filesize": 401023488,
"download_url": "https://networkingsupport.hpe.com"
},
{
"filename": "arubaoscx-disk-image-genericx86-p4-20240129204649.vmdk",
"version": "10.13.1000",
@ -139,6 +160,24 @@
}
],
"versions": [
{
"name": "10.15.0005",
"images": {
"hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20241115202521.vmdk"
}
},
{
"name": "10.14.1000",
"images": {
"hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20240731173624.vmdk"
}
},
{
"name": "10.14.0001",
"images": {
"hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20240528190307.vmdk"
}
},
{
"name": "10.13.1000",
"images": {


@ -0,0 +1,50 @@
{
"appliance_id": "9e934470-d898-4289-a5ed-50af094e629e",
"name": "Asterfusion vAsterNOS campus",
"category": "multilayer_switch",
"description": "AsterNOS is the core technology of Asterfusions one-stop SONiC turnkey solution designed for cloud, enterprise, and AI-driven scenarios. AsterNOS v5.2 Campus is specifically designed for traditional campus networks, offering comprehensive L2/L3 capabilities suitable for various campus scenarios such as schools, office buildings, and hospitals. This version supports a fully cloud-integrated Layer 3 network architecture, providing rich routing and switching functionalities to ensure high-performance operation and ease of maintenance. It can also be deployed in the GNS3 simulation environment to experience a complete All-Layer 3 Cloud-Campus network. This version is ideal for enterprises and campus networks requiring high-performance multi-service transport, supporting cross-domain connectivity and providing nanosecond-level time synchronization. It is well-suited for applications with stringent time accuracy requirements, such as financial trading, industrial automation, and smart manufacturing. NOTICE: This appliance file is a virtualized version of AsterNOS and is intended to be used only to experience the basic functionality and industry standard CLI (Klish), not for official software testing. For more information about AsterNOS commercial version, please feel free to contact us via Email: bd@cloudswit.ch",
"vendor_name": "Asterfusion",
"vendor_url": "https://cloudswit.ch/",
"vendor_logo_url": "https://raw.githubusercontent.com/GNS3/gns3-registry/master/vendor-logos/asterfusion.png",
"documentation_url": "https://help.cloudswit.ch/portal/en/kb/articles/vasternos",
"product_name": "vAsterNOS",
"product_url": "https://cloudswit.ch/",
"registry_version": 4,
"status": "experimental",
"maintainer": "Asterfusion",
"maintainer_email": "bd@cloudswit.ch",
"usage": "The login is admin and the password is asteros",
"symbol": "asterfusion-vAsterNOS.svg",
"first_port_name": "eth0",
"port_name_format": "Ethernet{0}",
"qemu": {
"adapter_type": "e1000",
"adapters": 10,
"ram": 4096,
"cpus": 4,
"hda_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "telnet",
"boot_priority": "d",
"kvm": "require"
},
"images": [
{
"filename": "vAsterNOS-V5.2R012P01.img",
"version": "5.2-12-1",
"md5sum": "d18c0cfd786607ccc6dc1069a8f40465",
"filesize": 2823290880,
"download_url": "https://drive.cloudswitch.io/external/d29f6d0a6c8322fea42b3c08e95113d026b8ec6aafbe29193c338333077f3da7"
}
],
"versions": [
{
"name": "5.2-12-1",
"images": {
"hda_disk_image": "vAsterNOS-V5.2R012P01.img"
}
}
]
}


@ -13,7 +13,7 @@
"status": "experimental",
"maintainer": "Asterfusion",
"maintainer_email": "bd@cloudswit.ch",
"usage": "The login is admin, passwd asteros",
"usage": "The login is admin and the password is asteros",
"symbol": "asterfusion-vAsterNOS.svg",
"first_port_name": "eth0",
"port_name_format": "Ethernet{0}",


@ -27,44 +27,28 @@
},
"images": [
{
"filename": "CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2",
"version": "Stream-9 (20230704.1)",
"md5sum": "e04511e019325a97837edd9eafe02b48",
"filesize": 1087868416,
"filename": "CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2",
"version": "Stream-10 (20250331.0)",
"md5sum": "776033371ca346001dd6390f0cbaf0d0",
"filesize": 952041472,
"download_url": "https://cloud.centos.org/centos/10-stream/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/10-stream/x86_64/images/CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2"
},
{
"filename": "CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2",
"version": "Stream-9 (20250331.0)",
"md5sum": "4aaeddc6ca497065522c75a7471f9bfd",
"filesize": 1250625536,
"download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2"
"direct_download_url": "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2"
},
{
"filename": "CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2",
"version": "Stream-8 (20230710.0)",
"md5sum": "83e02ce98c29753c86fb7be7d802aa75",
"filesize": 1676164096,
"filename": "CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2",
"version": "Stream-8 (20240603.0)",
"md5sum": "77f3c9650785b8e977209796e09ee33e",
"filesize": 2003698688,
"download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2"
},
{
"filename": "CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2",
"version": "8.4 (2105)",
"md5sum": "032eed270415526546eac07628905a62",
"filesize": 1309652992,
"download_url": "https://cloud.centos.org/centos/8/x86_64/images",
"direct_download_url": "https://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2"
},
{
"filename": "CentOS-7-x86_64-GenericCloud-2111.qcow2",
"version": "7 (2111)",
"md5sum": "730b8662695831670721c8245be61dac",
"filesize": 897384448,
"download_url": "https://cloud.centos.org/centos/7/images",
"direct_download_url": "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-2111.qcow2"
},
{
"filename": "CentOS-7-x86_64-GenericCloud-1809.qcow2",
"version": "7 (1809)",
"md5sum": "da79108d1324b27bd1759362b82fbe40",
"filesize": 914948096,
"download_url": "https://cloud.centos.org/centos/7/images",
"direct_download_url": "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1809.qcow2"
"direct_download_url": "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2"
},
{
"filename": "centos-cloud-init-data.iso",
@ -77,37 +61,23 @@
],
"versions": [
{
"name": "Stream-9 (20230704.1)",
"name": "Stream-10 (20250331.0)",
"images": {
"hda_disk_image": "CentOS-Stream-GenericCloud-9-20230704.1.x86_64.qcow2",
"hda_disk_image": "CentOS-Stream-GenericCloud-x86_64-10-20250331.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "Stream-8 (20230710.0)",
"name": "Stream-9 (20250331.0)",
"images": {
"hda_disk_image": "CentOS-Stream-GenericCloud-8-20230710.0.x86_64.qcow2",
"hda_disk_image": "CentOS-Stream-GenericCloud-9-20250331.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "8.4 (2105)",
"name": "Stream-8 (20240603.0)",
"images": {
"hda_disk_image": "CentOS-8-GenericCloud-8.4.2105-20210603.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "7 (2111)",
"images": {
"hda_disk_image": "CentOS-7-x86_64-GenericCloud-2111.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
},
{
"name": "7 (1809)",
"images": {
"hda_disk_image": "CentOS-7-x86_64-GenericCloud-1809.qcow2",
"hda_disk_image": "CentOS-Stream-GenericCloud-8-20240603.0.x86_64.qcow2",
"cdrom_image": "centos-cloud-init-data.iso"
}
}


@ -30,6 +30,13 @@
"images": [
{
"filename": "EXOS-VM_32.7.2.19.qcow2",
"version": "32.7.2.19",
"md5sum": "eba580a2e18d2a9cc972c9ece8917ea8",
"filesize": 236847104,
"direct_download_url": "https://akamai-ep.extremenetworks.com/Extreme_P/github-en/Virtual_EXOS/EXOS-VM_32.7.2.19.qcow2"
},
{
"filename": "EXOS-VM_v32.6.3.126.qcow2",
"version": "32.6.3.126",
"md5sum": "5856b6c427bd605fe1c7adb6ee6b2659",
@ -41,6 +48,12 @@
"versions": [
{
"name": "32.7.2.19",
"images": {
"hda_disk_image": "EXOS-VM_32.7.2.19.qcow2"
}
},
{
"name": "32.6.3.126",
"images": {


@ -26,6 +26,22 @@
"options": "-nographic"
},
"images": [
{
"filename": "Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2",
"version": "41-1.4",
"md5sum": "8efc9edc04f38775de72ce067166b2a1",
"filesize": 491716608,
"download_url": "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Cloud/x86_64/images",
"direct_download_url": "https://fedora.mirrorservice.org/fedora/linux/releases/41/Cloud/x86_64/images/Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2"
},
{
"filename": "Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2",
"version": "40-1.14",
"md5sum": "3eed4b1a9de35208ed30d9bb72c1522d",
"filesize": 397475840,
"download_url": "https://download.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/x86_64/images",
"direct_download_url": "https://fedora.mirrorservice.org/fedora/linux/releases/40/Cloud/x86_64/images/Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2"
},
{
"filename": "Fedora-Cloud-Base-39-1.5.x86_64.qcow2",
"version": "39-1.5",
@ -52,6 +68,20 @@
}
],
"versions": [
{
"name": "41-1.4",
"images": {
"hda_disk_image": "Fedora-Cloud-Base-Generic-41-1.4.x86_64.qcow2",
"cdrom_image": "fedora-cloud-init-data.iso"
}
},
{
"name": "40-1.14",
"images": {
"hda_disk_image": "Fedora-Cloud-Base-Generic.x86_64-40-1.14.qcow2",
"cdrom_image": "fedora-cloud-init-data.iso"
}
},
{
"name": "39-1.5",
"images": {


@ -37,26 +37,69 @@
"download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/OVMF-edk2-stable202305.fd.zip/download",
"compression": "zip"
},
{
},
{
"filename": "infix-x86_64-disk-24.11.1.img",
"filesize": 536870912,
"md5sum": "673a123fe122d1c2f5724baf9965a19d",
"version": "24.11.1",
"download_url": "https://github.com/kernelkit/infix/releases/download/v24.11.1/infix-x86_64-24.11.1.tar.gz",
"compression": "gzip"
},
{
},
{
"filename": "infix-x86_64-disk-25.01.0.img",
"filesize": 536870912,
"md5sum": "a814d93b385116b4a35712c445b5f830",
"version": "25.01.0",
"download_url": "https://github.com/kernelkit/infix/releases/download/v25.01.0/infix-x86_64-25.01.0.tar.gz",
"compression": "gzip"
},
{
"filename": "infix-x86_64-disk-25.02.0.img",
"filesize": 536870912,
"md5sum": "8e29474c97df3486eb063a8af5043f50",
"version": "25.02.0",
"download_url": "https://github.com/kernelkit/infix/releases/download/v25.02.0/infix-x86_64-25.02.0.tar.gz",
"compression": "gzip"
},
{
"filename": "infix-x86_64-disk-25.03.0.img",
"filesize": 536870912,
"md5sum": "5e1ed1081cd1673bfed4a9b5b1c58e08",
"version": "25.03.0",
"download_url": "https://github.com/kernelkit/infix/releases/download/v25.03.0/infix-x86_64-25.03.0.tar.gz",
"compression": "gzip"
},
{
"filename": "infix-x86_64-disk-25.04.0.qcow2",
"filesize": 259723776,
"md5sum": "84bd999513325d0007d0e6587abc6140",
"version": "25.04.0",
"direct_download_url": "https://github.com/kernelkit/infix/releases/download/v25.04.0/infix-x86_64-disk-25.04.0.qcow2"
}
],
"versions": [
{
"name": "25.04.0",
"images": {
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "infix-x86_64-disk-25.04.0.qcow2"
}
},
{
"name": "25.03.0",
"images": {
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "infix-x86_64-disk-25.03.0.img"
}
},
{
"name": "25.02.0",
"images": {
"bios_image": "OVMF-edk2-stable202305.fd",
"hda_disk_image": "infix-x86_64-disk-25.02.0.img"
}
},
{
"name": "25.01.0",
"images": {


@ -0,0 +1,75 @@
{
"appliance_id": "12394e0d-9ac5-4da5-8e91-94a462536b61",
"name": "vJunos-router",
"category": "router",
"description": "vJunos-router",
"vendor_name": "Juniper",
"vendor_url": "https://www.juniper.net",
"documentation_url": "https://www.juniper.net/documentation/product/us/en/vjunos-router/",
"product_name": "vJunos Router",
"registry_version": 6,
"status": "stable",
"availability": "free",
"maintainer": "AAm-kun",
"maintainer_email": "github@sugarpapa.mozmail.com",
"usage": "GNS3 SHOULD be a baremetal installation. Using the GNS3 VM MIGHT result in unwanted issues. Default user is root. No password is needed.",
"symbol": "juniper-vmx.svg",
"first_port_name": "ge-0/0/0",
"port_name_format": "ge-0/0/{port0}",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 17,
"ram": 5120,
"cpus": 4,
"hda_disk_interface": "virtio",
"arch": "x86_64",
"console_type": "telnet",
"kvm": "require",
"options": "-serial mon:stdio -nographic -smbios type=1,product=VM-VMX,family=lab -cpu host",
"on_close": "power_off"
},
"images": [
{
"filename": "vJunos-router-24.2R1-S2.qcow2",
"version": "24.2R1-S2",
"md5sum": "dd906b4d19463e22f3e1a297ff1a7464",
"filesize": 3675783168,
"download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
},
{
"filename": "vJunos-router-23.4R2-S2.1.qcow2",
"version": "23.4R2-S2.1",
"md5sum": "e25f5acdfc6c076d0023fd8289bcdd89",
"filesize": 3644063744,
"download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
},
{
"filename": "vJunos-router-23.2R1.15.qcow2",
"version": "23.2R1.15",
"md5sum": "18670fb67633822697fdd3cf982e7eb1",
"filesize": 3653566464,
"download_url": "https://support.juniper.net/support/downloads/?p=vjunos-router"
}
],
"versions": [
{
"images": {
"hda_disk_image": "vJunos-router-24.2R1-S2.qcow2"
},
"name": "24.2R1-S2"
},
{
"images": {
"hda_disk_image": "vJunos-router-23.4R2-S2.1.qcow2"
},
"name": "23.4R2-S2.1"
},
{
"images": {
"hda_disk_image": "vJunos-router-23.2R1.15.qcow2"
},
"name": "23.2R1.15"
}
]
}


@ -0,0 +1,44 @@
{
"appliance_id": "bb9ff73a-701e-40e8-b68a-6a6efeb04e99",
"name": "NethSecurity",
"category": "firewall",
"description": "NethSecurity is an Unified Threat Management (UTM) solution that provides a comprehensive suite of security features, including firewall, content filtering, deep packet inspection (DPI) using Netifyd, Dedalo hotspot, OpenVPN, and an optional remote controller. It is designed to be easy to install and configure, making it a good choice for both small and medium-sized businesses (SMBs) as well as enterprise organizations.",
"vendor_name": "Nethesis",
"vendor_url": "https://www.nethesis.it/",
"documentation_url": "https://docs.nethsecurity.org/en/latest/",
"product_name": "NethSecurity",
"product_url": "https://nethsecurity.org/",
"registry_version": 4,
"status": "stable",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Ethernet0 is the LAN link, Ethernet1 the WAN link. The default username is root and the password is Nethesis,1234",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 2,
"ram": 1024,
"hda_disk_interface": "scsi",
"arch": "x86_64",
"console_type": "telnet",
"kvm": "allow"
},
"images": [
{
"filename": "nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img",
"version": "8-24.10.0-ns.1.5.1",
"md5sum": "be670218effca1b86dac6b8d95012791",
"filesize": 331644416,
"download_url": "https://nethsecurity.org/download",
"direct_download_url": "https://updates.nethsecurity.nethserver.org/stable/8-24.10.0-ns.1.5.1/targets/x86/64/nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img.gz",
"compression": "gzip"
}
],
"versions": [
{
"name": "8-24.10.0-ns.1.5.1",
"images": {
"hda_disk_image": "nethsecurity-8-24.10.0-ns.1.5.1-x86-64-generic-squashfs-combined-efi.img"
}
}
]
}


@ -26,6 +26,14 @@
"options": "-cpu host -nographic"
},
"images": [
{
"filename": "OL9U5_x86_64-kvm-b259.qcow2",
"version": "9.5",
"md5sum": "05e9b62c408ab49a02d6833fc683d1ad",
"filesize": 652935168,
"download_url": "https://yum.oracle.com/oracle-linux-templates.html",
"direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL9/u5/x86_64/OL9U5_x86_64-kvm-b259.qcow2"
},
{
"filename": "OL9U2_x86_64-kvm-b197.qcow",
"version": "9.2",
@ -42,6 +50,14 @@
"download_url": "https://yum.oracle.com/oracle-linux-templates.html",
"direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL9/u1/x86_64/OL9U1_x86_64-kvm-b158.qcow"
},
{
"filename": "OL8U10_x86_64-kvm-b258.qcow2",
"version": "8.10",
"md5sum": "bb07581af5122515b6822595ded5deef",
"filesize": 1251672064,
"download_url": "https://yum.oracle.com/oracle-linux-templates.html",
"direct_download_url": "https://yum.oracle.com/templates/OracleLinux/OL8/u10/x86_64/OL8U10_x86_64-kvm-b258.qcow2"
},
{
"filename": "OL8U8_x86_64-kvm-b198.qcow",
"version": "8.8",
@ -76,7 +92,14 @@
}
],
"versions": [
{
{
"name": "9.5",
"images": {
"hda_disk_image": "OL9U5_x86_64-kvm-b259.qcow2",
"cdrom_image": "oracle-cloud-init-data.iso"
}
},
{
"name": "9.2",
"images": {
"hda_disk_image": "OL9U2_x86_64-kvm-b197.qcow",
@ -90,6 +113,13 @@
"cdrom_image": "oracle-cloud-init-data.iso"
}
},
{
"name": "8.10",
"images": {
"hda_disk_image": "OL8U10_x86_64-kvm-b258.qcow2",
"cdrom_image": "oracle-cloud-init-data.iso"
}
},
{
"name": "8.8",
"images": {


@ -13,7 +13,7 @@
"availability": "service-contract",
"maintainer": "Da-Geek",
"maintainer_email": "dageek@dageeks-geeks.gg",
"usage": "You should download Red Hat Enterprise Linux KVM Guest Image from https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.3/x86_64/product-software attach/customize rhel-cloud-init.iso and start.\nusername: cloud-user\npassword: redhat",
"usage": "You should download Red Hat Enterprise Linux KVM Guest Image from https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.5/x86_64/product-software attach/customize rhel-cloud-init.iso and start.\nusername: cloud-user\npassword: redhat",
"qemu": {
"adapter_type": "virtio-net-pci",
"adapters": 1,
@ -26,6 +26,20 @@
"options": "-cpu host -nographic"
},
"images": [
{
"filename": "rhel-9.5-x86_64-kvm.qcow2",
"version": "9.5",
"md5sum": "8174396d5cb47727c59dd04dd9a05418",
"filesize": 974389248,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.5/x86_64/product-software"
},
{
"filename": "rhel-9.4-x86_64-kvm.qcow2",
"version": "9.4",
"md5sum": "77a2ca9a4cb0448260e04f0d2ebf9807",
"filesize": 957218816,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---9/9.4/x86_64/product-software"
},
{
"filename": "rhel-9.3-x86_64-kvm.qcow2",
"version": "9.3",
@ -54,6 +68,20 @@
"filesize": 696582144,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/9.0/x86_64/product-software"
},
{
"filename": "rhel-8.10-x86_64-kvm.qcow2",
"version": "8.10",
"md5sum": "5fda99fcab47e3b235c6ccdb6e80d362",
"filesize": 1065091072,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.10/x86_64/product-software"
},
{
"filename": "rhel-8.9-x86_64-kvm.qcow2",
"version": "8.9",
"md5sum": "23295fe508678cbdebfbdbd41ef6e6e2",
"filesize": 971833344,
"download_url": "https://access.redhat.com/downloads/content/479/ver=/rhel---8/8.9/x86_64/product-software"
},
{
"filename": "rhel-8.8-x86_64-kvm.qcow2",
"version": "8.8",
@ -119,6 +147,20 @@
}
],
"versions": [
{
"name": "9.5",
"images": {
"hda_disk_image": "rhel-9.5-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "9.4",
"images": {
"hda_disk_image": "rhel-9.4-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "9.3",
"images": {
@ -147,6 +189,20 @@
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "8.10",
"images": {
"hda_disk_image": "rhel-8.10-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "8.9",
"images": {
"hda_disk_image": "rhel-8.9-x86_64-kvm.qcow2",
"cdrom_image": "rhel-cloud-init.iso"
}
},
{
"name": "8.8",
"images": {


@ -26,6 +26,14 @@
"options": "-nographic -cpu host"
},
"images": [
{
"filename": "Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2",
"version": "9.5",
"md5sum": "880eccf788301bb9f34669faebe09276",
"filesize": 609812480,
"download_url": "https://download.rockylinux.org/pub/rocky/9/images/x86_64/",
"direct_download_url": "https://download.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2"
},
{
"filename": "Rocky-9-GenericCloud-Base-9.3-20231113.0.x86_64.qcow2",
"version": "9.3",
@ -68,6 +76,13 @@
}
],
"versions": [
{
"name": "9.5",
"images": {
"hda_disk_image": "Rocky-9-GenericCloud-Base-9.5-20241118.0.x86_64.qcow2",
"cdrom_image": "rocky-cloud-init-data.iso"
}
},
{
"name": "9.3",
"images": {


@ -37,7 +37,7 @@ class Nat(Cloud):
def __init__(self, name, node_id, project, manager, ports=None):
allowed_interfaces = Config.instance().settings.Server.allowed_interfaces
if allowed_interfaces:
if allowed_interfaces and isinstance(allowed_interfaces, str):
allowed_interfaces = allowed_interfaces.split(',')
if sys.platform.startswith("linux"):
nat_interface = Config.instance().settings.Server.default_nat_interface
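
The added `isinstance` guard matters because the setting can arrive either as a comma-separated string from the configuration file or as an already-parsed list; calling `.split()` on a list raises AttributeError. A standalone sketch of the same normalization:

``` {.python}
def normalize_allowed_interfaces(allowed):
    """Accept None, a comma-separated string, or a list of interface names."""
    if not allowed:
        return []
    if isinstance(allowed, str):
        return [name.strip() for name in allowed.split(",")]
    return list(allowed)

assert normalize_allowed_interfaces("eth0, eth1") == ["eth0", "eth1"]
assert normalize_allowed_interfaces(["eth0"]) == ["eth0"]
assert normalize_allowed_interfaces(None) == []
```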


@ -175,11 +175,10 @@ class Docker(BaseManager):
response = await self.http_query(method, path, data=data, params=params)
body = await response.read()
response.close()
if body and len(body):
if response.headers.get('CONTENT-TYPE') == 'application/json':
body = json.loads(body.decode("utf-8"))
else:
body = body.decode("utf-8")
if response.headers.get('CONTENT-TYPE') == 'application/json':
body = json.loads(body.decode("utf-8", errors="ignore"))
else:
body = body.decode("utf-8", errors="ignore")
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
return body
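
The change also drops the `if body and len(body)` guard and, more importantly, decodes with `errors="ignore"`: Docker logs can contain bytes that are not valid UTF-8, which made the strict decode raise (Ref #2522). A small illustration:

``` {.python}
raw = b"container log \xff\xfe with invalid utf-8"

try:
    raw.decode("utf-8")  # strict decode raises on the stray bytes
except UnicodeDecodeError as e:
    print("strict decode failed:", e.reason)

print(raw.decode("utf-8", errors="ignore"))  # lossy, but never raises
```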
@ -267,12 +266,12 @@ class Docker(BaseManager):
pass
if progress_callback:
progress_callback(f"Pulling '{image}' from docker hub")
progress_callback(f"Pulling '{image}' from Docker repository")
try:
response = await self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
except DockerError as e:
raise DockerError(
f"Could not pull the '{image}' image from Docker Hub, "
f"Could not pull the '{image}' image from Docker repository, "
f"please check your Internet connection (original error: {e})"
)
# The pull api will stream status via an HTTP JSON stream
@ -281,10 +280,10 @@ class Docker(BaseManager):
try:
chunk = await response.content.read(CHUNK_SIZE)
except aiohttp.ServerDisconnectedError:
log.error(f"Disconnected from server while pulling Docker image '{image}' from docker hub")
log.error(f"Disconnected from server while pulling Docker image '{image}' from Docker repository")
break
except asyncio.TimeoutError:
log.error(f"Timeout while pulling Docker image '{image}' from docker hub")
log.error("Timeout while pulling Docker image '{}' from Docker repository".format(image))
break
if not chunk:
break


@ -437,7 +437,7 @@ class DockerVM(BaseNode):
try:
image_infos = await self._get_image_information()
except DockerHttp404Error:
log.info(f"Image '{self._image}' is missing, pulling it from Docker hub...")
log.info("Image '{}' is missing, pulling it from Docker repository...".format(self._image))
await self.pull_image(self._image)
image_infos = await self._get_image_information()
@ -617,6 +617,7 @@ class DockerVM(BaseNode):
await self._clean_servers()
await self.manager.query("POST", f"containers/{self._cid}/start")
await asyncio.sleep(0.5) # give the Docker container some time to start
self._namespace = await self._get_namespace()
await self._start_ubridge(require_privileged_access=True)


@ -32,6 +32,7 @@ import subprocess
import time
import json
import shlex
import psutil
from gns3server.utils import parse_version
from gns3server.utils.asyncio import subprocess_check_output, cancellable_wait_run_in_executor
@ -265,17 +266,10 @@ class QemuVM(BaseNode):
if qemu_bin == "qemu":
self._platform = "i386"
else:
self._platform = re.sub(r'^qemu-system-(\w+).*$', r'\1', qemu_bin, re.IGNORECASE)
try:
QemuPlatform(self._platform.split(".")[0])
except ValueError:
self._platform = re.sub(r'^qemu-system-(\w+).*$', r'\1', qemu_bin, flags=re.IGNORECASE)
if self._platform.split(".")[0] not in list(QemuPlatform):
raise QemuError(f"Platform {self._platform} is unknown")
log.info(
'QEMU VM "{name}" [{id}] has set the QEMU path to {qemu_path}'.format(
name=self._name, id=self._id, qemu_path=qemu_path
)
)
log.info(f'QEMU VM "{self._name}" [{self._id}] has set the QEMU path to {qemu_path}')
def _check_qemu_path(self, qemu_path):
@ -1225,6 +1219,21 @@ class QemuVM(BaseNode):
except OSError as e:
raise QemuError(f"Could not start Telnet QEMU console {e}\n")
def _find_partition_for_path(self, path):
"""
Finds the disk partition for a given path.
"""
path = os.path.abspath(path)
partitions = psutil.disk_partitions()
# find the partition with the longest matching mount point
matching_partition = None
for partition in partitions:
if path.startswith(partition.mountpoint):
if matching_partition is None or len(partition.mountpoint) > len(matching_partition.mountpoint):
matching_partition = partition
return matching_partition
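
The helper picks the partition whose mount point is the longest prefix of the path, i.e. the most specific mount. The same logic run standalone (output depends on the local mount table):

``` {.python}
import os
import psutil

path = os.path.abspath("/home/user/GNS3/projects/demo")
best = None
for part in psutil.disk_partitions():
    if path.startswith(part.mountpoint):
        # keep the most specific (longest) matching mount point
        if best is None or len(part.mountpoint) > len(best.mountpoint):
            best = part
print(best.mountpoint if best else "no matching partition")
# e.g. "/home" rather than "/" when /home is a separate partition
```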
async def _termination_callback(self, returncode):
"""
Called when the process has stopped.
@ -1236,9 +1245,19 @@ class QemuVM(BaseNode):
log.info("QEMU process has stopped, return code: %d", returncode)
await self.stop()
if returncode != 0:
qemu_stdout = self.read_stdout()
# additional permissions need to be configured for swtpm in AppArmor if the working dir
# is located on a different partition than the partition for the root directory
if "TPM result for CMD_INIT" in qemu_stdout:
partition = self._find_partition_for_path(self.project.path)
if partition and partition.mountpoint != "/":
qemu_stdout += "\nTPM error: the project directory is not on the same partition as the root directory which can be a problem when using AppArmor.\n" \
"Please try to execute the following commands on the server:\n\n" \
"echo 'owner {}/** rwk,' | sudo tee /etc/apparmor.d/local/usr.bin.swtpm > /dev/null\n" \
"sudo service apparmor restart".format(os.path.dirname(self.project.path))
self.project.emit(
"log.error",
{"message": f"QEMU process has stopped, return code: {returncode}\n{self.read_stdout()}"},
{"message": f"QEMU process has stopped, return code: {returncode}\n{qemu_stdout}"},
)
async def stop(self):
@ -2287,19 +2306,42 @@ class QemuVM(BaseNode):
else:
raise QemuError(f"bios image '{self._bios_image}' is not accessible")
options.extend(["-bios", self._bios_image.replace(",", ",,")])
elif self._uefi:
# get the OVMF firmware from the images directory
ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE.fd")
old_ovmf_vars_path = os.path.join(self.working_dir, "OVMF_VARS.fd")
if os.path.exists(old_ovmf_vars_path):
# the node has its own UEFI variables store already, we must also use the old UEFI firmware
ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE.fd")
else:
system_ovmf_firmware_path = "/usr/share/OVMF/OVMF_CODE_4M.fd"
if os.path.exists(system_ovmf_firmware_path):
ovmf_firmware_path = system_ovmf_firmware_path
else:
# otherwise, get the UEFI firmware from the images directory
ovmf_firmware_path = self.manager.get_abs_image_path("OVMF_CODE_4M.fd")
log.info("Configuring UEFI boot mode using OVMF file: '{}'".format(ovmf_firmware_path))
options.extend(["-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_firmware_path)])
# try to use the UEFI variables store from the system first
system_ovmf_vars_path = "/usr/share/OVMF/OVMF_VARS_4M.fd"
if os.path.exists(system_ovmf_vars_path):
ovmf_vars_path = system_ovmf_vars_path
else:
# otherwise, get the UEFI variables store from the images directory
ovmf_vars_path = self.manager.get_abs_image_path("OVMF_VARS_4M.fd")
# the node should have its own copy of OVMF_VARS.fd (the UEFI variables store)
ovmf_vars_node_path = os.path.join(self.working_dir, "OVMF_VARS.fd")
if not os.path.exists(ovmf_vars_node_path):
try:
shutil.copyfile(self.manager.get_abs_image_path("OVMF_VARS.fd"), ovmf_vars_node_path)
except OSError as e:
raise QemuError("Cannot copy OVMF_VARS.fd file to the node working directory: {}".format(e))
if os.path.exists(old_ovmf_vars_path):
ovmf_vars_node_path = old_ovmf_vars_path
else:
ovmf_vars_node_path = os.path.join(self.working_dir, "OVMF_VARS_4M.fd")
if not os.path.exists(ovmf_vars_node_path):
try:
shutil.copyfile(ovmf_vars_path, ovmf_vars_node_path)
except OSError as e:
raise QemuError("Cannot copy OVMF_VARS_4M.fd file to the node working directory: {}".format(e))
options.extend(["-drive", "if=pflash,format=raw,file={}".format(ovmf_vars_node_path)])
return options
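
To summarize the new lookup order: a node that already has a legacy `OVMF_VARS.fd` store keeps the old firmware pair; otherwise the 4M firmware and variables template are taken from the distribution's ovmf package when present, falling back to the GNS3 images directory. A condensed sketch of that precedence (not the verbatim source):

``` {.python}
import os

def pick_uefi_firmware(working_dir, get_abs_image_path):
    """Return (firmware_path, vars_template_path) per the new lookup order."""
    legacy_vars = os.path.join(working_dir, "OVMF_VARS.fd")
    if os.path.exists(legacy_vars):
        # node predates the 4M images: keep the matching legacy firmware
        return get_abs_image_path("OVMF_CODE.fd"), legacy_vars
    code = "/usr/share/OVMF/OVMF_CODE_4M.fd"
    if not os.path.exists(code):
        code = get_abs_image_path("OVMF_CODE_4M.fd")
    vars_template = "/usr/share/OVMF/OVMF_VARS_4M.fd"
    if not os.path.exists(vars_template):
        vars_template = get_abs_image_path("OVMF_VARS_4M.fd")
    return code, vars_template
```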
@ -2396,16 +2438,13 @@ class QemuVM(BaseNode):
) # we do not want any user networking back-end if no adapter is connected.
# For every 32 PCI devices we need to add a PCI bridge, with a maximum of 9 bridges
pci_devices = 4 + len(self._ethernet_adapters) # 4 PCI devices are used by default by qemu
pci_bridges = math.floor(pci_devices / 32)
# Reserve 32 devices on root pci_bridge,
# since the number of devices used by templates may differ significantly
# and pci_bridges also consume IDs.
# Move network devices to their own bridge
pci_devices_reserved = 32
pci_bridges_created = 0
if pci_bridges >= 1:
if self._qemu_version and parse_version(self._qemu_version) < parse_version("2.4.0"):
raise QemuError(
"Qemu version 2.4 or later is required to run this VM with a large number of network adapters"
)
pci_device_id = 4 + pci_bridges # Bridge consume PCI ports
pci_device_id = pci_devices_reserved
for adapter_number, adapter in enumerate(self._ethernet_adapters):
mac = int_to_macaddress(macaddress_to_int(self._mac_address) + adapter_number)
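
With 32 device IDs reserved on the root bus, the first network adapter gets device ID 32, which lands on bridge 1 at address 0x00; this matches the `bus=pci-bridge1,addr=0x00` expectations in the updated tests. The arithmetic, inferred from those tests rather than quoted from the source:

``` {.python}
pci_devices_reserved = 32  # root bus is reserved for non-network devices

for adapter_number in range(3):
    device_id = pci_devices_reserved + adapter_number
    bridge, addr = divmod(device_id, 32)
    print(f"adapter {adapter_number}: bus=pci-bridge{bridge}, addr={addr:#04x}")
# adapter 0: bus=pci-bridge1, addr=0x00
# adapter 1: bus=pci-bridge1, addr=0x01
# adapter 2: bus=pci-bridge1, addr=0x02
```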
@ -2596,6 +2635,8 @@ class QemuVM(BaseNode):
"""
self._qemu_version = await self.manager.get_qemu_version(self.qemu_path)
if self._qemu_version and parse_version(self._qemu_version) < parse_version("2.4.0"):
raise QemuError("Qemu version 2.4 or later is required to run Qemu VMs")
vm_name = self._name.replace(",", ",,")
project_path = self.project.path.replace(",", ",,")
additional_options = self._options.strip()


@ -458,10 +458,11 @@ class Compute:
# FIXME: slow down number of compute events
self._controller.notification.controller_emit("compute.updated", self.asdict())
else:
if action == "log.error":
log.error(event.pop("message"))
await self._controller.notification.dispatch(
action, event, project_id=project_id, compute_id=self.id
action,
event,
project_id=project_id,
compute_id=self.id
)
else:
if response.type == aiohttp.WSMsgType.CLOSE:


@ -249,6 +249,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
return True
return False
async def list(self):
"""
List all VirtualBox VMs
@ -269,8 +270,8 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
# get a NAT interface number
nat_interface_number = await self._look_for_interface("nat")
if nat_interface_number < 0:
raise GNS3VMError(f'VM "{self.vmname}" must have a NAT interface configured in order to start')
if nat_interface_number < 0 and await self._look_for_interface("natnetwork") < 0:
raise GNS3VMError(f'VM "{self.vmname}" must have a NAT interface or NAT Network configured in order to start')
if sys.platform.startswith("darwin") and parse_version(self._system_properties["API version"]) >= parse_version("7_0"):
# VirtualBox 7.0+ on macOS requires a host-only network interface
@ -339,42 +340,68 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
elif vm_state == "paused":
args = [self._vmname, "resume"]
await self._execute("controlvm", args)
ip_address = "127.0.0.1"
try:
# get a random port on localhost
with socket.socket() as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((ip_address, 0))
api_port = s.getsockname()[1]
except OSError as e:
raise GNS3VMError(f"Error while getting random port: {e}")
if await self._check_vbox_port_forwarding():
# delete the GNS3VM NAT port forwarding rule if it exists
log.info(f"Removing GNS3VM NAT port forwarding rule from interface {nat_interface_number}")
await self._execute("controlvm", [self._vmname, f"natpf{nat_interface_number}", "delete", "GNS3VM"])
log.info("Retrieving IP address from GNS3 VM...")
ip = await self._get_ip_from_guest_property()
if ip:
self.ip_address = ip
else:
# if we can't get the IP address from the guest property, we try to get it from the GNS3 server (a NAT interface is required)
if nat_interface_number < 0:
raise GNS3VMError("Could not find guest IP address for {}".format(self.vmname))
log.warning("Could not find IP address from guest property, trying to get it from GNS3 server")
ip_address = "127.0.0.1"
try:
# get a random port on localhost
with socket.socket() as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((ip_address, 0))
api_port = s.getsockname()[1]
except OSError as e:
raise GNS3VMError("Error while getting random port: {}".format(e))
# add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to the port in the VM
log.info(f"Adding GNS3VM NAT port forwarding rule with port {api_port} to interface {nat_interface_number}")
await self._execute(
"controlvm",
[self._vmname, f"natpf{nat_interface_number}", f"GNS3VM,tcp,{ip_address},{api_port},,{self.port}"],
)
if await self._check_vbox_port_forwarding():
# delete the GNS3VM NAT port forwarding rule if it exists
log.info("Removing GNS3VM NAT port forwarding rule from interface {}".format(nat_interface_number))
await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"])
self.ip_address = await self._get_ip(interface_number, api_port)
log.info("GNS3 VM has been started with IP {}".format(self.ip_address))
# add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to the port in the VM
log.info("Adding GNS3VM NAT port forwarding rule with port {} to interface {}".format(api_port, nat_interface_number))
await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number),
"GNS3VM,tcp,{},{},,{}".format(ip_address, api_port, self.port)])
self.ip_address = await self._get_ip_from_server(interface_number, api_port)
log.info("GNS3 VM has been started with IP '{}'".format(self.ip_address))
self.running = True
async def _get_ip(self, hostonly_interface_number, api_port):
async def _get_ip_from_guest_property(self):
"""
Get the IP from VirtualBox.
Get the IP from VirtualBox by retrieving the guest property (Guest Additions must be installed).
"""
remaining_try = 180 # try for 3 minutes
while remaining_try > 0:
result = await self._execute("guestproperty", ["get", self._vmname, "/VirtualBox/GuestInfo/Net/0/V4/IP"])
for info in result.splitlines():
if ':' in info:
name, value = info.split(':', 1)
if name == "Value":
return value.strip()
remaining_try -= 1
await asyncio.sleep(1)
return None
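
`VBoxManage guestproperty get <vm> /VirtualBox/GuestInfo/Net/0/V4/IP` prints either `No value set!` or a line like `Value: 192.168.56.101` (Guest Additions must be running); the loop above keys on the `Value:` prefix. The same parsing in isolation:

``` {.python}
def parse_guestproperty(output: str):
    """Extract the IP from 'VBoxManage guestproperty get' output, if present."""
    for line in output.splitlines():
        if ":" in line:
            name, value = line.split(":", 1)
            if name == "Value":
                return value.strip()
    return None

assert parse_guestproperty("Value: 192.168.56.101") == "192.168.56.101"
assert parse_guestproperty("No value set!") is None
```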
async def _get_ip_from_server(self, hostonly_interface_number, api_port):
"""
Get the IP from VirtualBox by sending a request to the GNS3 server.
Due to VirtualBox limitation the only way is to send request each
second to a GNS3 endpoint in order to get the list of the interfaces and
their IP and after that match it with VirtualBox host only.
"""
remaining_try = 300
remaining_try = 180 # try for 3 minutes
while remaining_try > 0:
try:
async with HTTPClient.get(f"http://127.0.0.1:{api_port}/v3/compute/network/interfaces") as resp:


@ -58,7 +58,7 @@ class CrashReport:
Report crash to a third party service
"""
DSN = "https://0d64280ffb5ae409d448f255b9956a88@o19455.ingest.us.sentry.io/38482"
DSN = "https://61bb46252cabeebd49ee1e09fb8ba72e@o19455.ingest.us.sentry.io/38482"
_instance = None
def __init__(self):

Binary file not shown.

Binary file not shown.


@ -20,7 +20,7 @@ from .common import ErrorMessage
from .version import Version
# Controller schemas
from .controller.links import LinkCreate, LinkUpdate, Link
from .controller.links import LinkCreate, LinkUpdate, Link, UDPPortInfo, EthernetPortInfo
from .controller.computes import ComputeCreate, ComputeUpdate, ComputeVirtualBoxVM, ComputeVMwareVM, ComputeDockerImage, AutoIdlePC, Compute
from .controller.templates import TemplateCreate, TemplateUpdate, TemplateUsage, Template
from .controller.images import Image, ImageType


@ -92,3 +92,24 @@ class Link(LinkBase):
None,
description="Read only property. The compute identifier where a capture is running"
)
class UDPPortInfo(BaseModel):
"""
UDP port information.
"""
node_id: UUID
lport: int
rhost: str
rport: int
type: str
class EthernetPortInfo(BaseModel):
"""
Ethernet port information.
"""
node_id: UUID
interface: str
type: str
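
With `response_model=Union[UDPPortInfo, EthernetPortInfo]` on the `/iface` route, FastAPI validates the returned dict against each model in turn. A standalone check of the Ethernet shape, mirroring the schema above (pydantic v2, as pinned in requirements.txt):

``` {.python}
from uuid import UUID, uuid4
from pydantic import BaseModel

class EthernetPortInfo(BaseModel):  # mirrors the schema above
    node_id: UUID
    interface: str
    type: str

info = EthernetPortInfo(node_id=uuid4(), interface="eth0", type="ethernet")
print(info.model_dump_json())
```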


@ -46,6 +46,6 @@
gtag('config', 'G-0BT7QQV1W1');
</script>
<script src="runtime.24fa95b7061d7056.js" type="module"></script><script src="polyfills.319c79dd175e50d0.js" type="module"></script><script src="main.87178dd64c9c79ba.js" type="module"></script>
<script src="runtime.24fa95b7061d7056.js" type="module"></script><script src="polyfills.319c79dd175e50d0.js" type="module"></script><script src="main.fd9d76d279fa7d5e.js" type="module"></script>
</body></html>

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -342,6 +342,12 @@ async def write_image(
allow_raw_image=False
) -> models.Image:
db_image = await images_repo.get_image(image_path)
if db_image and os.path.exists(image_path):
# the image already exists in the database and on disk
log.info(f"Image {image_path} already exists")
return db_image
image_dir, image_name = os.path.split(image_filename)
log.info(f"Writing image file to '{image_path}'")
# Store the file under its final name only when the upload is completed
@ -381,6 +387,10 @@ async def write_image(
except OSError:
log.warning(f"Could not remove '{tmp_path}'")
if db_image:
# the image already exists in the database, no need to add it again
return db_image
return await images_repo.add_image(
image_name,
image_type,


@ -22,8 +22,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
__version__ = "3.0.4"
__version_info__ = (3, 0, 4, 0)
__version__ = "3.0.5"
__version_info__ = (3, 0, 5, 0)
if "dev" in __version__:
try:


@ -1,22 +1,23 @@
uvicorn==0.34.0 # uvicorn 0.33 is the last version supporting Python 3.8
pydantic==2.10.6
fastapi==0.115.8
uvicorn==0.34.2 # uvicorn 0.33 is the last version supporting Python 3.8
pydantic==2.11.4
fastapi==0.115.12
python-multipart==0.0.20
websockets==15.0
aiohttp>=3.11.13,<3.12
websockets==15.0.1
aiohttp>=3.11.16,<3.12
async-timeout==5.0.1; python_version < '3.11'
aiofiles>=24.1.0,<25.0
Jinja2>=3.1.5,<3.2
sentry-sdk>=2.22,<2.23 # optional dependency
Jinja2>=3.1.6,<3.2
sentry-sdk>=2.26.1,<2.27 # optional dependency
psutil>=7.0.0
async-timeout>=5.0.1,<5.1
distro>=1.9.0
py-cpuinfo>=9.0.0,<10.0
greenlet==3.1.1 # necessary to run sqlalchemy on Python 3.13
sqlalchemy==2.0.38
greenlet==3.2.0 # necessary to run sqlalchemy on Python 3.13
sqlalchemy==2.0.40
aiosqlite==0.21.0
alembic==1.14.1
bcrypt==4.2.1
joserfc==1.0.3
alembic==1.15.2
bcrypt==4.3.0
joserfc==1.0.4
email-validator==2.2.0
watchdog==6.0.0
zstandard==0.23.0


@ -1,6 +1,6 @@
#!/bin/bash
#
# Copyright (C) 2024 GNS3 Technologies Inc.
# Copyright (C) 2025 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -28,6 +28,7 @@ function help {
echo "--with-i386-repository: Add the i386 repositories required by IOU i386 images. This is not needed for recent x86_64 IOU images." >&2
echo "--with-welcome: Install GNS3-VM welcome.py script" >&2
echo "--without-kvm: Disable KVM, required if system do not support it (limitation in some hypervisors and cloud providers). Warning: only disable KVM if strictly necessary as this will degrade performance" >&2
echo "--without-system-upgrade: Do not upgrade the system" >&2
echo "--unstable: Use the GNS3 unstable repository" >&2
echo "--custom-repository <repository>: Use a custom repository" >&2
echo "--help: This help" >&2
@ -38,6 +39,13 @@ function log {
}
# run the root check first so that $? below still reflects the grep for LTS
if [ "$EUID" -ne 0 ]
then
echo "This script must be run as root"
exit 1
fi

lsb_release -d | grep "LTS" > /dev/null
if [ $? != 0 ]
then
echo "This script can only be run on a Linux Ubuntu LTS release"
@ -52,6 +60,7 @@ USE_VPN=0
USE_IOU=0
I386_REPO=0
DISABLE_KVM=0
NO_SYSTEM_UPGRADE=0
WELCOME_SETUP=0
TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,with-welcome,without-kvm,unstable,custom-repository:,help -n 'gns3-remote-install.sh' -- "$@"`
@ -85,6 +94,10 @@ while true ; do
DISABLE_KVM=1
shift
;;
--without-system-upgrade)
NO_SYSTEM_UPGRADE=1
shift
;;
--unstable)
REPOSITORY="unstable"
shift
@ -102,68 +115,33 @@ while true ; do
esac
done
if [ "$REPOSITORY" == "ppa-v3" ]
then
if ! python3 -c 'import sys; assert sys.version_info >= (3,9)' > /dev/null 2>&1; then
echo "GNS3 version >= 3.0 requires Python 3.9 or later"
exit 1
fi
fi
# Exit in case of error
set -e
export DEBIAN_FRONTEND="noninteractive"
UBUNTU_CODENAME=`lsb_release -c -s`
log "Add GNS3 repository"
log "Updating system packages, installing curl and software-properties-common"
apt update
apt install -y curl software-properties-common
if [ ! -f "/etc/apt/sources.list.d/ubuntu.sources" ]
if [ $NO_SYSTEM_UPGRADE == 0 ]
then
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B83AAABFFBD82D21B543C8EA86C22C2EC6A24D7F
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
deb http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
EOFLIST
else
cat <<EOFLIST > /etc/apt/sources.list.d/gns3-ppa.sources
Types: deb
URIs: https://ppa.launchpadcontent.net/gns3/$REPOSITORY/ubuntu/
Suites: $UBUNTU_CODENAME
Components: main
Signed-By:
-----BEGIN PGP PUBLIC KEY BLOCK-----
.
mQINBGY0jSYBEADMH5CvX8ZVX4XzAxdQ2CmF7t86IjFnQgtI18Q19nVnpKEGNyB5
pgotDMzkhGnxuhvz2zE9PZhd8VgkodB81V607d/Dy8FfI7t1BVQhLvJDx0H/q6RE
n2y9WxiuBzTHitoQTCTY3hjcr7AUNFFI64gUqwbkQmYbCWWsYOlDpRSkWKg8P8WK
08RetwTI0Iwoz8j+BkbPlubuImiVfh1TeH23FBuGIwL1r1Cps0wel6JAi+jaU9WG
j8MX3mQYFTAtk7f1lRubqWosB/A4xIu609pF1e1tAkWAGltYAeoFhDn+PfA9KgmV
fvxfVR7zmxp31imTJgXgUFCz+H0Xb3vpve8XsrsHZUP6StJ3+6cFXjNBV6PuO1FT
JWp86a+AYHg7+sUWcoJRZPCTbb/pOcCa0q1ch5qcLkiYEOGK+pYhbPptq6y8IsJW
N6EDNCVvVqVyTJy14FZWoOqxcpUiDOQ+su28j8++V+PMo+FO3SQqwEZwJXk7LF/4
wUipDCUh/WNjDqqgmYLoO+ttiiJPbEw3jtbO+zopbzYpyEC1f06Nz7uz1daOIN3J
etFPzSqWCE7Eq+hoVmAAm8gVmQir3rFJbIGBAvAaOLQEOkUlOlS7AezqUhdyhGER
Zrvc3eNqxY7G61SEHipEJ7/hpcDq0RRWCXHsoQqyHaPje826n2pGkJYt4QARAQAB
tBZMYXVuY2hwYWQgUFBBIGZvciBHTlMziQJOBBMBCgA4FiEEuDqqv/vYLSG1Q8jq
hsIsLsaiTX8FAmY0jSYCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQhsIs
LsaiTX9z9xAAq1uHmRgfYmELS0cr2YEnTWHPVE6s95Qx+0cr5zzNeWfmoAS9uSyl
z8bCm+Q2ZapzU/nOtkozU+RGjgcRRTKMVTyS0PjFX22965xHCRWnw79fPyrYouUw
H2cAT8WSGYEeVAbqhJSns0RnDpXuaxmWE1wT+iitY/QAjeXo22Z2mjv2bFTitKbY
hZbE5Eu8Olc5YHCVI0ofq84/Ii921iMibU6EDMmm/iOnMK2uHGbC59t0YG8Rm7mK
uk6+TpxOULjFeCWSkF2Dr33m8JQmtYZuFUnmqWPuSdBo3J0O1b0qTg+EP9FbDAtj
CoEKT/V1ccMBd3r77o23CGsvpV7bzEU60A+NsU8vb/AkOmouYiF+qaYDFGZDfWhK
p1HFmd1kt7YdgxsmoKoFJkbt1bBdcFJLV0Jcad5sfArg2aFDYf2giMxAw4iQ+9jc
MCuwWxiqWicPqJ5erNTzVfayBkjuZqBDVTO9wmG3DL4QmNosIBS7kq+NGrT8Ql22
FqYfdIZJDlKVtJKHK8eKJSB0dbFawV2h5p/CvQlIm6nthg5FzOyjvCkPkvxvveq+
SuNxFEscumFCgo7j7RMWHW9HWK3TUvMmYLMVjxL8kXyCwknp9GklBQHA/IPxRa/2
eFqqkmVbmNAoMzzw5wqa/BPcFEbgn+E+TFyZqbzp0F4QzPJZFkz16SA=
=xnj5
-----END PGP PUBLIC KEY BLOCK-----
EOFLIST
log "Upgrading system packages"
apt upgrade --yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
fi
log "Updating system packages and installing curl"
apt update
apt install -y curl
log "Upgrading packages"
apt upgrade --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
log "Adding GNS3 repository ppa:gns3/$REPOSITORY"
# use sudo -E to preserve proxy config
sudo -E apt-add-repository -y "ppa:gns3/$REPOSITORY"
log "Installing the GNS3 server and its dependencies"
apt install -y gns3-server


@ -173,15 +173,15 @@ class TestImageRoutes:
assert response.status_code == status.HTTP_200_OK
assert response.json()["filename"] == image_name
async def test_same_image_is_uploaded(self, app: FastAPI, client: AsyncClient, qcow2_image: str) -> None:
image_name = os.path.basename(qcow2_image)
with open(qcow2_image, "rb") as f:
image_data = f.read()
response = await client.post(
app.url_path_for("upload_image", image_path=image_name),
content=image_data)
assert response.status_code == status.HTTP_201_CREATED
# async def test_same_image_is_uploaded(self, app: FastAPI, client: AsyncClient, qcow2_image: str) -> None:
#
# image_name = os.path.basename(qcow2_image)
# with open(qcow2_image, "rb") as f:
# image_data = f.read()
# response = await client.post(
# app.url_path_for("upload_image", image_path=image_name),
# content=image_data)
# assert response.status_code == status.HTTP_201_CREATED
async def test_image_delete(self, app: FastAPI, client: AsyncClient, qcow2_image: str) -> None:
@ -212,7 +212,6 @@ class TestImageRoutes:
@pytest.mark.parametrize(
"subdir, expected_result",
(
("subdir", status.HTTP_201_CREATED),
("subdir", status.HTTP_201_CREATED),
("subdir2", status.HTTP_201_CREATED),
),


@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import uuid
import pytest_asyncio
from typing import Tuple
@ -423,3 +424,84 @@ class TestLinkRoutes:
assert mock.called
assert response.status_code == status.HTTP_200_OK
assert response.json() == FILTERS
async def test_get_udp_interface(self, app: FastAPI, client: AsyncClient, project: Project) -> None:
"""
Test getting UDP tunnel interface information from a link.
"""
link = Link(project)
project._links = {link.id: link}
cloud_node = MagicMock()
cloud_node.node_type = "cloud"
cloud_node.id = str(uuid.uuid4())
cloud_node.name = "Cloud1"
compute = MagicMock()
response = MagicMock()
response.json = {
"ports_mapping": [
{
"port_number": 1,
"type": "udp",
"lport": 20000,
"rhost": "127.0.0.1",
"rport": 30000,
"name": "UDP tunnel 1"
}
]
}
compute.get = AsyncioMagicMock(return_value=response)
cloud_node.compute = compute
link._nodes = [{"node": cloud_node, "port_number": 1}]
response = await client.get(app.url_path_for("get_iface", project_id=project.id, link_id=link.id))
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert result["node_id"] == cloud_node.id
assert result["lport"] == 20000
assert result["rhost"] == "127.0.0.1"
assert result["rport"] == 30000
assert result["type"] == "udp"
async def test_get_ethernet_interface(self, app: FastAPI, client: AsyncClient, project: Project) -> None:
"""
Test getting ethernet interface information from a link.
"""
link = Link(project)
project._links = {link.id: link}
cloud_node = MagicMock()
cloud_node.node_type = "cloud"
cloud_node.id = str(uuid.uuid4())
cloud_node.name = "Cloud1"
compute = MagicMock()
response = MagicMock()
response.json = {
"ports_mapping": [
{
"port_number": 1,
"type": "ethernet",
"interface": "eth0",
"name": "Ethernet 1"
}
]
}
compute.get = AsyncioMagicMock(return_value=response)
cloud_node.compute = compute
link._nodes = [{"node": cloud_node, "port_number": 1}]
response = await client.get(app.url_path_for("get_iface", project_id=project.id, link_id=link.id))
assert response.status_code == status.HTTP_200_OK
result = response.json()
assert result["node_id"] == cloud_node.id
assert result["interface"] == "eth0"
assert result["type"] == "ethernet"

View File

@ -401,16 +401,20 @@ async def test_uefi_boot_mode_option(vm, tmpdir, images_dir, fake_qemu_img_binar
vm._uefi = True
# create fake OVMF files
ovmf_code_path = os.path.join(images_dir, "OVMF_CODE.fd")
with open(ovmf_code_path, "w+") as f:
f.write('1')
ovmf_vars_path = os.path.join(images_dir, "OVMF_VARS.fd")
system_ovmf_firmware_path = "/usr/share/OVMF/OVMF_CODE_4M.fd"
if os.path.exists(system_ovmf_firmware_path):
ovmf_code_path = system_ovmf_firmware_path
else:
ovmf_code_path = os.path.join(images_dir, "OVMF_CODE_4M.fd")
with open(ovmf_code_path, "w+") as f:
f.write('1')
ovmf_vars_path = os.path.join(images_dir, "OVMF_VARS_4M.fd")
with open(ovmf_vars_path, "w+") as f:
f.write('1')
options = await vm._build_command()
assert ' '.join(["-drive", "if=pflash,format=raw,readonly,file={}".format(ovmf_code_path)]) in ' '.join(options)
assert ' '.join(["-drive", "if=pflash,format=raw,file={}".format(os.path.join(vm.working_dir, "OVMF_VARS.fd"))]) in ' '.join(options)
assert ' '.join(["-drive", "if=pflash,format=raw,file={}".format(os.path.join(vm.working_dir, "OVMF_VARS_4M.fd"))]) in ' '.join(options)
@pytest.mark.asyncio
@ -566,7 +570,11 @@ async def test_build_command(vm, fake_qemu_binary):
"-net",
"none",
"-device",
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"i82801b11-bridge,id=dmi_pci_bridge1",
"-device",
"pci-bridge,id=pci-bridge1,bus=dmi_pci_bridge1,chassis_nr=0x1,addr=0x1,shpc=off",
"-device",
"e1000,mac={},bus=pci-bridge1,addr=0x00,netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport),
"-display",
@ -589,40 +597,16 @@ async def test_build_command_manual_uuid(vm):
@pytest.mark.asyncio
async def test_build_command_kvm(linux_platform, vm, fake_qemu_binary):
async def test_build_command_kvm_below_2_4(linux_platform, vm, fake_qemu_binary):
"""
Qemu versions below 2.4 are not supported with KVM
"""
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.3.2")
os.environ["DISPLAY"] = "0:0"
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM._run_with_hardware_acceleration", return_value=True):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
cmd = await vm._build_command()
nio = vm._local_udp_tunnels[0][0]
assert cmd == [
fake_qemu_binary,
"-name",
"test",
"-m",
"256M",
"-smp",
"cpus=1,maxcpus=1,sockets=1",
"-enable-kvm",
"-boot",
"order=c",
"-uuid",
vm.id,
"-serial",
"telnet:127.0.0.1:{},server,nowait".format(vm._internal_console_port),
"-net",
"none",
"-device",
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport),
"-nographic"
]
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
with pytest.raises(QemuError):
await vm._build_command()
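The rewritten test reflects the new minimum version requirement ("require Qemu version >= 2.4" in the changelog): instead of asserting a full command line for Qemu 2.3.2, it now expects a QemuError. A minimal sketch of such a version gate, with a stand-in exception class; the actual check in QemuVM may be implemented differently:

```python
# version-gate sketch; QemuError stands in for the class used in the test
from packaging.version import Version

class QemuError(Exception):
    pass

def check_qemu_version(version: str) -> None:
    if Version(version) < Version("2.4"):
        raise QemuError(f"Qemu {version} is not supported, version 2.4 or later is required")
```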
@pytest.mark.asyncio
@ -657,7 +641,11 @@ async def test_build_command_kvm_2_4(linux_platform, vm, fake_qemu_binary):
"-net",
"none",
"-device",
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"i82801b11-bridge,id=dmi_pci_bridge1",
"-device",
"pci-bridge,id=pci-bridge1,bus=dmi_pci_bridge1,chassis_nr=0x1,addr=0x1,shpc=off",
"-device",
"e1000,mac={},bus=pci-bridge1,addr=0x00,netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio.rport, nio.lport),
"-nographic"
@ -701,11 +689,15 @@ async def test_build_command_two_adapters(vm, fake_qemu_binary):
"-net",
"none",
"-device",
"e1000,mac={},netdev=gns3-0".format(vm._mac_address),
"i82801b11-bridge,id=dmi_pci_bridge1",
"-device",
"pci-bridge,id=pci-bridge1,bus=dmi_pci_bridge1,chassis_nr=0x1,addr=0x1,shpc=off",
"-device",
"e1000,mac={},bus=pci-bridge1,addr=0x00,netdev=gns3-0".format(vm._mac_address),
"-netdev",
"socket,id=gns3-0,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio1.rport, nio1.lport),
"-device",
"e1000,mac={},netdev=gns3-1".format(int_to_macaddress(macaddress_to_int(vm._mac_address) + 1)),
"e1000,mac={},bus=pci-bridge1,addr=0x01,netdev=gns3-1".format(int_to_macaddress(macaddress_to_int(vm._mac_address) + 1)),
"-netdev",
"socket,id=gns3-1,udp=127.0.0.1:{},localaddr=127.0.0.1:{}".format(nio2.rport, nio2.lport),
"-nographic"
@ -725,8 +717,8 @@ async def test_build_command_two_adapters_mac_address(vm):
assert mac_0[:8] == "00:00:ab"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
cmd = await vm._build_command()
assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x00,netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x01,netdev=gns3-1".format(mac_1) in cmd
vm.mac_address = "00:42:ab:0e:0f:0a"
mac_0 = vm._mac_address
@ -735,8 +727,8 @@ async def test_build_command_two_adapters_mac_address(vm):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
cmd = await vm._build_command()
assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x00,netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x01,netdev=gns3-1".format(mac_1) in cmd
@pytest.mark.asyncio
@ -758,28 +750,20 @@ async def test_build_command_large_number_of_adapters(vm):
assert len([l for l in cmd if "e1000" in l ]) == 100
assert len(vm._ethernet_adapters) == 100
assert "e1000,mac={},netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},netdev=gns3-1".format(mac_1) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x00,netdev=gns3-0".format(mac_0) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x01,netdev=gns3-1".format(mac_1) in cmd
assert "pci-bridge,id=pci-bridge0,bus=dmi_pci_bridge0,chassis_nr=0x1,addr=0x0,shpc=off" not in cmd
assert "pci-bridge,id=pci-bridge1,bus=dmi_pci_bridge1,chassis_nr=0x1,addr=0x1,shpc=off" in cmd
assert "pci-bridge,id=pci-bridge2,bus=dmi_pci_bridge2,chassis_nr=0x1,addr=0x2,shpc=off" in cmd
assert "i82801b11-bridge,id=dmi_pci_bridge1" in cmd
mac_29 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 29)
assert "e1000,mac={},bus=pci-bridge1,addr=0x04,netdev=gns3-29".format(mac_29) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x1d,netdev=gns3-29".format(mac_29) in cmd
mac_30 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 30)
assert "e1000,mac={},bus=pci-bridge1,addr=0x05,netdev=gns3-30".format(mac_30) in cmd
assert "e1000,mac={},bus=pci-bridge1,addr=0x1e,netdev=gns3-30".format(mac_30) in cmd
mac_74 = int_to_macaddress(macaddress_to_int(vm._mac_address) + 74)
assert "e1000,mac={},bus=pci-bridge2,addr=0x11,netdev=gns3-74".format(mac_74) in cmd
assert "e1000,mac={},bus=pci-bridge3,addr=0x0a,netdev=gns3-74".format(mac_74) in cmd
# Qemu < 2.4 doesn't support a large number of adapters
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="2.0.0")
with pytest.raises(QemuError):
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
await vm._build_command()
vm.adapters = 5
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
await vm._build_command()
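The corrected addresses follow a simple scheme: each `pci-bridge` holds up to 32 devices, bridges are numbered from 1, and an adapter's slot is its index modulo 32. A sketch that reproduces the asserted values, assuming this scheme is exactly what `_build_command` implements:

```python
# bus/address scheme implied by the assertions above; verified against
# adapters 29 (pci-bridge1, 0x1d) and 74 (pci-bridge3, 0x0a)
def pci_placement(adapter_number: int) -> tuple[str, str]:
    bridge = adapter_number // 32 + 1
    addr = adapter_number % 32
    return f"pci-bridge{bridge}", f"0x{addr:02x}"

assert pci_placement(29) == ("pci-bridge1", "0x1d")
assert pci_placement(74) == ("pci-bridge3", "0x0a")
```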
@pytest.mark.asyncio
async def test_build_command_with_virtio_net_pci_adapter(vm):
@ -792,7 +776,7 @@ async def test_build_command_with_virtio_net_pci_adapter(vm):
vm._adapter_type = "virtio-net-pci"
with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()):
cmd = await vm._build_command()
assert "virtio-net-pci,mac=00:00:ab:0e:0f:09,speed=10000,duplex=full,netdev=gns3-0" in cmd
assert "virtio-net-pci,mac=00:00:ab:0e:0f:09,speed=10000,duplex=full,bus=pci-bridge1,addr=0x00,netdev=gns3-0" in cmd
@pytest.mark.asyncio