mirror of https://github.com/GNS3/gns3-server (synced 2025-06-25 01:19:00 +00:00)

commit 53adcfef61: Merge remote-tracking branch 'origin/3.0' into gh-pages
@@ -18,7 +18,7 @@ jobs:
       ref: "gh-pages"
     - uses: actions/setup-python@v5
       with:
-        python-version: 3.8
+        python-version: 3.9
     - name: Merge changes from 3.0 branch
       run: |
         git config user.name github-actions
.github/workflows/testing.yml (vendored, 2 changes)
@@ -18,7 +18,7 @@ jobs:
     strategy:
       matrix:
         os: ["ubuntu-latest"]
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
         #include:
           # only test with Python 3.10 on Windows
           # - os: windows-latest
CHANGELOG (19 additions)
@@ -1,5 +1,24 @@
 # Change Log

+## 3.0.2 03/01/2025
+
+* Bundle web-ui v3.0.2
+* Support to create templates based on image checksums.
+* Improvements for installing built-in disks.
+* Use watchdog instead of watchfiles to monitor for new images on the file system
+* Drop Python 3.8
+* Replace python-jose library by joserfc
+* Upgrade dependencies
+* Remove blocking IOU phone home call.
+
+## 3.0.1 27/12/2024
+
+* Bundle web-ui v3.0.1
+* Allow for upgrading built-in disks
+* Fix config parsing when configuring server protocol. Fixes https://github.com/GNS3/gns3-gui/issues/3681
+* Update empty Qemu disks with correct MD5 checksums
+* Increase timeout to run compute HTTP queries. Fixes #3453
+
 ## 3.0.0 20/12/2024

 * Bundle web-ui v3.0.0
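The 3.0.2 bullet about replacing watchfiles with watchdog refers to the image-monitoring rework shown in the database tasks diff further below. As a rough, illustrative sketch only (not the project's actual code), bridging watchdog's threaded callbacks into an asyncio consumer typically looks like this; the watched path and the patterns here are placeholders:

# Illustrative sketch: bridge watchdog (threaded) events into asyncio.
# The path and patterns are placeholders, not values taken from gns3-server.
import asyncio
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler


class QueueingHandler(PatternMatchingEventHandler):
    def __init__(self, queue, loop, **kwargs):
        # ignore temporary and checksum files, hidden files and directories
        super().__init__(ignore_patterns=["*.tmp", "*.md5sum", ".*"], ignore_directories=True, **kwargs)
        self._queue = queue
        self._loop = loop

    def on_created(self, event):
        # hand the event over to the asyncio loop from the observer thread
        self._loop.call_soon_threadsafe(self._queue.put_nowait, event)


async def watch(path="/tmp/images"):
    queue = asyncio.Queue()
    loop = asyncio.get_running_loop()
    observer = Observer()
    observer.schedule(QueueingHandler(queue, loop), path, recursive=True)
    observer.start()
    try:
        while True:
            event = await queue.get()
            print("new file event:", event.src_path)
    finally:
        observer.stop()
        observer.join()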
@@ -27,11 +27,11 @@ from fastapi.encoders import jsonable_encoder
 from starlette.requests import ClientDisconnect
 from sqlalchemy.orm.exc import MultipleResultsFound
 from typing import List, Optional
-from gns3server import schemas

+from gns3server import schemas
 from gns3server.config import Config
 from gns3server.compute.qemu import Qemu
-from gns3server.utils.images import InvalidImageError, write_image, read_image_info, default_images_directory
+from gns3server.utils.images import InvalidImageError, write_image, read_image_info, default_images_directory, get_builtin_disks
 from gns3server.db.repositories.images import ImagesRepository
 from gns3server.db.repositories.templates import TemplatesRepository
 from gns3server.db.repositories.rbac import RbacRepository
@@ -51,7 +51,6 @@ log = logging.getLogger(__name__)

 router = APIRouter()

-
 @router.post(
     "/qemu/{image_path:path}",
     response_model=schemas.Image,
@@ -175,6 +174,61 @@ async def upload_image(
     return image


+@router.delete(
+    "/prune",
+    status_code=status.HTTP_204_NO_CONTENT,
+    dependencies=[Depends(has_privilege("Image.Allocate"))]
+)
+async def prune_images(
+    images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
+) -> None:
+    """
+    Prune images not attached to any template.
+
+    Required privilege: Image.Allocate
+    """
+
+    skip_images = get_builtin_disks()
+    await images_repo.prune_images(skip_images)
+
+
+@router.post(
+    "/install",
+    status_code=status.HTTP_204_NO_CONTENT,
+    dependencies=[Depends(has_privilege("Image.Allocate"))]
+)
+async def install_images(
+    images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
+    templates_repo: TemplatesRepository = Depends(get_repository(TemplatesRepository))
+) -> None:
+    """
+    Attempt to automatically create templates based on image checksums.
+
+    Required privilege: Image.Allocate
+    """
+
+    skip_images = get_builtin_disks()
+    images = await images_repo.get_images()
+    for image in images:
+        if skip_images and image.filename in skip_images:
+            log.debug(f"Skipping image '{image.path}' for image installation")
+            continue
+        templates = await images_repo.get_image_templates(image.image_id)
+        if templates:
+            # the image is already used by a template
+            log.warning(f"Image '{image.path}' is used by one or more templates")
+            continue
+        await Controller.instance().appliance_manager.install_appliances_from_image(
+            image.path,
+            image.checksum,
+            images_repo,
+            templates_repo,
+            None,
+            None,
+            os.path.dirname(image.path)
+        )
+
+
 @router.get(
     "/{image_path:path}",
     response_model=schemas.Image,
@@ -218,7 +272,7 @@ async def delete_image(
         image = await images_repo.get_image(image_path)
     except MultipleResultsFound:
         raise ControllerBadRequestError(f"Image '{image_path}' matches multiple images. "
-                                        f"Please include the relative path of the image")
+                                        f"Please include the absolute path of the image")

     if not image:
         raise ControllerNotFoundError(f"Image '{image_path}' not found")
@@ -236,20 +290,3 @@ async def delete_image(
     success = await images_repo.delete_image(image_path)
     if not success:
         raise ControllerError(f"Image '{image_path}' could not be deleted")
-
-
-@router.post(
-    "/prune",
-    status_code=status.HTTP_204_NO_CONTENT,
-    dependencies=[Depends(has_privilege("Image.Allocate"))]
-)
-async def prune_images(
-    images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
-) -> None:
-    """
-    Prune images not attached to any template.
-
-    Required privilege: Image.Allocate
-    """
-
-    await images_repo.prune_images()
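The two new image endpoints above can be exercised over the controller REST API. A minimal sketch, assuming the controller API is mounted under the /v3 prefix and that a bearer token has already been obtained (both are assumptions, neither is shown in this diff):

# Minimal sketch; the base URL, /v3 prefix and token handling are assumptions.
import requests

BASE = "http://localhost:3080/v3"
HEADERS = {"Authorization": "Bearer <access-token>"}  # hypothetical token

# ask the controller to create templates from known image checksums
requests.post(f"{BASE}/images/install", headers=HEADERS, timeout=30).raise_for_status()

# remove images not referenced by any template (built-in disks are skipped server-side)
requests.delete(f"{BASE}/images/prune", headers=HEADERS, timeout=30).raise_for_status()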
@@ -18,6 +18,7 @@
 API routes for templates.
 """

+import os
 import hashlib
 import json

@@ -34,6 +35,8 @@ from gns3server.db.repositories.templates import TemplatesRepository
 from gns3server.services.templates import TemplatesService
 from gns3server.db.repositories.rbac import RbacRepository
 from gns3server.db.repositories.images import ImagesRepository
+from gns3server.controller.controller_error import ControllerError
+from gns3server.utils.images import get_builtin_disks

 from .dependencies.authentication import get_current_active_user
 from .dependencies.rbac import has_privilege
@@ -132,10 +135,28 @@ async def delete_template(
     Required privilege: Template.Allocate
     """

+    images = await templates_repo.get_template_images(template_id)
     await TemplatesService(templates_repo).delete_template(template_id)
     await rbac_repo.delete_all_ace_starting_with_path(f"/templates/{template_id}")
-    if prune_images:
-        await images_repo.prune_images()
+    if prune_images and images:
+        skip_images = get_builtin_disks()
+        for image in images:
+            if image.filename in skip_images:
+                continue
+            templates = await images_repo.get_image_templates(image.image_id)
+            if templates:
+                template_names = ", ".join([template.name for template in templates])
+                raise ControllerError(f"Image '{image.path}' is used by one or more templates: {template_names}")
+
+            try:
+                os.remove(image.path)
+            except OSError:
+                log.warning(f"Could not delete image file {image.path}")
+
+            print(f"Deleting image '{image.path}'")
+            success = await images_repo.delete_image(image.path)
+            if not success:
+                raise ControllerError(f"Image '{image.path}' could not removed from the database")


 @router.get(
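The pruning behaviour added above is driven by the existing prune_images flag on the delete-template endpoint. A hedged sketch of the call, with the same assumptions about URL prefix, authentication and the flag being passed as a query parameter:

# Sketch only; prefix, auth and parameter style are assumptions based on the diff context.
import requests

BASE = "http://localhost:3080/v3"
HEADERS = {"Authorization": "Bearer <access-token>"}  # hypothetical token
template_id = "00000000-0000-0000-0000-000000000000"  # placeholder UUID

r = requests.delete(f"{BASE}/templates/{template_id}",
                    params={"prune_images": "true"},
                    headers=HEADERS, timeout=30)
# the server raises a controller error if an image is still used by another template
r.raise_for_status()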
@@ -2,14 +2,14 @@
     "appliance_id": "c90f3ff3-4ed2-4437-9afb-21232fa92015",
     "name": "Arista vEOS",
     "category": "multilayer_switch",
-    "description": "Arista EOS\u00ae is the core of Arista cloud networking solutions for next-generation data centers and cloud networks. Cloud architectures built with Arista EOS scale to tens of thousands of compute and storage nodes with management and provisioning capabilities that work at scale. Through its programmability, EOS enables a set of software applications that deliver workflow automation, high availability, unprecedented network visibility and analytics and rapid integration with a wide range of third-party applications for virtualization, management, automation and orchestration services.\n\nArista Extensible Operating System (EOS) is a fully programmable and highly modular, Linux-based network operation system, using familiar industry standard CLI and runs a single binary software image across the Arista switching family. Architected for resiliency and programmability, EOS has a unique multi-process state sharing architecture that separates state information and packet forwarding from protocol processing and application logic.",
+    "description": "Arista EOS is the core of Arista cloud networking solutions for next-generation data centers and cloud networks. Cloud architectures built with Arista EOS scale to tens of thousands of compute and storage nodes with management and provisioning capabilities that work at scale. Through its programmability, EOS enables a set of software applications that deliver workflow automation, high availability, unprecedented network visibility and analytics and rapid integration with a wide range of third-party applications for virtualization, management, automation and orchestration services.\n\nArista Extensible Operating System (EOS) is a fully programmable and highly modular, Linux-based network operation system, using familiar industry standard CLI and runs a single binary software image across the Arista switching family. Architected for resiliency and programmability, EOS has a unique multi-process state sharing architecture that separates state information and packet forwarding from protocol processing and application logic.",
     "vendor_name": "Arista",
     "vendor_url": "http://www.arista.com/",
     "documentation_url": "https://www.arista.com/assets/data/docs/Manuals/EOS-4.17.2F-Manual.pdf",
     "product_name": "vEOS",
     "product_url": "https://eos.arista.com/",
     "registry_version": 4,
-    "status": "experimental",
+    "status": "stable",
     "maintainer": "GNS3 Team",
     "maintainer_email": "developers@gns3.net",
     "usage": "The login is admin, with no password by default",
@ -29,87 +29,24 @@
|
|||||||
},
|
},
|
||||||
"images": [
|
"images": [
|
||||||
{
|
{
|
||||||
"filename": "vEOS64-lab-4.32.0F.vmdk",
|
"filename": "vEOS-lab-4.33.1F.qcow2",
|
||||||
"version": "4.32.0F",
|
"version": "4.33.1F",
|
||||||
"md5sum": "851771260bb18ad3e90fa6956f0c6161",
|
"md5sum": "8f662409c0732ed9f682edce63601e8a",
|
||||||
"filesize": 591724544,
|
"filesize": 611909632,
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
"download_url": "https://www.arista.com/en/support/software-download"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"filename": "vEOS64-lab-4.31.3M.vmdk",
|
"filename": "vEOS-lab-4.32.3M.qcow2",
|
||||||
"version": "4.31.3M",
|
"version": "4.32.3M",
|
||||||
"md5sum": "7df107da137f4a4e752014d4f0e94cd3",
|
"md5sum": "46fc46f5ed1da8752eed8396f08862f8",
|
||||||
"filesize": 577961984,
|
"filesize": 605683712,
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
"download_url": "https://www.arista.com/en/support/software-download"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"filename": "vEOS64-lab-4.30.6M.vmdk",
|
"filename": "vEOS-lab-4.31.6M.qcow2",
|
||||||
"version": "4.30.6M",
|
"version": "4.31.6M",
|
||||||
"md5sum": "19721aace820b9ebf6d7ae6524803cf5",
|
"md5sum": "7410110b77472f058322ec4681f8a356",
|
||||||
"filesize": 553123840,
|
"filesize": 590479360,
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS64-lab-4.29.8M.vmdk",
|
|
||||||
"version": "4.29.8M",
|
|
||||||
"md5sum": "131888f74cd63a93894521d40eb4d0b6",
|
|
||||||
"filesize": 548405248,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS64-lab-4.28.11M.vmdk",
|
|
||||||
"version": "4.28.11M",
|
|
||||||
"md5sum": "6cac0e7b04a74ee0dc358327a00accfd",
|
|
||||||
"filesize": 513343488,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS64-lab-4.27.12M.vmdk",
|
|
||||||
"version": "4.27.12M",
|
|
||||||
"md5sum": "34c4f785c7fc054cda8754dd13c0d7c7",
|
|
||||||
"filesize": 496697344,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.32.0F.vmdk",
|
|
||||||
"version": "4.32.0F",
|
|
||||||
"md5sum": "584b901a1249717504050e48f74fb8dd",
|
|
||||||
"filesize": 591396864,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.31.3M.vmdk",
|
|
||||||
"version": "4.31.3M",
|
|
||||||
"md5sum": "a2e130697cdf8547006eebebde6eefca",
|
|
||||||
"filesize": 590086144,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.30.6M.vmdk",
|
|
||||||
"version": "4.30.6M",
|
|
||||||
"md5sum": "a4467648bcfa7b19640af8a4ad3153c6",
|
|
||||||
"filesize": 565968896,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.29.8M.vmdk",
|
|
||||||
"version": "4.29.8M",
|
|
||||||
"md5sum": "1952f6114a4376212c525db9ec8efd5f",
|
|
||||||
"filesize": 558039040,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.28.11M.vmdk",
|
|
||||||
"version": "4.28.11M",
|
|
||||||
"md5sum": "5502df24dfc231c45afb33d6018c16d0",
|
|
||||||
"filesize": 521338880,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.27.12M.vmdk",
|
|
||||||
"version": "4.27.12M",
|
|
||||||
"md5sum": "e08a97e7c1977993f947fedeb4c6ddd5",
|
|
||||||
"filesize": 504299520,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
"download_url": "https://www.arista.com/en/support/software-download"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -118,459 +55,28 @@
|
|||||||
"md5sum": "8d7e754efebca1930a93a2587ff7606c",
|
"md5sum": "8d7e754efebca1930a93a2587ff7606c",
|
||||||
"filesize": 6291456,
|
"filesize": 6291456,
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
"download_url": "https://www.arista.com/en/support/software-download"
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.26.2F.vmdk",
|
|
||||||
"version": "4.26.2F",
|
|
||||||
"md5sum": "de8ce9750fddb63bd3f71bccfcd7651e",
|
|
||||||
"filesize": 475332608,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.25.3M.vmdk",
|
|
||||||
"version": "4.25.3M",
|
|
||||||
"md5sum": "2f196969036b4d283e86f15118d59c26",
|
|
||||||
"filesize": 451543040,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.24.3M.vmdk",
|
|
||||||
"version": "4.24.3M",
|
|
||||||
"md5sum": "0a28e44c7ce4a8965f24a4a463a89b7d",
|
|
||||||
"filesize": 455213056,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.24.2.1F.vmdk",
|
|
||||||
"version": "4.24.2.1F",
|
|
||||||
"md5sum": "6bab8b59ce5230e243e56f4127448fc8",
|
|
||||||
"filesize": 455213056,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.23.4.2M.vmdk",
|
|
||||||
"version": "4.23.4.2M",
|
|
||||||
"md5sum": "d21cbef4e39f1e783b13a926cb54a242",
|
|
||||||
"filesize": 454295552,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.23.0.1F.vmdk",
|
|
||||||
"version": "4.23.0.1F",
|
|
||||||
"md5sum": "08d52154aa11a834aef9f42bbf29f977",
|
|
||||||
"filesize": 439484416,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.22.2.1F.vmdk",
|
|
||||||
"version": "4.22.2.1F",
|
|
||||||
"md5sum": "2a425bf8efe569a2bdf0e328f240cd16",
|
|
||||||
"filesize": 426377216,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.22.0F.vmdk",
|
|
||||||
"version": "4.22.0F",
|
|
||||||
"md5sum": "cfcc75c2b8176cfd819afcfd6799b74c",
|
|
||||||
"filesize": 414121984,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.21.1.1F.vmdk",
|
|
||||||
"version": "4.21.1F",
|
|
||||||
"md5sum": "02bfb7e53781fd44ff02357f201586d9",
|
|
||||||
"filesize": 358809600,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.20.10M-combined.vmdk",
|
|
||||||
"version": "4.20.10M-combined",
|
|
||||||
"md5sum": "d1f2d650f93dbf24e04fdd2c9d62bd62",
|
|
||||||
"filesize": 334626816,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.20.1F.vmdk",
|
|
||||||
"version": "4.20.1F",
|
|
||||||
"md5sum": "aadb6f3dbff28317f68cb4c4502d0db8",
|
|
||||||
"filesize": 662044672,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.19.10M-combined.vmdk",
|
|
||||||
"version": "4.19.10M-combined",
|
|
||||||
"md5sum": "103daa45c33be4584cbe6adc60de46a3",
|
|
||||||
"filesize": 324141056,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.19.10M.vmdk",
|
|
||||||
"version": "4.19.10M",
|
|
||||||
"md5sum": "665ed14389411ae5f16ba0a2ff84240a",
|
|
||||||
"filesize": 637337600,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.18.10M-combined.vmdk",
|
|
||||||
"version": "4.18.10M-combined",
|
|
||||||
"md5sum": "e33e0ef5b8cecc84c5bb57569b36b9c6",
|
|
||||||
"filesize": 317652992,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.18.10M.vmdk",
|
|
||||||
"version": "4.18.10M",
|
|
||||||
"md5sum": "1d87e9ace37fe3706dbf3e49c8d4d231",
|
|
||||||
"filesize": 624427008,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.18.5M.vmdk",
|
|
||||||
"version": "4.18.5M",
|
|
||||||
"md5sum": "b1ee6268dbaf2b2276fd7a5286c7ce2b",
|
|
||||||
"filesize": 623116288,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.18.1F.vmdk",
|
|
||||||
"version": "4.18.1F",
|
|
||||||
"md5sum": "9648c63185f3b793b47528a858ca4364",
|
|
||||||
"filesize": 620625920,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.17.8M.vmdk",
|
|
||||||
"version": "4.17.8M",
|
|
||||||
"md5sum": "afc79a06f930ea2cc0ae3e03cbfd3f23",
|
|
||||||
"filesize": 608829440,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.17.2F.vmdk",
|
|
||||||
"version": "4.17.2F",
|
|
||||||
"md5sum": "3b4845edfa77cf9aaeb9c0a005d3e277",
|
|
||||||
"filesize": 609615872,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.16.13M.vmdk",
|
|
||||||
"version": "4.16.13M",
|
|
||||||
"md5sum": "4d0facf90140fc3aab031f0f8f88a32f",
|
|
||||||
"filesize": 521404416,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.16.6M.vmdk",
|
|
||||||
"version": "4.16.6M",
|
|
||||||
"md5sum": "b3f7b7cee17f2e66bb38b453a4939fef",
|
|
||||||
"filesize": 519962624,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.15.10M.vmdk",
|
|
||||||
"version": "4.15.10M",
|
|
||||||
"md5sum": "98e08281a9c48ddf6f3c5d62a124a20f",
|
|
||||||
"filesize": 517079040,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.15.5M.vmdk",
|
|
||||||
"version": "4.15.5M",
|
|
||||||
"md5sum": "cd74bb69c7ee905ac3d33c4d109f3ab7",
|
|
||||||
"filesize": 516030464,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.14.14M.vmdk",
|
|
||||||
"version": "4.14.14M",
|
|
||||||
"md5sum": "d81ba0522f4d7838d96f7985e41cdc47",
|
|
||||||
"filesize": 422641664,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.13.16M.vmdk",
|
|
||||||
"version": "4.13.16M",
|
|
||||||
"md5sum": "5763b2c043830c341c8b1009f4ea9a49",
|
|
||||||
"filesize": 404684800,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "vEOS-lab-4.13.8M.vmdk",
|
|
||||||
"version": "4.13.8M",
|
|
||||||
"md5sum": "a47145b9e6e7a24171c0850f8755535e",
|
|
||||||
"filesize": 409010176,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"filename": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"version": "8.0.0",
|
|
||||||
"md5sum": "488ad1c435d18c69bb8d69c7806457c9",
|
|
||||||
"filesize": 5242880,
|
|
||||||
"download_url": "https://www.arista.com/en/support/software-download"
|
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"versions": [
|
"versions": [
|
||||||
{
|
{
|
||||||
"name": "4.32.0F",
|
"name": "4.33.1F",
|
||||||
"images": {
|
"images": {
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
||||||
"hdb_disk_image": "vEOS64-lab-4.32.0F.vmdk"
|
"hdb_disk_image": "vEOS-lab-4.33.1F.qcow2"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "4.31.3M",
|
"name": "4.32.3M",
|
||||||
"images": {
|
"images": {
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
||||||
"hdb_disk_image": "vEOS64-lab-4.31.3M.vmdk"
|
"hdb_disk_image": "vEOS-lab-4.32.3M.qcow2"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"name": "4.30.6M",
|
"name": "4.31.6M",
|
||||||
"images": {
|
"images": {
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
||||||
"hdb_disk_image": "vEOS64-lab-4.30.6M.vmdk"
|
"hdb_disk_image": "vEOS-lab-4.31.6M.qcow2"
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.29.8M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS64-lab-4.29.8M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.28.11M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS64-lab-4.28.11M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.27.12M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS64-lab-4.27.12M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.32.0F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.32.0F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.31.3M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.31.3M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.30.6M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.30.6M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.29.8M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.29.8M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.28.11M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.28.11M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.27.12M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.2.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.27.12M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.26.2F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.26.2F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.25.3M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.25.3M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.24.3M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.24.3M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.24.2.1F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.24.2.1F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.23.4.2M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.23.4.2M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.23.0.1F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.23.0.1F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.22.2.1F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.22.2.1F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.22.0F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.22.0F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.21.1F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.21.1.1F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.20.10M-combined",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "vEOS-lab-4.20.10M-combined.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.20.1F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.20.1F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.19.10M-combined",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "vEOS-lab-4.19.10M-combined.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.19.10M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.19.10M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.18.10M-combined",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "vEOS-lab-4.18.10M-combined.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.18.10M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.18.10M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.18.5M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.18.5M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.18.1F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.18.1F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.17.8M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.17.8M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.17.2F",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.17.2F.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.16.13M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.16.13M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.16.6M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.16.6M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.15.10M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.15.10M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.15.5M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.15.5M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.14.14M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.14.14M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.13.16M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.13.16M.vmdk"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"name": "4.13.8M",
|
|
||||||
"images": {
|
|
||||||
"hda_disk_image": "Aboot-veos-serial-8.0.0.iso",
|
|
||||||
"hdb_disk_image": "vEOS-lab-4.13.8M.vmdk"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@@ -24,6 +24,13 @@
         "process_priority": "normal"
     },
     "images": [
+        {
+            "filename": "pfSense-CE-2.7.2-RELEASE-amd64.iso",
+            "version": "2.7.2",
+            "md5sum": "50c3e723d68ec74d038041a34fa846f8",
+            "filesize": 874672128,
+            "download_url": "https://www.pfsense.org/download/mirror.php?section=downloads"
+        },
         {
             "filename": "pfSense-CE-2.7.0-RELEASE-amd64.iso",
             "version": "2.7.0",
@@ -76,6 +83,13 @@
         }
     ],
     "versions": [
+        {
+            "name": "2.7.2",
+            "images": {
+                "hda_disk_image": "empty100G.qcow2",
+                "cdrom_image": "pfSense-CE-2.7.2-RELEASE-amd64.iso"
+            }
+        },
         {
             "name": "2.7.0",
             "images": {
@@ -115,7 +115,7 @@ class Docker(BaseManager):
             dst_path = self.resources_path()
             log.info(f"Installing Docker resources in '{dst_path}'")
             from gns3server.controller import Controller
-            Controller.instance().install_resource_files(dst_path, "compute/docker/resources")
+            await Controller.instance().install_resource_files(dst_path, "compute/docker/resources")
             await self.install_busybox(dst_path)
         except OSError as e:
             raise DockerError(f"Could not install Docker resources to {dst_path}: {e}")
@@ -28,10 +28,10 @@ try:
 except ImportError:
     from importlib import resources as importlib_resources

-
 from ..config import Config
 from ..utils import parse_version, md5sum
 from ..utils.images import default_images_directory
+from ..utils.asyncio import wait_run_in_executor

 from .project import Project
 from .appliance import Appliance
@@ -43,6 +43,7 @@ from .topology import load_topology
 from .gns3vm import GNS3VM
 from .gns3vm.gns3_vm_error import GNS3VMError
 from .controller_error import ControllerError, ControllerNotFoundError
+from ..db.tasks import update_disk_checksums
 from ..version import __version__

 import logging
@@ -72,8 +73,11 @@ class Controller:
     async def start(self, computes=None):

         log.info("Controller is starting")
-        self._install_base_configs()
-        self._install_builtin_disks()
+        await self._install_base_configs()
+        installed_disks = await self._install_builtin_disks()
+        if installed_disks:
+            await update_disk_checksums(installed_disks)
+
         server_config = Config.instance().settings.Server
         Config.instance().listen_for_config_changes(self._update_config)
         name = server_config.name
@@ -86,7 +90,7 @@
         if host == "0.0.0.0":
             host = "127.0.0.1"

-        self._load_controller_vars()
+        await self._load_controller_vars()

         if server_config.enable_ssl:
             self._ssl_context = self._create_ssl_context(server_config)
@@ -190,7 +194,7 @@
     async def reload(self):

         log.info("Controller is reloading")
-        self._load_controller_vars()
+        await self._load_controller_vars()

         # remove all projects deleted from disk.
         for project in self._projects.copy().values():
@@ -234,7 +238,7 @@
         except OSError as e:
             log.error(f"Cannot write controller vars file '{self._vars_file}': {e}")

-    def _load_controller_vars(self):
+    async def _load_controller_vars(self):
         """
         Reload the controller vars from disk
         """
@@ -274,9 +278,9 @@
         builtin_appliances_path = self._appliance_manager.builtin_appliances_path()
         if not previous_version or \
                 parse_version(__version__.split("+")[0]) > parse_version(previous_version.split("+")[0]):
-            self._appliance_manager.install_builtin_appliances()
+            await self._appliance_manager.install_builtin_appliances()
         elif not os.listdir(builtin_appliances_path):
-            self._appliance_manager.install_builtin_appliances()
+            await self._appliance_manager.install_builtin_appliances()
         else:
             log.info(f"Built-in appliances are installed in '{builtin_appliances_path}'")

@@ -307,18 +311,21 @@

     @staticmethod
-    def install_resource_files(dst_path, resource_name, upgrade_resources=True):
+    async def install_resource_files(dst_path, resource_name, upgrade_resources=True):
         """
         Install files from resources to user's file system
         """

-        def should_copy(src, dst, upgrade_resources):
+        installed_resources = []
+        async def should_copy(src, dst, upgrade_resources):
             if not os.path.exists(dst):
                 return True
             if upgrade_resources is False:
                 return False
             # copy the resource if it is different
-            return md5sum(src) != md5sum(dst)
+            src_md5 = await wait_run_in_executor(md5sum, src)
+            dst_md5 = await wait_run_in_executor(md5sum, dst)
+            return src_md5 != dst_md5

         if hasattr(sys, "frozen") and sys.platform.startswith("win"):
             resource_path = os.path.normpath(os.path.join(os.path.dirname(sys.executable), resource_name))
@@ -328,14 +335,16 @@
         else:
             for entry in importlib_resources.files('gns3server').joinpath(resource_name).iterdir():
                 full_path = os.path.join(dst_path, entry.name)
-                if entry.is_file() and should_copy(str(entry), full_path, upgrade_resources):
+                if entry.is_file() and await should_copy(str(entry), full_path, upgrade_resources):
                     log.debug(f'Installing {resource_name} resource file "{entry.name}" to "{full_path}"')
-                    shutil.copy(str(entry), os.path.join(dst_path, entry.name))
+                    shutil.copy(str(entry), os.path.join(full_path))
+                    installed_resources.append(full_path)
                 elif entry.is_dir():
                     os.makedirs(full_path, exist_ok=True)
-                    Controller.install_resource_files(full_path, os.path.join(resource_name, entry.name))
+                    await Controller.install_resource_files(full_path, os.path.join(resource_name, entry.name))
+        return installed_resources

-    def _install_base_configs(self):
+    async def _install_base_configs(self):
         """
         At startup we copy base configs to the user location to allow
         them to customize it
@@ -344,11 +353,12 @@
         dst_path = self.configs_path()
         log.info(f"Installing base configs in '{dst_path}'")
         try:
-            Controller.install_resource_files(dst_path, "configs", upgrade_resources=False)
+            # do not overwrite base configs because they may have been customized by the user
+            await Controller.install_resource_files(dst_path, "configs", upgrade_resources=False)
         except OSError as e:
             log.error(f"Could not install base config files to {dst_path}: {e}")

-    def _install_builtin_disks(self):
+    async def _install_builtin_disks(self):
         """
         At startup we copy built-in Qemu disks to the user location to allow
         them to use with appliances
@@ -357,7 +367,7 @@
         dst_path = self.disks_path()
         log.info(f"Installing built-in disks in '{dst_path}'")
         try:
-            Controller.install_resource_files(dst_path, "disks", upgrade_resources=False)
+            return await Controller.install_resource_files(dst_path, "disks")
         except OSError as e:
             log.error(f"Could not install disk files to {dst_path}: {e}")
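The controller changes above move file hashing off the event loop by awaiting wait_run_in_executor(md5sum, path). A generic sketch of that pattern follows; it illustrates the idea with the standard library only and is not the gns3server helper itself:

# Illustration of running a blocking hash in a worker thread from asyncio code.
import asyncio
import hashlib


def md5sum_blocking(path: str, chunk_size: int = 1024 * 1024) -> str:
    digest = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


async def checksum(path: str) -> str:
    # equivalent in spirit to: await wait_run_in_executor(md5sum, path)
    return await asyncio.to_thread(md5sum_blocking, path)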
@@ -110,7 +110,7 @@ class ApplianceManager:
         os.makedirs(appliances_dir, exist_ok=True)
         return appliances_dir

-    def install_builtin_appliances(self):
+    async def install_builtin_appliances(self):
         """
         At startup we copy the built-in appliances files.
         """
@@ -119,7 +119,7 @@
         log.info(f"Installing built-in appliances in '{dst_path}'")
         from . import Controller
         try:
-            Controller.instance().install_resource_files(dst_path, "appliances")
+            await Controller.instance().install_resource_files(dst_path, "appliances")
         except OSError as e:
             log.error(f"Could not install built-in appliance files to {dst_path}: {e}")
@@ -18,14 +18,19 @@
 import ipaddress
 import aiohttp
 import asyncio
-import async_timeout
 import socket
 import json
 import sys
 import io

 from fastapi import HTTPException
 from aiohttp import web

+if sys.version_info >= (3, 11):
+    from asyncio import timeout as asynctimeout
+else:
+    from async_timeout import timeout as asynctimeout
+
 from ..utils import parse_version
 from ..utils.asyncio import locking
 from ..controller.controller_error import (
@@ -502,8 +507,8 @@ class Compute:
         """ Returns URL for specific path at Compute"""
         return self._getUrl(path)

-    async def _run_http_query(self, method, path, data=None, timeout=20, raw=False):
-        async with async_timeout.timeout(delay=timeout):
+    async def _run_http_query(self, method, path, data=None, timeout=120, raw=False):
+        async with asynctimeout(delay=timeout):
             url = self._getUrl(path)
             headers = {"content-type": "application/json"}
             chunked = None
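The compute client now uses asyncio.timeout on Python 3.11+ and falls back to the async_timeout package on older interpreters. A small standalone sketch of the same pattern:

# Standalone sketch of the conditional timeout import used above.
import asyncio
import sys

if sys.version_info >= (3, 11):
    from asyncio import timeout as asynctimeout
else:
    from async_timeout import timeout as asynctimeout  # external dependency on Python < 3.11


async def fetch_with_timeout():
    async with asynctimeout(10):   # raises TimeoutError after 10 seconds
        await asyncio.sleep(1)     # stand-in for an HTTP query


asyncio.run(fetch_with_timeout())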
@@ -58,7 +58,7 @@ class CrashReport:
     Report crash to a third party service
     """

-    DSN = "https://8374a6208714ff37e18725c21a04b8d1@o19455.ingest.us.sentry.io/38482"
+    DSN = "https://9cf53e6b9adfe49b867f1847b7cc4d72@o19455.ingest.us.sentry.io/38482"
     _instance = None

     def __init__(self):
@@ -18,7 +18,7 @@
 import os

 from typing import Optional, List
-from sqlalchemy import select, delete
+from sqlalchemy import select, delete, update
 from sqlalchemy.ext.asyncio import AsyncSession

 from .base import BaseRepository
@@ -103,6 +103,22 @@ class ImagesRepository(BaseRepository):
         await self._db_session.refresh(db_image)
         return db_image

+    async def update_image(self, image_path: str, checksum: str, checksum_algorithm: str) -> models.Image:
+        """
+        Update an image.
+        """
+
+        query = update(models.Image).\
+            where(models.Image.path == image_path).\
+            values(checksum=checksum, checksum_algorithm=checksum_algorithm)
+
+        await self._db_session.execute(query)
+        await self._db_session.commit()
+        image_db = await self.get_image_by_checksum(checksum)
+        if image_db:
+            await self._db_session.refresh(image_db)  # force refresh of updated_at value
+        return image_db
+
     async def delete_image(self, image_path: str) -> bool:
         """
         Delete an image.
@@ -119,7 +135,7 @@ class ImagesRepository(BaseRepository):
         await self._db_session.commit()
         return result.rowcount > 0

-    async def prune_images(self) -> int:
+    async def prune_images(self, skip_images: list[str] = None) -> int:
         """
         Prune images not attached to any template.
         """
@@ -130,12 +146,15 @@ class ImagesRepository(BaseRepository):
         images = result.scalars().all()
         images_deleted = 0
         for image in images:
+            if skip_images and image.filename in skip_images:
+                log.debug(f"Skipping image '{image.path}' for pruning")
+                continue
             try:
                 log.debug(f"Deleting image '{image.path}'")
                 os.remove(image.path)
             except OSError:
                 log.warning(f"Could not delete image file {image.path}")
-            if await self.delete_image(image.filename):
+            if await self.delete_image(image.path):
                 images_deleted += 1
         log.info(f"{images_deleted} image(s) have been deleted")
         return images_deleted
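The new update_image method above uses a SQLAlchemy 2.0-style UPDATE statement executed through the async session. A self-contained sketch of that statement shape against a toy model (the table, columns and values here are illustrative, not the gns3server schema):

# Toy example of the update() statement pattern; model and values are illustrative.
from sqlalchemy import String, update
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class Image(Base):
    __tablename__ = "images"
    path: Mapped[str] = mapped_column(String, primary_key=True)
    checksum: Mapped[str] = mapped_column(String)
    checksum_algorithm: Mapped[str] = mapped_column(String)


stmt = (
    update(Image)
    .where(Image.path == "/data/images/disk.qcow2")
    .values(checksum="d41d8cd98f00b204e9800998ecf8427e", checksum_algorithm="md5")
)
# with an AsyncSession: await session.execute(stmt); await session.commit()
print(stmt)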
@@ -170,3 +170,14 @@ class TemplatesRepository(BaseRepository):
         await self._db_session.commit()
         await self._db_session.refresh(template_in_db)
         return template_in_db
+
+    async def get_template_images(self, template_id: UUID) -> List[models.Image]:
+        """
+        Return all images attached to a template.
+        """
+
+        query = select(models.Image).\
+            join(models.Image.templates).\
+            filter(models.Template.template_id == template_id)
+        result = await self._db_session.execute(query)
+        return result.scalars().all()
@ -16,13 +16,11 @@
|
|||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import asyncio
|
import asyncio
|
||||||
import signal
|
import time
|
||||||
import os
|
import os
|
||||||
|
|
||||||
from fastapi import FastAPI
|
from fastapi import FastAPI
|
||||||
from pydantic import ValidationError
|
from pydantic import ValidationError
|
||||||
from watchfiles import awatch, Change
|
|
||||||
|
|
||||||
from typing import List
|
from typing import List
|
||||||
from sqlalchemy import event
|
from sqlalchemy import event
|
||||||
from sqlalchemy.engine import Engine
|
from sqlalchemy.engine import Engine
|
||||||
@ -32,10 +30,13 @@ from alembic import command, config
|
|||||||
from alembic.script import ScriptDirectory
|
from alembic.script import ScriptDirectory
|
||||||
from alembic.runtime.migration import MigrationContext
|
from alembic.runtime.migration import MigrationContext
|
||||||
from alembic.util.exc import CommandError
|
from alembic.util.exc import CommandError
|
||||||
|
from watchdog.observers import Observer
|
||||||
|
from watchdog.events import FileSystemEvent, PatternMatchingEventHandler
|
||||||
|
|
||||||
from gns3server.db.repositories.computes import ComputesRepository
|
from gns3server.db.repositories.computes import ComputesRepository
|
||||||
from gns3server.db.repositories.images import ImagesRepository
|
from gns3server.db.repositories.images import ImagesRepository
|
||||||
from gns3server.utils.images import discover_images, check_valid_image_header, read_image_info, default_images_directory, InvalidImageError
|
from gns3server.utils.images import md5sum, discover_images, read_image_info, InvalidImageError
|
||||||
|
from gns3server.utils.asyncio import wait_run_in_executor
|
||||||
from gns3server import schemas
|
from gns3server import schemas
|
||||||
|
|
||||||
from .models import Base
|
from .models import Base
|
||||||
@ -130,81 +131,7 @@ async def get_computes(app: FastAPI) -> List[dict]:
return computes


def image_filter(change: Change, path: str) -> bool:
async def discover_images_on_filesystem(app: FastAPI) -> None:

if change == Change.added and os.path.isfile(path):
if path.endswith(".tmp") or path.endswith(".md5sum") or path.startswith("."):
return False
if "/lib/" in path or "/lib64/" in path:
# ignore custom IOU libraries
return False
header_magic_len = 7
with open(path, "rb") as f:
image_header = f.read(header_magic_len) # read the first 7 bytes of the file
if len(image_header) >= header_magic_len:
try:
check_valid_image_header(image_header)
except InvalidImageError as e:
log.debug(f"New image '{path}': {e}")
return False
else:
log.debug(f"New image '{path}': size is too small to be valid")
return False
return True
# FIXME: should we support image deletion?
# elif change == Change.deleted:
# return True
return False


async def monitor_images_on_filesystem(app: FastAPI):

directories_to_monitor = []
for image_type in ("qemu", "ios", "iou"):
image_dir = default_images_directory(image_type)
if os.path.isdir(image_dir):
log.debug(f"Monitoring for new images in '{image_dir}'")
directories_to_monitor.append(image_dir)

try:
async for changes in awatch(
*directories_to_monitor,
watch_filter=image_filter,
raise_interrupt=True
):
async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
for change in changes:
change_type, image_path = change
if change_type == Change.added:
try:
image = await read_image_info(image_path)
except InvalidImageError as e:
log.warning(str(e))
continue
try:
if await images_repository.get_image(image_path):
continue
await images_repository.add_image(**image)
log.info(f"Discovered image '{image_path}' has been added to the database")
except SQLAlchemyError as e:
log.warning(f"Error while adding image '{image_path}' to the database: {e}")
# if change_type == Change.deleted:
# try:
# if await images_repository.get_image(image_path):
# success = await images_repository.delete_image(image_path)
# if not success:
# log.warning(f"Could not delete image '{image_path}' from the database")
# else:
# log.info(f"Image '{image_path}' has been deleted from the database")
# except SQLAlchemyError as e:
# log.warning(f"Error while deleting image '{image_path}' from the database: {e}")
except KeyboardInterrupt:
# send SIGTERM to the server PID so uvicorn can shutdown the process
os.kill(os.getpid(), signal.SIGTERM)


async def discover_images_on_filesystem(app: FastAPI):

async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
@ -228,3 +155,117 @@ async def discover_images_on_filesystem(app: FastAPI):

# monitor if images have been manually added
asyncio.create_task(monitor_images_on_filesystem(app))


async def update_disk_checksums(updated_disks: List[str]) -> None:
"""
Update the checksum of a list of disks in the database.

:param updated_disks: list of updated disks
"""

from gns3server.api.server import app
async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
for path in updated_disks:
image = await images_repository.get_image(path)
if image:
log.info(f"Updating image '{path}' in the database")
checksum = await wait_run_in_executor(md5sum, path, cache_to_md5file=False)
if image.checksum != checksum:
await images_repository.update_image(path, checksum, "md5")


class EventHandler(PatternMatchingEventHandler):
"""
Watchdog event handler.
"""

def __init__(self, queue: asyncio.Queue, loop: asyncio.BaseEventLoop, **kwargs):

self._loop = loop
self._queue = queue

# ignore temporary files, md5sum files, hidden files and directories
super().__init__(ignore_patterns=["*.tmp", "*.md5sum", ".*"], ignore_directories=True, **kwargs)

def on_closed(self, event: FileSystemEvent) -> None:
# monitor for closed files (e.g. when a file has finished to be copied)
if "/lib/" in event.src_path or "/lib64/" in event.src_path:
return # ignore custom IOU libraries
self._loop.call_soon_threadsafe(self._queue.put_nowait, event)


class EventIterator(object):
"""
Watchdog Event iterator.
"""

def __init__(self, queue: asyncio.Queue):
self.queue = queue

def __aiter__(self):
return self

async def __anext__(self):

item = await self.queue.get()
if item is None:
raise StopAsyncIteration
return item


async def monitor_images_on_filesystem(app: FastAPI):

def watchdog(
path: str,
queue: asyncio.Queue,
loop: asyncio.BaseEventLoop,
app: FastAPI, recursive: bool = False
) -> None:
"""
Thread to monitor a directory for new images.
"""

handler = EventHandler(queue, loop)
observer = Observer()
observer.schedule(handler, str(path), recursive=recursive)
observer.start()
log.info(f"Monitoring for new images in '{path}'")
while True:
time.sleep(1)
# stop when the app is exiting
if app.state.exiting:
observer.stop()
observer.join(10)
log.info(f"Stopping monitoring for new images in '{path}'")
loop.call_soon_threadsafe(queue.put_nowait, None)
break

queue = asyncio.Queue()
loop = asyncio.get_event_loop()
server_config = Config.instance().settings.Server
image_dir = os.path.expanduser(server_config.images_path)
asyncio.get_event_loop().run_in_executor(None, watchdog, image_dir, queue, loop, app, True)

async for filesystem_event in EventIterator(queue):
# read the file system event from the queue
image_path = filesystem_event.src_path
expected_image_type = None
if "IOU" in image_path:
expected_image_type = "iou"
elif "QEMU" in image_path:
expected_image_type = "qemu"
elif "IOS" in image_path:
expected_image_type = "ios"
async with AsyncSession(app.state._db_engine) as db_session:
images_repository = ImagesRepository(db_session)
try:
image = await read_image_info(image_path, expected_image_type)
except InvalidImageError as e:
log.warning(str(e))
continue
try:
if await images_repository.get_image(image_path):
continue
await images_repository.add_image(**image)
log.info(f"Discovered image '{image_path}' has been added to the database")
except SQLAlchemyError as e:
log.warning(f"Error while adding image '{image_path}' to the database: {e}")
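The new monitor hinges on handing events from watchdog's observer thread to the asyncio loop with call_soon_threadsafe(). A minimal, self-contained sketch of that bridge, using only the watchdog and asyncio APIs shown above (the handler name and directory below are illustrative, not part of gns3-server):

    import asyncio
    from watchdog.observers import Observer
    from watchdog.events import FileSystemEventHandler

    class QueueingHandler(FileSystemEventHandler):
        """Forward watchdog events from the observer thread to an asyncio queue."""

        def __init__(self, queue: asyncio.Queue, loop: asyncio.AbstractEventLoop):
            self._queue = queue
            self._loop = loop

        def on_created(self, event):
            # watchdog callbacks run in the observer thread, so the hand-off
            # to the event loop must go through call_soon_threadsafe()
            self._loop.call_soon_threadsafe(self._queue.put_nowait, event)

    async def watch(path: str) -> None:
        queue: asyncio.Queue = asyncio.Queue()
        loop = asyncio.get_running_loop()
        observer = Observer()
        observer.schedule(QueueingHandler(queue, loop), path, recursive=True)
        observer.start()
        try:
            while True:
                event = await queue.get()  # wakes up whenever the thread pushes an event
                print(f"new filesystem event: {event.src_path}")
        finally:
            observer.stop()
            observer.join()

    # asyncio.run(watch("/tmp/images"))  # hypothetical directory

The same idea drives EventHandler and EventIterator above: the observer thread only pushes events, and all database work stays on the event loop.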
Binary file not shown.
Binary file not shown.
@ -147,7 +147,7 @@ class ServerSettings(BaseModel):
allow_remote_console: bool = False
enable_builtin_templates: bool = True
install_builtin_appliances: bool = True
model_config = ConfigDict(validate_assignment=True, str_strip_whitespace=True, use_enum_values=True)
model_config = ConfigDict(validate_assignment=True, str_strip_whitespace=True)

@field_validator("additional_images_paths", mode="before")
@classmethod
@ -267,9 +267,9 @@ class Server:
else:
log.info(f"Compute authentication is enabled with username '{config.Server.compute_username}'")

# we only support Python 3 version >= 3.8
# we only support Python 3 version >= 3.9
if sys.version_info < (3, 8, 0):
if sys.version_info < (3, 9, 0):
raise SystemExit("Python 3.8 or higher is required")
raise SystemExit("Python 3.9 or higher is required")

log.info(
"Running with Python {major}.{minor}.{micro} and has PID {pid}".format(
@ -14,8 +14,9 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from jose import JWTError, jwt
from joserfc import jwt
from joserfc.jwk import OctKey
from joserfc.errors import JoseError
from datetime import datetime, timedelta, timezone
import bcrypt

@ -56,7 +57,8 @@ class AuthService:
secret_key = DEFAULT_JWT_SECRET_KEY
log.error("A JWT secret key must be configured to secure the server, using an unsecured default key!")
algorithm = Config.instance().settings.Controller.jwt_algorithm
encoded_jwt = jwt.encode(to_encode, secret_key, algorithm=algorithm)
key = OctKey.import_key(secret_key)
encoded_jwt = jwt.encode({"alg": algorithm}, to_encode, key)
return encoded_jwt

def get_username_from_token(self, token: str, secret_key: str = None) -> Optional[str]:
@ -73,11 +75,12 @@ class AuthService:
secret_key = DEFAULT_JWT_SECRET_KEY
log.error("A JWT secret key must be configured to secure the server, using an unsecured default key!")
algorithm = Config.instance().settings.Controller.jwt_algorithm
payload = jwt.decode(token, secret_key, algorithms=[algorithm])
username: str = payload.get("sub")
key = OctKey.import_key(secret_key)
payload = jwt.decode(token, key, algorithms=[algorithm])
username: str = payload.claims.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except (JWTError, ValidationError):
except (JoseError, ValidationError, ValueError):
raise credentials_exception
return token_data.username
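In isolation, the joserfc calls that replace python-jose above look roughly like this (the secret and claims below are placeholders):

    from joserfc import jwt
    from joserfc.jwk import OctKey
    from joserfc.errors import JoseError

    key = OctKey.import_key("change-this-secret")              # placeholder symmetric key for HS256
    token = jwt.encode({"alg": "HS256"}, {"sub": "admin"}, key)

    try:
        decoded = jwt.decode(token, key, algorithms=["HS256"])
        username = decoded.claims.get("sub")                   # claims live on the returned Token object
    except JoseError:
        username = None

Unlike python-jose, decode() returns a Token object rather than a plain dict, which is why the claims are now read from .claims and the except clause also catches JoseError and ValueError instead of JWTError.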
@ -46,6 +46,6 @@

gtag('config', 'G-0BT7QQV1W1');
</script>
<script src="runtime.24fa95b7061d7056.js" type="module"></script><script src="polyfills.319c79dd175e50d0.js" type="module"></script><script src="main.f802edd2b8c6db1d.js" type="module"></script>
<script src="runtime.24fa95b7061d7056.js" type="module"></script><script src="polyfills.319c79dd175e50d0.js" type="module"></script><script src="main.62c99707e4709a56.js" type="module"></script>

</body></html>
1
gns3server/static/web-ui/main.62c99707e4709a56.js
Normal file
File diff suppressed because one or more lines are too long
@ -97,18 +97,10 @@ async def wait_for_process_termination(process, timeout=10):
:param timeout: Timeout in seconds
"""

if sys.version_info >= (3, 5):
try:
await asyncio.wait_for(process.wait(), timeout=timeout)
except ProcessLookupError:
return
else:
while timeout > 0:
if process.returncode is not None:
return
await asyncio.sleep(0.1)
timeout -= 0.1
raise asyncio.TimeoutError()
try:
await asyncio.wait_for(process.wait(), timeout=timeout)
except ProcessLookupError:
return


async def _check_process(process, termination_callback):
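With Python 3.9 as the minimum version the polling fallback is gone and the helper reduces to a single asyncio.wait_for() call; a standalone sketch of the same pattern:

    import asyncio

    async def wait_for_termination(process: asyncio.subprocess.Process, timeout: float = 10.0) -> None:
        try:
            # raises asyncio.TimeoutError if the process is still alive after `timeout` seconds
            await asyncio.wait_for(process.wait(), timeout=timeout)
        except ProcessLookupError:
            return  # the process is already gone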
@ -40,10 +40,7 @@ class Pool:
while len(self._tasks) > 0 or len(pending) > 0:
while len(self._tasks) > 0 and len(pending) < self._concurrency:
task, args, kwargs = self._tasks.pop(0)
if sys.version_info >= (3, 7):
t = asyncio.create_task(task(*args, **kwargs))
else:
t = asyncio.get_event_loop().create_task(task(*args, **kwargs))
t = asyncio.create_task(task(*args, **kwargs))
pending.add(t)
(done, pending) = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
for task in done:
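The same cleanup applies to the task pool: asyncio.create_task() is always available, so the version check disappears. A self-contained sketch of the bounded-concurrency loop the Pool class implements (the coroutine list and limit are illustrative):

    import asyncio

    async def run_pool(coroutines, concurrency: int = 3) -> None:
        """Run awaitables with at most `concurrency` of them in flight at once."""
        waiting = list(coroutines)
        pending = set()
        while waiting or pending:
            while waiting and len(pending) < concurrency:
                pending.add(asyncio.create_task(waiting.pop(0)))
            # resume as soon as any in-flight task completes
            done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
            for task in done:
                task.result()  # surface exceptions from finished tasks

asyncio.wait() with FIRST_COMPLETED keeps the pool topped up instead of waiting for a whole batch to finish.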
@ -20,6 +20,11 @@ import stat
import aiofiles
import shutil

try:
import importlib_resources
except ImportError:
from importlib import resources as importlib_resources

from typing import List, AsyncGenerator
from ..config import Config
from . import force_unix_path

@ -111,6 +116,14 @@ async def list_images(image_type):
return images


def get_builtin_disks() -> List[str]:
builtin_disks = []
for entry in importlib_resources.files('gns3server').joinpath("disks").iterdir():
if entry.is_file():
builtin_disks.append(entry.name)
return builtin_disks


async def read_image_info(path: str, expected_image_type: str = None) -> dict:

header_magic_len = 7
@ -118,7 +131,7 @@ async def read_image_info(path: str, expected_image_type: str = None) -> dict:
async with aiofiles.open(path, "rb") as f:
image_header = await f.read(header_magic_len) # read the first 7 bytes of the file
if len(image_header) >= header_magic_len:
detected_image_type = check_valid_image_header(image_header)
detected_image_type = check_valid_image_header(path, image_header)
if expected_image_type and detected_image_type != expected_image_type:
raise InvalidImageError(f"Detected image type for '{path}' is {detected_image_type}, "
f"expected type is {expected_image_type}")
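get_builtin_disks() enumerates disk images shipped inside the gns3server package; with Python 3.9+ the same can be written against the standard library directly (a sketch assuming the package and directory names used above):

    from importlib import resources

    def list_packaged_files(package: str = "gns3server", directory: str = "disks") -> list[str]:
        names = []
        # resources.files() returns a Traversable rooted at the installed package
        for entry in resources.files(package).joinpath(directory).iterdir():
            if entry.is_file():
                names.append(entry.name)
        return names

The try/except import above keeps the importlib-resources backport in play on the oldest supported interpreters, matching the conditional requirement further down.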
@ -302,7 +315,7 @@ class InvalidImageError(Exception):
return self._message


def check_valid_image_header(data: bytes, allow_raw_image: bool = False) -> str:
def check_valid_image_header(path: str, data: bytes, allow_raw_image: bool = False) -> str:

if data[:7] == b'\x7fELF\x01\x02\x01':
# for IOS images: file must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
@ -317,7 +330,7 @@ def check_valid_image_header(data: bytes, allow_raw_image: bool = False) -> str:
else:
if allow_raw_image is True:
return "qemu"
raise InvalidImageError("Could not detect image type, please make sure it is a valid image")
raise InvalidImageError(f"{path}: could not detect image type, please make sure it is a valid image")


async def write_image(
@ -342,7 +355,7 @@ async def write_image(
async for chunk in stream:
if check_image_header and len(chunk) >= header_magic_len:
check_image_header = False
image_type = check_valid_image_header(chunk, allow_raw_image)
image_type = check_valid_image_header(image_path, chunk, allow_raw_image)
await f.write(chunk)
checksum.update(chunk)

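Passing the path into check_valid_image_header() only changes the error message; the detection itself is still a magic-number comparison on the first bytes of the file. A minimal sketch of that kind of check (the qcow2 signature b"QFI\xfb" is the standard one, the ELF test mirrors the comment above, and the function name is illustrative):

    def detect_image_type(path: str, data: bytes) -> str:
        # IOS images: 32-bit, big-endian ELF with ELF version 1
        if data[:7] == b"\x7fELF\x01\x02\x01":
            return "ios"
        # QEMU disk images: standard qcow2 magic number
        if data[:4] == b"QFI\xfb":
            return "qemu"
        raise ValueError(f"{path}: could not detect image type")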
@ -22,8 +22,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)

__version__ = "3.0.0"
__version__ = "3.0.3.dev1"
__version_info__ = (3, 0, 0, 0)
__version_info__ = (3, 0, 3, 99)

if "dev" in __version__:
try:
@ -10,7 +10,7 @@ authors = [
{ name = "Jeremy Grossmann", email = "developers@gns3.com" }
]
readme = "README.md"
requires-python = ">=3.8"
requires-python = ">=3.9"
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
@ -21,11 +21,11 @@ classifiers = [
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: Implementation :: CPython"
]
@ -1,23 +1,24 @@
uvicorn==0.32.0
uvicorn==0.33.0 # uvicorn 0.33 is the last version supporting Python 3.8
pydantic==2.9.2
pydantic==2.10.4
fastapi==0.115.5
fastapi==0.115.6
python-multipart==0.0.16
python-multipart==0.0.20
websockets==13.1
websockets==14.1
aiohttp>=3.10.10,<3.11
async-timeout==4.0.3
async-timeout==5.0.1; python_version < '3.11'
aiofiles>=24.1.0,<25.0
Jinja2>=3.1.4,<3.2
Jinja2>=3.1.5,<3.2
sentry-sdk>=2.17,<2.18 # optional dependency
sentry-sdk>=2.19.2,<2.20 # optional dependency
psutil>=6.1.0
psutil>=6.1.1
distro>=1.9.0
py-cpuinfo>=9.0.0,<10.0
greenlet==3.1.1 # necessary to run sqlalchemy on Python 3.13
sqlalchemy==2.0.36
aiosqlite==0.20.0
alembic==1.13.3
alembic==1.14.0
bcrypt==4.2.0
bcrypt==4.2.1
python-jose[cryptography]==3.3.0
joserfc==1.0.1
email-validator==2.2.0
watchfiles==0.24.0
watchdog==6.0.0
zstandard==0.23.0
platformdirs>=2.4.0,<3 # platformdirs >=3 conflicts when building Debian packages
importlib-resources>=1.3; python_version <= '3.9'
@ -1,6 +1,6 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
#
|
#
|
||||||
# Copyright (C) 2015 GNS3 Technologies Inc.
|
# Copyright (C) 2024 GNS3 Technologies Inc.
|
||||||
#
|
#
|
||||||
# This program is free software: you can redistribute it and/or modify
|
# This program is free software: you can redistribute it and/or modify
|
||||||
# it under the terms of the GNU General Public License as published by
|
# it under the terms of the GNU General Public License as published by
|
||||||
@ -16,19 +16,20 @@
|
|||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
#
|
#
|
||||||
# Install GNS3 on a remote Ubuntu LTS server
|
# Install GNS3 on a remote Ubuntu server
|
||||||
# This create a dedicated user and setup all the package
|
# This creates a dedicated user and setup all the packages
|
||||||
# and optionnaly a VPN
|
# and optionally a VPN
|
||||||
#
|
#
|
||||||
|
|
||||||
function help {
|
function help {
|
||||||
echo "Usage:" >&2
|
echo "Usage:" >&2
|
||||||
echo "--with-openvpn: Install OpenVPN" >&2
|
echo "--with-openvpn: Install OpenVPN" >&2
|
||||||
echo "--with-iou: Install IOU" >&2
|
echo "--with-iou: Install IOU support" >&2
|
||||||
echo "--with-i386-repository: Add the i386 repositories required by IOU if they are not already available on the system. Warning: this will replace your source.list in order to use the official Ubuntu mirror" >&2
|
echo "--with-i386-repository: Add the i386 repositories required by IOU i386 images. This is not needed for recent x86_64 IOU images." >&2
|
||||||
echo "--with-welcome: Install GNS3-VM welcome.py script" >&2
|
echo "--with-welcome: Install GNS3-VM welcome.py script" >&2
|
||||||
echo "--without-kvm: Disable KVM, required if system do not support it (limitation in some hypervisors and cloud providers). Warning: only disable KVM if strictly necessary as this will degrade performance" >&2
|
echo "--without-kvm: Disable KVM, required if system do not support it (limitation in some hypervisors and cloud providers). Warning: only disable KVM if strictly necessary as this will degrade performance" >&2
|
||||||
echo "--unstable: Use the GNS3 unstable repository"
|
echo "--unstable: Use the GNS3 unstable repository" >&2
|
||||||
|
echo "--custom-repository <repository>: Use a custom repository" >&2
|
||||||
echo "--help: This help" >&2
|
echo "--help: This help" >&2
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -43,15 +44,17 @@ then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# Default repository
|
||||||
|
REPOSITORY="ppa"
|
||||||
|
|
||||||
# Read the options
|
# Read the options
|
||||||
USE_VPN=0
|
USE_VPN=0
|
||||||
USE_IOU=0
|
USE_IOU=0
|
||||||
I386_REPO=0
|
I386_REPO=0
|
||||||
DISABLE_KVM=0
|
DISABLE_KVM=0
|
||||||
UNSTABLE=0
|
|
||||||
WELCOME_SETUP=0
|
WELCOME_SETUP=0
|
||||||
|
|
||||||
TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,with-welcome,without-kvm,unstable,help -n 'gns3-remote-install.sh' -- "$@"`
|
TEMP=`getopt -o h --long with-openvpn,with-iou,with-i386-repository,with-welcome,without-kvm,unstable,custom-repository:,help -n 'gns3-remote-install.sh' -- "$@"`
|
||||||
if [ $? != 0 ]
|
if [ $? != 0 ]
|
||||||
then
|
then
|
||||||
help
|
help
|
||||||
@ -83,9 +86,13 @@ while true ; do
|
|||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
--unstable)
|
--unstable)
|
||||||
UNSTABLE=1
|
REPOSITORY="unstable"
|
||||||
shift
|
shift
|
||||||
;;
|
;;
|
||||||
|
--custom-repository)
|
||||||
|
REPOSITORY="$2"
|
||||||
|
shift 2
|
||||||
|
;;
|
||||||
-h|--help)
|
-h|--help)
|
||||||
help
|
help
|
||||||
exit 1
|
exit 1
|
||||||
@ -103,91 +110,93 @@ UBUNTU_CODENAME=`lsb_release -c -s`
|
|||||||
|
|
||||||
log "Add GNS3 repository"
|
log "Add GNS3 repository"
|
||||||
|
|
||||||
if [ "$UBUNTU_CODENAME" == "trusty" ]
|
if [ ! -f "/etc/apt/sources.list.d/ubuntu.sources" ]
|
||||||
then
|
then
|
||||||
if [ $UNSTABLE == 1 ]
|
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys B83AAABFFBD82D21B543C8EA86C22C2EC6A24D7F
|
||||||
then
|
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
||||||
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
|
||||||
deb http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main
|
|
||||||
deb-src http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main
|
|
||||||
deb http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
|
|
||||||
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
|
|
||||||
EOFLIST
|
|
||||||
else
|
|
||||||
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
|
||||||
deb http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
|
deb http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
|
||||||
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
|
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
|
||||||
deb http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
|
|
||||||
deb-src http://ppa.launchpad.net/gns3/qemu/ubuntu $UBUNTU_CODENAME main
|
|
||||||
EOFLIST
|
EOFLIST
|
||||||
fi
|
|
||||||
else
|
else
|
||||||
if [ $UNSTABLE == 1 ]
|
|
||||||
then
|
cat <<EOFLIST > /etc/apt/sources.list.d/gns3-ppa.sources
|
||||||
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
Types: deb
|
||||||
deb http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main
|
URIs: https://ppa.launchpadcontent.net/gns3/$REPOSITORY/ubuntu/
|
||||||
deb-src http://ppa.launchpad.net/gns3/unstable/ubuntu $UBUNTU_CODENAME main
|
Suites: $UBUNTU_CODENAME
|
||||||
|
Components: main
|
||||||
|
Signed-By:
|
||||||
|
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||||
|
.
|
||||||
|
mQINBGY0jSYBEADMH5CvX8ZVX4XzAxdQ2CmF7t86IjFnQgtI18Q19nVnpKEGNyB5
|
||||||
|
pgotDMzkhGnxuhvz2zE9PZhd8VgkodB81V607d/Dy8FfI7t1BVQhLvJDx0H/q6RE
|
||||||
|
n2y9WxiuBzTHitoQTCTY3hjcr7AUNFFI64gUqwbkQmYbCWWsYOlDpRSkWKg8P8WK
|
||||||
|
08RetwTI0Iwoz8j+BkbPlubuImiVfh1TeH23FBuGIwL1r1Cps0wel6JAi+jaU9WG
|
||||||
|
j8MX3mQYFTAtk7f1lRubqWosB/A4xIu609pF1e1tAkWAGltYAeoFhDn+PfA9KgmV
|
||||||
|
fvxfVR7zmxp31imTJgXgUFCz+H0Xb3vpve8XsrsHZUP6StJ3+6cFXjNBV6PuO1FT
|
||||||
|
JWp86a+AYHg7+sUWcoJRZPCTbb/pOcCa0q1ch5qcLkiYEOGK+pYhbPptq6y8IsJW
|
||||||
|
N6EDNCVvVqVyTJy14FZWoOqxcpUiDOQ+su28j8++V+PMo+FO3SQqwEZwJXk7LF/4
|
||||||
|
wUipDCUh/WNjDqqgmYLoO+ttiiJPbEw3jtbO+zopbzYpyEC1f06Nz7uz1daOIN3J
|
||||||
|
etFPzSqWCE7Eq+hoVmAAm8gVmQir3rFJbIGBAvAaOLQEOkUlOlS7AezqUhdyhGER
|
||||||
|
Zrvc3eNqxY7G61SEHipEJ7/hpcDq0RRWCXHsoQqyHaPje826n2pGkJYt4QARAQAB
|
||||||
|
tBZMYXVuY2hwYWQgUFBBIGZvciBHTlMziQJOBBMBCgA4FiEEuDqqv/vYLSG1Q8jq
|
||||||
|
hsIsLsaiTX8FAmY0jSYCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQhsIs
|
||||||
|
LsaiTX9z9xAAq1uHmRgfYmELS0cr2YEnTWHPVE6s95Qx+0cr5zzNeWfmoAS9uSyl
|
||||||
|
z8bCm+Q2ZapzU/nOtkozU+RGjgcRRTKMVTyS0PjFX22965xHCRWnw79fPyrYouUw
|
||||||
|
H2cAT8WSGYEeVAbqhJSns0RnDpXuaxmWE1wT+iitY/QAjeXo22Z2mjv2bFTitKbY
|
||||||
|
hZbE5Eu8Olc5YHCVI0ofq84/Ii921iMibU6EDMmm/iOnMK2uHGbC59t0YG8Rm7mK
|
||||||
|
uk6+TpxOULjFeCWSkF2Dr33m8JQmtYZuFUnmqWPuSdBo3J0O1b0qTg+EP9FbDAtj
|
||||||
|
CoEKT/V1ccMBd3r77o23CGsvpV7bzEU60A+NsU8vb/AkOmouYiF+qaYDFGZDfWhK
|
||||||
|
p1HFmd1kt7YdgxsmoKoFJkbt1bBdcFJLV0Jcad5sfArg2aFDYf2giMxAw4iQ+9jc
|
||||||
|
MCuwWxiqWicPqJ5erNTzVfayBkjuZqBDVTO9wmG3DL4QmNosIBS7kq+NGrT8Ql22
|
||||||
|
FqYfdIZJDlKVtJKHK8eKJSB0dbFawV2h5p/CvQlIm6nthg5FzOyjvCkPkvxvveq+
|
||||||
|
SuNxFEscumFCgo7j7RMWHW9HWK3TUvMmYLMVjxL8kXyCwknp9GklBQHA/IPxRa/2
|
||||||
|
eFqqkmVbmNAoMzzw5wqa/BPcFEbgn+E+TFyZqbzp0F4QzPJZFkz16SA=
|
||||||
|
=xnj5
|
||||||
|
-----END PGP PUBLIC KEY BLOCK-----
|
||||||
EOFLIST
|
EOFLIST
|
||||||
else
|
|
||||||
cat <<EOFLIST > /etc/apt/sources.list.d/gns3.list
|
|
||||||
deb http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
|
|
||||||
deb-src http://ppa.launchpad.net/gns3/ppa/ubuntu $UBUNTU_CODENAME main
|
|
||||||
EOFLIST
|
|
||||||
fi
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ $I386_REPO == 1 ]
|
log "Updating system packages and installing curl"
|
||||||
then
|
apt update
|
||||||
cat <<EOFLIST2 >> /etc/apt/sources.list
|
apt install -y curl
|
||||||
###### Ubuntu Main Repos
|
|
||||||
deb http://archive.ubuntu.com/ubuntu/ $UBUNTU_CODENAME main universe multiverse
|
|
||||||
deb-src http://archive.ubuntu.com/ubuntu/ $UBUNTU_CODENAME main universe multiverse
|
|
||||||
|
|
||||||
###### Ubuntu Update Repos
|
log "Upgrading packages"
|
||||||
deb http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-security main universe multiverse
|
apt upgrade --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
|
||||||
deb http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-updates main universe multiverse
|
|
||||||
deb-src http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-security main universe multiverse
|
|
||||||
deb-src http://archive.ubuntu.com/ubuntu/ ${UBUNTU_CODENAME}-updates main universe multiverse
|
|
||||||
EOFLIST2
|
|
||||||
fi
|
|
||||||
|
|
||||||
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys A2E3EF7B
|
log "Installing the GNS3 server and its dependencies"
|
||||||
|
apt install -y gns3-server
|
||||||
|
|
||||||
log "Update system packages"
|
log "Creating user GNS3 with /opt/gns3 as home directory"
|
||||||
apt-get update
|
|
||||||
|
|
||||||
log "Upgrade packages"
|
|
||||||
apt-get upgrade --yes --force-yes -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
|
|
||||||
|
|
||||||
log "Install GNS3 packages"
|
|
||||||
apt-get install -y gns3-server
|
|
||||||
|
|
||||||
log "Create user GNS3 with /opt/gns3 as home directory"
|
|
||||||
if [ ! -d "/opt/gns3" ]
|
if [ ! -d "/opt/gns3" ]
|
||||||
then
|
then
|
||||||
useradd -m -d /opt/gns3 gns3
|
useradd -m -d /opt/gns3 gns3
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
log "Adding GNS3 to the ubridge group"
|
||||||
log "Add GNS3 to the ubridge group"
|
|
||||||
usermod -aG ubridge gns3
|
usermod -aG ubridge gns3
|
||||||
|
|
||||||
log "Install docker"
|
log "Installing Docker"
|
||||||
if [ ! -f "/usr/bin/docker" ]
|
if [ ! -f "/usr/bin/docker" ]
|
||||||
then
|
then
|
||||||
curl -sSL https://get.docker.com | bash
|
curl -sSL https://get.docker.com | bash
|
||||||
fi
|
fi
|
||||||
|
|
||||||
log "Add GNS3 to the docker group"
|
log "Adding GNS3 to the docker group"
|
||||||
usermod -aG docker gns3
|
usermod -aG docker gns3
|
||||||
|
|
||||||
if [ $USE_IOU == 1 ]
|
if [ $USE_IOU == 1 ]
|
||||||
then
|
then
|
||||||
log "Setup IOU"
|
log "Setting up IOU support"
|
||||||
dpkg --add-architecture i386
|
if [ $I386_REPO == 1 ]
|
||||||
apt-get update
|
then
|
||||||
|
log "Enabling i386 architecture for IOU support"
|
||||||
|
dpkg --add-architecture i386
|
||||||
|
apt update
|
||||||
|
fi
|
||||||
|
|
||||||
apt-get install -y gns3-iou
|
apt install -y gns3-iou
|
||||||
|
|
||||||
# Force the host name to gns3vm
|
# Force the host name to gns3vm
|
||||||
echo gns3vm > /etc/hostname
|
echo gns3vm > /etc/hostname
|
||||||
@ -196,31 +205,18 @@ then
|
|||||||
|
|
||||||
# Force hostid for IOU
|
# Force hostid for IOU
|
||||||
dd if=/dev/zero bs=4 count=1 of=/etc/hostid
|
dd if=/dev/zero bs=4 count=1 of=/etc/hostid
|
||||||
|
|
||||||
# Block potential IOU phone home call (xml.cisco.com is not in use at this time)
|
|
||||||
log "Block IOU phone home call"
|
|
||||||
if [ "$UBUNTU_CODENAME" == "focal" ]
|
|
||||||
then
|
|
||||||
iptables -I OUTPUT -p udp --dport 53 -m string --hex-string "|03|xml|05|cisco|03|com" --algo bm -j DROP
|
|
||||||
echo iptables-persistent iptables-persistent/autosave_v4 boolean true | debconf-set-selections
|
|
||||||
echo iptables-persistent iptables-persistent/autosave_v6 boolean true | debconf-set-selections
|
|
||||||
apt-get install -y iptables-persistent
|
|
||||||
else
|
|
||||||
echo "127.0.0.254 xml.cisco.com" | tee --append /etc/hosts
|
|
||||||
fi
|
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
log "Add gns3 to the kvm group"
|
log "Adding gns3 to the kvm group"
|
||||||
usermod -aG kvm gns3
|
usermod -aG kvm gns3
|
||||||
|
|
||||||
log "Setup GNS3 server"
|
log "Setting up the GNS3 server configuration"
|
||||||
|
|
||||||
mkdir -p /etc/gns3
|
mkdir -p /etc/gns3
|
||||||
cat <<EOFC > /etc/gns3/gns3_server.conf
|
cat <<EOFC > /etc/gns3/gns3_server.conf
|
||||||
[Server]
|
[Server]
|
||||||
host = 0.0.0.0
|
host = 0.0.0.0
|
||||||
port = 3080
|
port = 3080
|
||||||
images_path = /opt/gns3/images
|
images_path = /opt/gns3/images
|
||||||
projects_path = /opt/gns3/projects
|
projects_path = /opt/gns3/projects
|
||||||
appliances_path = /opt/gns3/appliances
|
appliances_path = /opt/gns3/appliances
|
||||||
@ -234,52 +230,15 @@ EOFC
|
|||||||
|
|
||||||
if [ $DISABLE_KVM == 1 ]
|
if [ $DISABLE_KVM == 1 ]
|
||||||
then
|
then
|
||||||
log "Disable KVM support"
|
log "Disabling KVM support"
|
||||||
sed -i 's/hardware_acceleration = True/hardware_acceleration = False/g' /etc/gns3/gns3_server.conf
|
sed -i 's/hardware_acceleration = True/hardware_acceleration = False/g' /etc/gns3/gns3_server.conf
|
||||||
fi
|
fi
|
||||||
|
|
||||||
chown -R gns3:gns3 /etc/gns3
|
chown -R gns3:gns3 /etc/gns3
|
||||||
chmod -R 700 /etc/gns3
|
chmod -R 700 /etc/gns3
|
||||||
|
|
||||||
if [ "$UBUNTU_CODENAME" == "trusty" ]
|
log "Installing the GNS3 systemd service"
|
||||||
then
|
cat <<EOFI > /lib/systemd/system/gns3.service
|
||||||
cat <<EOFI > /etc/init/gns3.conf
|
|
||||||
description "GNS3 server"
|
|
||||||
author "GNS3 Team"
|
|
||||||
|
|
||||||
start on filesystem or runlevel [2345]
|
|
||||||
stop on runlevel [016]
|
|
||||||
respawn
|
|
||||||
console log
|
|
||||||
|
|
||||||
|
|
||||||
script
|
|
||||||
exec start-stop-daemon --start --make-pidfile --pidfile /var/run/gns3.pid --chuid gns3 --exec "/usr/bin/gns3server"
|
|
||||||
end script
|
|
||||||
|
|
||||||
pre-start script
|
|
||||||
echo "" > /var/log/upstart/gns3.log
|
|
||||||
echo "[`date`] GNS3 Starting"
|
|
||||||
end script
|
|
||||||
|
|
||||||
pre-stop script
|
|
||||||
echo "[`date`] GNS3 Stopping"
|
|
||||||
end script
|
|
||||||
EOFI
|
|
||||||
|
|
||||||
chown root:root /etc/init/gns3.conf
|
|
||||||
chmod 644 /etc/init/gns3.conf
|
|
||||||
|
|
||||||
|
|
||||||
log "Start GNS3 service"
|
|
||||||
set +e
|
|
||||||
service gns3 stop
|
|
||||||
set -e
|
|
||||||
service gns3 start
|
|
||||||
|
|
||||||
else
|
|
||||||
# Install systemd service
|
|
||||||
cat <<EOFI > /lib/systemd/system/gns3.service
|
|
||||||
[Unit]
|
[Unit]
|
||||||
Description=GNS3 server
|
Description=GNS3 server
|
||||||
After=network-online.target
|
After=network-online.target
|
||||||
@ -302,15 +261,15 @@ LimitNOFILE=16384
|
|||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
EOFI
|
EOFI
|
||||||
chmod 755 /lib/systemd/system/gns3.service
|
|
||||||
chown root:root /lib/systemd/system/gns3.service
|
|
||||||
|
|
||||||
log "Start GNS3 service"
|
chmod 755 /lib/systemd/system/gns3.service
|
||||||
systemctl enable gns3
|
chown root:root /lib/systemd/system/gns3.service
|
||||||
systemctl start gns3
|
|
||||||
fi
|
|
||||||
|
|
||||||
log "GNS3 installed with success"
|
log "Starting the GNS3 service"
|
||||||
|
systemctl enable gns3
|
||||||
|
systemctl start gns3
|
||||||
|
|
||||||
|
log "GNS3 has been installed with success"
|
||||||
|
|
||||||
if [ $WELCOME_SETUP == 1 ]
|
if [ $WELCOME_SETUP == 1 ]
|
||||||
then
|
then
|
||||||
@ -319,11 +278,9 @@ gns3 ALL = (ALL) NOPASSWD: /usr/bin/apt-key
|
|||||||
gns3 ALL = (ALL) NOPASSWD: /usr/bin/apt-get
|
gns3 ALL = (ALL) NOPASSWD: /usr/bin/apt-get
|
||||||
gns3 ALL = (ALL) NOPASSWD: /usr/sbin/reboot
|
gns3 ALL = (ALL) NOPASSWD: /usr/sbin/reboot
|
||||||
EOFI
|
EOFI
|
||||||
NEEDRESTART_MODE=a apt-get install -y net-tools
|
NEEDRESTART_MODE=a apt install -y net-tools
|
||||||
NEEDRESTART_MODE=a apt-get install -y python3-pip
|
NEEDRESTART_MODE=a apt install -y dialog
|
||||||
NEEDRESTART_MODE=a apt-get install -y dialog
|
NEEDRESTART_MODE=a apt install -y python3-dialog
|
||||||
pip install --no-input --upgrade pip
|
|
||||||
pip install --no-input pythondialog
|
|
||||||
|
|
||||||
#Pull down welcome script from repo
|
#Pull down welcome script from repo
|
||||||
curl https://raw.githubusercontent.com/GNS3/gns3-server/master/scripts/welcome.py > /usr/local/bin/welcome.py
|
curl https://raw.githubusercontent.com/GNS3/gns3-server/master/scripts/welcome.py > /usr/local/bin/welcome.py
|
||||||
@ -350,19 +307,15 @@ fi
|
|||||||
|
|
||||||
if [ $USE_VPN == 1 ]
|
if [ $USE_VPN == 1 ]
|
||||||
then
|
then
|
||||||
log "Setup VPN"
|
log "Setting up OpenVPN"
|
||||||
|
|
||||||
log "Change GNS3 to listen on VPN interface"
|
log "Changing the GNS3 server configuration to listen on VPN interface"
|
||||||
|
|
||||||
sed -i 's/host = 0.0.0.0/host = 172.16.253.1/' /etc/gns3/gns3_server.conf
|
sed -i 's/host = 0.0.0.0/host = 172.16.253.1/' /etc/gns3/gns3_server.conf
|
||||||
|
|
||||||
log "Install packages for OpenVPN"
|
log "Installing the OpenVPN packages"
|
||||||
|
|
||||||
apt-get install -y \
|
apt install -y openvpn uuid dnsutils nginx-light
|
||||||
openvpn \
|
|
||||||
uuid \
|
|
||||||
dnsutils \
|
|
||||||
nginx-light
|
|
||||||
|
|
||||||
MY_IP_ADDR=$(dig @ns1.google.com -t txt o-o.myaddr.l.google.com +short -4 | sed 's/"//g')
|
MY_IP_ADDR=$(dig @ns1.google.com -t txt o-o.myaddr.l.google.com +short -4 | sed 's/"//g')
|
||||||
|
|
||||||
@ -370,7 +323,7 @@ log "IP detected: $MY_IP_ADDR"
|
|||||||
|
|
||||||
UUID=$(uuid)
|
UUID=$(uuid)
|
||||||
|
|
||||||
log "Update motd"
|
log "Updating motd"
|
||||||
|
|
||||||
cat <<EOFMOTD > /etc/update-motd.d/70-openvpn
|
cat <<EOFMOTD > /etc/update-motd.d/70-openvpn
|
||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
@ -381,7 +334,7 @@ echo "http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn"
|
|||||||
echo ""
|
echo ""
|
||||||
echo "And add it to your openvpn client."
|
echo "And add it to your openvpn client."
|
||||||
echo ""
|
echo ""
|
||||||
echo "apt-get remove nginx-light to disable the HTTP server."
|
echo "apt remove nginx-light to disable the HTTP server."
|
||||||
echo "And remove this file with rm /etc/update-motd.d/70-openvpn"
|
echo "And remove this file with rm /etc/update-motd.d/70-openvpn"
|
||||||
EOFMOTD
|
EOFMOTD
|
||||||
chmod 755 /etc/update-motd.d/70-openvpn
|
chmod 755 /etc/update-motd.d/70-openvpn
|
||||||
@ -391,7 +344,7 @@ mkdir -p /etc/openvpn/
|
|||||||
[ -d /dev/net ] || mkdir -p /dev/net
|
[ -d /dev/net ] || mkdir -p /dev/net
|
||||||
[ -c /dev/net/tun ] || mknod /dev/net/tun c 10 200
|
[ -c /dev/net/tun ] || mknod /dev/net/tun c 10 200
|
||||||
|
|
||||||
log "Create keys"
|
log "Creating OpenVPN keys"
|
||||||
|
|
||||||
[ -f /etc/openvpn/dh.pem ] || openssl dhparam -out /etc/openvpn/dh.pem 2048
|
[ -f /etc/openvpn/dh.pem ] || openssl dhparam -out /etc/openvpn/dh.pem 2048
|
||||||
[ -f /etc/openvpn/key.pem ] || openssl genrsa -out /etc/openvpn/key.pem 2048
|
[ -f /etc/openvpn/key.pem ] || openssl genrsa -out /etc/openvpn/key.pem 2048
|
||||||
@ -399,7 +352,7 @@ chmod 600 /etc/openvpn/key.pem
|
|||||||
[ -f /etc/openvpn/csr.pem ] || openssl req -new -key /etc/openvpn/key.pem -out /etc/openvpn/csr.pem -subj /CN=OpenVPN/
|
[ -f /etc/openvpn/csr.pem ] || openssl req -new -key /etc/openvpn/key.pem -out /etc/openvpn/csr.pem -subj /CN=OpenVPN/
|
||||||
[ -f /etc/openvpn/cert.pem ] || openssl x509 -req -in /etc/openvpn/csr.pem -out /etc/openvpn/cert.pem -signkey /etc/openvpn/key.pem -days 24855
|
[ -f /etc/openvpn/cert.pem ] || openssl x509 -req -in /etc/openvpn/csr.pem -out /etc/openvpn/cert.pem -signkey /etc/openvpn/key.pem -days 24855
|
||||||
|
|
||||||
log "Create client configuration"
|
log "Creating OpenVPN client configuration"
|
||||||
cat <<EOFCLIENT > /root/client.ovpn
|
cat <<EOFCLIENT > /root/client.ovpn
|
||||||
client
|
client
|
||||||
nobind
|
nobind
|
||||||
@ -441,7 +394,7 @@ status openvpn-status-1194.log
|
|||||||
log-append /var/log/openvpn-udp1194.log
|
log-append /var/log/openvpn-udp1194.log
|
||||||
EOFUDP
|
EOFUDP
|
||||||
|
|
||||||
log "Setup HTTP server for serving client certificate"
|
log "Setting up an HTTP server for serving client certificate"
|
||||||
mkdir -p /usr/share/nginx/openvpn/$UUID
|
mkdir -p /usr/share/nginx/openvpn/$UUID
|
||||||
cp /root/client.ovpn /usr/share/nginx/openvpn/$UUID/$HOSTNAME.ovpn
|
cp /root/client.ovpn /usr/share/nginx/openvpn/$UUID/$HOSTNAME.ovpn
|
||||||
touch /usr/share/nginx/openvpn/$UUID/index.html
|
touch /usr/share/nginx/openvpn/$UUID/index.html
|
||||||
@ -458,7 +411,7 @@ EOFNGINX
|
|||||||
service nginx stop
|
service nginx stop
|
||||||
service nginx start
|
service nginx start
|
||||||
|
|
||||||
log "Restart OpenVPN and GNS3"
|
log "Restarting OpenVPN and GNS3"
|
||||||
|
|
||||||
set +e
|
set +e
|
||||||
service openvpn stop
|
service openvpn stop
|
||||||
@ -466,15 +419,15 @@ service openvpn start
|
|||||||
service gns3 stop
|
service gns3 stop
|
||||||
service gns3 start
|
service gns3 start
|
||||||
|
|
||||||
log "Download http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn to setup your OpenVPN client after rebooting the server"
|
log "Please download http://$MY_IP_ADDR:8003/$UUID/$HOSTNAME.ovpn to setup your OpenVPN client after rebooting the server"
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [ $WELCOME_SETUP == 1 ]
|
if [ $WELCOME_SETUP == 1 ]
|
||||||
then
|
then
|
||||||
NEEDRESTART_MODE=a apt-get update
|
NEEDRESTART_MODE=a apt update
|
||||||
NEEDRESTART_MODE=a apt-get upgrade
|
NEEDRESTART_MODE=a apt upgrade
|
||||||
python3 -c 'import sys; sys.path.append("/usr/local/bin/"); import welcome; ws = welcome.Welcome_dialog(); ws.repair_remote_install()'
|
python3 -c 'import sys; sys.path.append("/usr/local/bin/"); import welcome; ws = welcome.Welcome_dialog(); ws.repair_remote_install()'
|
||||||
cd /opt/gns3
|
cd /opt/gns3
|
||||||
su gns3
|
su gns3
|
||||||
fi
|
fi
|
||||||
|
@ -28,14 +28,16 @@ pytestmark = pytest.mark.asyncio
|
|||||||
|
|
||||||
class TestApplianceRoutes:
|
class TestApplianceRoutes:
|
||||||
|
|
||||||
@pytest.fixture(autouse=True)
|
# @pytest.fixture(autouse=True)
|
||||||
def _install_builtin_appliances(self, controller: Controller):
|
# def _install_builtin_appliances(self, controller: Controller):
|
||||||
|
#
|
||||||
|
# controller.appliance_manager.install_builtin_appliances()
|
||||||
|
# controller.appliance_manager.load_appliances()
|
||||||
|
|
||||||
controller.appliance_manager.install_builtin_appliances()
|
async def test_appliances_list(self, app: FastAPI, client: AsyncClient, controller: Controller) -> None:
|
||||||
|
|
||||||
|
await controller.appliance_manager.install_builtin_appliances()
|
||||||
controller.appliance_manager.load_appliances()
|
controller.appliance_manager.load_appliances()
|
||||||
|
|
||||||
async def test_appliances_list(self, app: FastAPI, client: AsyncClient) -> None:
|
|
||||||
|
|
||||||
response = await client.get(app.url_path_for("get_appliances"))
|
response = await client.get(app.url_path_for("get_appliances"))
|
||||||
assert response.status_code == status.HTTP_200_OK
|
assert response.status_code == status.HTTP_200_OK
|
||||||
assert len(response.json()) > 0
|
assert len(response.json()) > 0
|
||||||
|
@ -19,6 +19,7 @@ import os
|
|||||||
import pytest
|
import pytest
|
||||||
import hashlib
|
import hashlib
|
||||||
|
|
||||||
|
from tests.utils import asyncio_patch
|
||||||
from sqlalchemy.ext.asyncio import AsyncSession
|
from sqlalchemy.ext.asyncio import AsyncSession
|
||||||
from fastapi import FastAPI, status
|
from fastapi import FastAPI, status
|
||||||
from httpx import AsyncClient
|
from httpx import AsyncClient
|
||||||
@ -261,10 +262,13 @@ class TestImageRoutes:
|
|||||||
|
|
||||||
async def test_prune_images(self, app: FastAPI, client: AsyncClient, db_session: AsyncSession) -> None:
|
async def test_prune_images(self, app: FastAPI, client: AsyncClient, db_session: AsyncSession) -> None:
|
||||||
|
|
||||||
response = await client.post(app.url_path_for("prune_images"))
|
images_repo = ImagesRepository(db_session)
|
||||||
|
images_in_db = await images_repo.get_images()
|
||||||
|
assert len(images_in_db) != 0
|
||||||
|
|
||||||
|
response = await client.delete(app.url_path_for("prune_images"))
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
images_repo = ImagesRepository(db_session)
|
|
||||||
images_in_db = await images_repo.get_images()
|
images_in_db = await images_repo.get_images()
|
||||||
assert len(images_in_db) == 0
|
assert len(images_in_db) == 0
|
||||||
|
|
||||||
@ -275,7 +279,7 @@ class TestImageRoutes:
|
|||||||
controller: Controller
|
controller: Controller
|
||||||
) -> None:
|
) -> None:
|
||||||
|
|
||||||
controller.appliance_manager.install_builtin_appliances()
|
await controller.appliance_manager.install_builtin_appliances()
|
||||||
controller.appliance_manager.load_appliances() # make sure appliances are loaded
|
controller.appliance_manager.load_appliances() # make sure appliances are loaded
|
||||||
image_path = "tests/resources/empty30G.qcow2"
|
image_path = "tests/resources/empty30G.qcow2"
|
||||||
image_name = os.path.basename(image_path)
|
image_name = os.path.basename(image_path)
|
||||||
@ -292,3 +296,32 @@ class TestImageRoutes:
|
|||||||
assert len(templates) == 1
|
assert len(templates) == 1
|
||||||
assert templates[0].name == "Empty VM"
|
assert templates[0].name == "Empty VM"
|
||||||
assert templates[0].version == "30G"
|
assert templates[0].version == "30G"
|
||||||
|
await templates_repo.delete_template(templates[0].template_id)
|
||||||
|
|
||||||
|
async def test_install_all(
|
||||||
|
self, app: FastAPI,
|
||||||
|
client: AsyncClient,
|
||||||
|
db_session: AsyncSession,
|
||||||
|
controller: Controller
|
||||||
|
) -> None:
|
||||||
|
|
||||||
|
image_path = "tests/resources/empty100G.qcow2"
|
||||||
|
image_name = os.path.basename(image_path)
|
||||||
|
with open(image_path, "rb") as f:
|
||||||
|
image_data = f.read()
|
||||||
|
response = await client.post(
|
||||||
|
app.url_path_for("upload_image", image_path=image_name),
|
||||||
|
content=image_data)
|
||||||
|
assert response.status_code == status.HTTP_201_CREATED
|
||||||
|
|
||||||
|
controller.appliance_manager.load_appliances() # make sure appliances are loaded
|
||||||
|
with asyncio_patch("gns3server.api.routes.controller.images.get_builtin_disks", return_value=[]) as mock:
|
||||||
|
response = await client.post(app.url_path_for("install_images"))
|
||||||
|
assert mock.called
|
||||||
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
|
templates_repo = TemplatesRepository(db_session)
|
||||||
|
templates = await templates_repo.get_templates()
|
||||||
|
assert len(templates) == 1
|
||||||
|
assert templates[0].name == "Empty VM"
|
||||||
|
assert templates[0].version == "100G"
|
@ -16,6 +16,8 @@
|
|||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
import os
|
import os
|
||||||
|
import shutil
|
||||||
|
|
||||||
import pytest
|
import pytest
|
||||||
import uuid
|
import uuid
|
||||||
import unittest.mock
|
import unittest.mock
|
||||||
@ -145,29 +147,39 @@ class TestTemplateRoutes:
|
|||||||
tmpdir: str,
|
tmpdir: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
|
|
||||||
path = os.path.join(tmpdir, "test.qcow2")
|
image1 = os.path.join(tmpdir, "image1.qcow2")
|
||||||
with open(path, "wb+") as f:
|
with open(image1, "wb+") as f:
|
||||||
f.write(b'\x42\x42\x42\x42')
|
f.write(b'\x42\x42\x42\x42')
|
||||||
|
|
||||||
|
image2 = os.path.join(tmpdir, "image2.qcow2")
|
||||||
|
with open(image2, "wb+") as f:
|
||||||
|
f.write(b'\x42\x42\x42\x42')
|
||||||
|
|
||||||
images_repo = ImagesRepository(db_session)
|
images_repo = ImagesRepository(db_session)
|
||||||
await images_repo.add_image("test.qcow2", "qemu", 42, path, "e342eb86c1229b6c154367a5476969b5", "md5")
|
await images_repo.add_image("image1.qcow2", "qemu", 42, image1, "e342eb86c1229b6c154367a5476969b5", "md5")
|
||||||
|
await images_repo.add_image("image2.qcow2", "qemu", 42, image2, "e342eb86c1229b6c154367a5476969b5", "md5")
|
||||||
|
|
||||||
template_id = str(uuid.uuid4())
|
template_id = str(uuid.uuid4())
|
||||||
params = {"template_id": template_id,
|
params = {"template_id": template_id,
|
||||||
"name": "QEMU_TEMPLATE",
|
"name": "QEMU_TEMPLATE",
|
||||||
"compute_id": "local",
|
"compute_id": "local",
|
||||||
"hda_disk_image": "test.qcow2",
|
"hda_disk_image": "image1.qcow2",
|
||||||
|
"hdb_disk_image": "image2.qcow2",
|
||||||
"template_type": "qemu"}
|
"template_type": "qemu"}
|
||||||
|
|
||||||
response = await client.post(app.url_path_for("create_template"), json=params)
|
response = await client.post(app.url_path_for("create_template"), json=params)
|
||||||
assert response.status_code == status.HTTP_201_CREATED
|
assert response.status_code == status.HTTP_201_CREATED
|
||||||
|
|
||||||
|
templates_repo = TemplatesRepository(db_session)
|
||||||
|
images = await templates_repo.get_template_images(response.json().get("template_id"))
|
||||||
|
assert len(images) == 2
|
||||||
|
|
||||||
response = await client.delete(
|
response = await client.delete(
|
||||||
app.url_path_for("delete_template", template_id=template_id),
|
app.url_path_for("delete_template", template_id=template_id),
|
||||||
params={"prune_images": True}
|
params={"prune_images": True}
|
||||||
)
|
)
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
images_repo = ImagesRepository(db_session)
|
|
||||||
images = await images_repo.get_images()
|
images = await images_repo.get_images()
|
||||||
assert len(images) == 0
|
assert len(images) == 0
|
||||||
|
|
||||||
|
@ -21,7 +21,8 @@ from typing import Optional
|
|||||||
from fastapi import FastAPI, HTTPException, status
|
from fastapi import FastAPI, HTTPException, status
|
||||||
from sqlalchemy import update
|
from sqlalchemy import update
|
||||||
from httpx import AsyncClient
|
from httpx import AsyncClient
|
||||||
from jose import jwt
|
from joserfc import jwt
|
||||||
|
from joserfc.jwk import OctKey
|
||||||
|
|
||||||
from sqlalchemy.ext.asyncio import AsyncSession
|
from sqlalchemy.ext.asyncio import AsyncSession
|
||||||
from gns3server.db.repositories.users import UsersRepository
|
from gns3server.db.repositories.users import UsersRepository
|
||||||
@ -166,16 +167,23 @@ class TestAuthTokens:
|
|||||||
|
|
||||||
jwt_secret = config.settings.Controller.jwt_secret_key
|
jwt_secret = config.settings.Controller.jwt_secret_key
|
||||||
token = auth_service.create_access_token(test_user.username)
|
token = auth_service.create_access_token(test_user.username)
|
||||||
payload = jwt.decode(token, jwt_secret, algorithms=["HS256"])
|
key = OctKey.import_key(jwt_secret)
|
||||||
username = payload.get("sub")
|
payload = jwt.decode(token, key, algorithms=["HS256"])
|
||||||
|
username = payload.claims.get("sub")
|
||||||
assert username == test_user.username
|
assert username == test_user.username
|
||||||
|
|
||||||
async def test_token_missing_user_is_invalid(self, app: FastAPI, client: AsyncClient, config: Config) -> None:
|
async def test_decode_token_with_wrong_algorithm(
|
||||||
|
self,
|
||||||
|
app: FastAPI,
|
||||||
|
client: AsyncClient,
|
||||||
|
test_user: User,
|
||||||
|
config: Config
|
||||||
|
) -> None:
|
||||||
|
|
||||||
jwt_secret = config.settings.Controller.jwt_secret_key
|
jwt_secret = config.settings.Controller.jwt_secret_key
|
||||||
token = auth_service.create_access_token(None)
|
token = auth_service.create_access_token(test_user.username)
|
||||||
with pytest.raises(jwt.JWTError):
|
with pytest.raises(ValueError):
|
||||||
jwt.decode(token, jwt_secret, algorithms=["HS256"])
|
jwt.decode(token, jwt_secret, algorithms=["ES256"])
|
||||||
|
|
||||||
async def test_can_retrieve_username_from_token(
|
async def test_can_retrieve_username_from_token(
|
||||||
self,
|
self,
|
||||||
@ -236,9 +244,10 @@ class TestUserLogin:
|
|||||||
|
|
||||||
# check that token exists in response and has user encoded within it
|
# check that token exists in response and has user encoded within it
|
||||||
token = response.json().get("access_token")
|
token = response.json().get("access_token")
|
||||||
payload = jwt.decode(token, jwt_secret, algorithms=["HS256"])
|
key = OctKey.import_key(jwt_secret)
|
||||||
assert "sub" in payload
|
payload = jwt.decode(token, key, algorithms=["HS256"])
|
||||||
username = payload.get("sub")
|
assert "sub" in payload.claims
|
||||||
|
username = payload.claims.get("sub")
|
||||||
assert username == test_user.username
|
assert username == test_user.username
|
||||||
|
|
||||||
# check that token is proper type
|
# check that token is proper type
|
||||||
|
@ -400,10 +400,12 @@ def run_around_tests(monkeypatch, config, port_manager):
|
|||||||
config.settings.VMware.vmrun_path = tmppath
|
config.settings.VMware.vmrun_path = tmppath
|
||||||
config.settings.Dynamips.dynamips_path = tmppath
|
config.settings.Dynamips.dynamips_path = tmppath
|
||||||
|
|
||||||
|
|
||||||
# Force turn off KVM because it's not available on CI
|
# Force turn off KVM because it's not available on CI
|
||||||
config.settings.Qemu.enable_hardware_acceleration = False
|
config.settings.Qemu.enable_hardware_acceleration = False
|
||||||
|
|
||||||
|
# avoid monitoring for new images while testing
|
||||||
|
config.settings.Server.auto_discover_images = False
|
||||||
|
|
||||||
monkeypatch.setattr("gns3server.utils.path.get_default_project_directory", lambda *args: os.path.join(tmppath, 'projects'))
|
monkeypatch.setattr("gns3server.utils.path.get_default_project_directory", lambda *args: os.path.join(tmppath, 'projects'))
|
||||||
|
|
||||||
# Force sys.platform to the original value. Because it seems not be restored correctly after each test
|
# Force sys.platform to the original value. Because it seems not be restored correctly after each test
|
||||||
|
@ -87,7 +87,7 @@ async def test_compute_httpQuery(compute):
|
|||||||
response.status = 200
|
response.status = 200
|
||||||
await compute.post("/projects", {"a": "b"})
|
await compute.post("/projects", {"a": "b"})
|
||||||
await compute.close()
|
await compute.close()
|
||||||
mock.assert_called_with("POST", "https://example.com:84/v3/compute/projects", data=b'{"a": "b"}', headers={'content-type': 'application/json'}, auth=None, chunked=None, timeout=20)
|
mock.assert_called_with("POST", "https://example.com:84/v3/compute/projects", data=b'{"a": "b"}', headers={'content-type': 'application/json'}, auth=None, chunked=None, timeout=120)
|
||||||
assert compute._auth is None
|
assert compute._auth is None
|
||||||
|
|
||||||
|
|
||||||
@ -102,7 +102,7 @@ async def test_compute_httpQueryAuth(compute):
|
|||||||
compute.password = SecretStr("toor")
|
compute.password = SecretStr("toor")
|
||||||
await compute.post("/projects", {"a": "b"})
|
await compute.post("/projects", {"a": "b"})
|
||||||
await compute.close()
|
await compute.close()
|
||||||
mock.assert_called_with("POST", "https://example.com:84/v3/compute/projects", data=b'{"a": "b"}', headers={'content-type': 'application/json'}, auth=compute._auth, chunked=None, timeout=20)
|
mock.assert_called_with("POST", "https://example.com:84/v3/compute/projects", data=b'{"a": "b"}', headers={'content-type': 'application/json'}, auth=compute._auth, chunked=None, timeout=120)
|
||||||
assert compute._auth.login == "root"
|
assert compute._auth.login == "root"
|
||||||
assert compute._auth.password == "toor"
|
assert compute._auth.password == "toor"
|
||||||
|
|
||||||
@ -162,7 +162,7 @@ async def test_compute_httpQueryNotConnectedInvalidVersion(compute):
|
|||||||
with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
|
with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
|
||||||
with pytest.raises(ControllerError):
|
with pytest.raises(ControllerError):
|
||||||
await compute.post("/projects", {"a": "b"})
|
await compute.post("/projects", {"a": "b"})
|
||||||
mock.assert_any_call("GET", "https://example.com:84/v3/compute/capabilities", headers={'content-type': 'application/json'}, data=None, auth=None, chunked=None, timeout=20)
|
mock.assert_any_call("GET", "https://example.com:84/v3/compute/capabilities", headers={'content-type': 'application/json'}, data=None, auth=None, chunked=None, timeout=120)
|
||||||
await compute.close()
|
await compute.close()
|
||||||
|
|
||||||
|
|
||||||
@@ -176,7 +176,7 @@ async def test_compute_httpQueryNotConnectedNonGNS3Server(compute):
     with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
         with pytest.raises(ControllerError):
             await compute.post("/projects", {"a": "b"})
-        mock.assert_any_call("GET", "https://example.com:84/v3/compute/capabilities", headers={'content-type': 'application/json'}, data=None, auth=None, chunked=None, timeout=20)
+        mock.assert_any_call("GET", "https://example.com:84/v3/compute/capabilities", headers={'content-type': 'application/json'}, data=None, auth=None, chunked=None, timeout=120)
     await compute.close()

@@ -190,7 +190,7 @@ async def test_compute_httpQueryNotConnectedNonGNS3Server2(compute):
     with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
         with pytest.raises(ControllerError):
             await compute.post("/projects", {"a": "b"})
-        mock.assert_any_call("GET", "https://example.com:84/v3/compute/capabilities", headers={'content-type': 'application/json'}, data=None, auth=None, chunked=None, timeout=20)
+        mock.assert_any_call("GET", "https://example.com:84/v3/compute/capabilities", headers={'content-type': 'application/json'}, data=None, auth=None, chunked=None, timeout=120)


 @pytest.mark.asyncio
@@ -228,7 +228,7 @@ async def test_compute_httpQuery_project(compute):
         project = Project(name="Test")
         mock_notification.assert_called()
         await compute.post("/projects", project)
-        mock.assert_called_with("POST", "https://example.com:84/v3/compute/projects", data=json.dumps(project.asdict()), headers={'content-type': 'application/json'}, auth=None, chunked=None, timeout=20)
+        mock.assert_called_with("POST", "https://example.com:84/v3/compute/projects", data=json.dumps(project.asdict()), headers={'content-type': 'application/json'}, auth=None, chunked=None, timeout=120)
         await compute.close()

 # FIXME: https://github.com/aio-libs/aiohttp/issues/2525
@@ -430,7 +430,7 @@ async def test_interfaces(compute):
     response.status = 200
     with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
         assert await compute.interfaces() == res
-        mock.assert_any_call("GET", "https://example.com:84/v3/compute/network/interfaces", auth=None, chunked=None, data=None, headers={'content-type': 'application/json'}, timeout=20)
+        mock.assert_any_call("GET", "https://example.com:84/v3/compute/network/interfaces", auth=None, chunked=None, data=None, headers={'content-type': 'application/json'}, timeout=120)
     await compute.close()

@@ -245,7 +245,8 @@ async def test_start(controller):
     }

     #with asyncio_patch("gns3server.controller.compute.Compute.connect") as mock:
-    await controller.start()
+    with asyncio_patch("gns3server.controller.Controller._install_builtin_disks", return_value=[]):
+        await controller.start()
     #assert mock.called
     assert len(controller.computes) == 1  # Local compute is created
     assert controller.computes["local"].name == f"{socket.gethostname()} (controller)"
@@ -266,8 +267,9 @@ async def test_start_vm(controller):
     with asyncio_patch("gns3server.controller.gns3vm.vmware_gns3_vm.VMwareGNS3VM.start") as mock:
         with asyncio_patch("gns3server.controller.gns3vm.GNS3VM._check_network"):
             with asyncio_patch("gns3server.controller.compute.Compute.connect"):
-                await controller.start()
-                assert mock.called
+                with asyncio_patch("gns3server.controller.Controller._install_builtin_disks", return_value=[]):
+                    await controller.start()
+                assert mock.called
     assert "local" in controller.computes
     assert "vm" in controller.computes
     assert len(controller.computes) == 2  # Local compute and vm are created
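In both controller start tests above, controller.start() is now wrapped so that Controller._install_builtin_disks is patched to return an empty list, which keeps the tests from installing the bundled disk images during startup. A minimal, self-contained sketch of the same patching idea using unittest.mock directly (FakeController is a stand-in, not the project's Controller class):

import asyncio
from unittest.mock import patch, AsyncMock

class FakeController:
    async def _install_builtin_disks(self):
        raise RuntimeError("would copy disk images into the images directory")

    async def start(self):
        return await self._install_builtin_disks()

async def main():
    controller = FakeController()
    # Replace the coroutine method with an AsyncMock that resolves to an empty list
    with patch.object(FakeController, "_install_builtin_disks", new=AsyncMock(return_value=[])):
        assert await controller.start() == []

asyncio.run(main())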
@@ -356,7 +358,7 @@ async def test_install_base_configs(controller, config, tmpdir):
     with open(str(tmpdir / 'iou_l2_base_startup-config.txt'), 'w+') as f:
         f.write('test')

-    controller._install_base_configs()
+    await controller._install_base_configs()
     assert os.path.exists(str(tmpdir / 'iou_l3_base_startup-config.txt'))

     # Check is the file has not been overwritten
@@ -385,12 +387,13 @@ async def test_install_base_configs(controller, config, tmpdir):
 async def test_install_builtin_disks(controller, config, tmpdir, builtin_disk):

     config.settings.Server.images_path = str(tmpdir)
-    controller._install_builtin_disks()
+    await controller._install_builtin_disks()
     # we only install Qemu empty disks at this time
     assert os.path.exists(str(tmpdir / "QEMU" / builtin_disk))


-def test_appliances(controller, config, tmpdir):
+@pytest.mark.asyncio
+async def test_appliances(controller, config, tmpdir):

     my_appliance = {
         "name": "My Appliance",
@@ -406,7 +409,7 @@ def test_appliances(controller, config, tmpdir):
         json.dump(my_appliance, f)

     config.settings.Server.appliances_path = str(tmpdir)
-    controller.appliance_manager.install_builtin_appliances()
+    await controller.appliance_manager.install_builtin_appliances()
     controller.appliance_manager.load_appliances()
     assert len(controller.appliance_manager.appliances) > 0
     for appliance in controller.appliance_manager.appliances.values():
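Because install_builtin_appliances() is now a coroutine, test_appliances is converted to an async test that awaits it and is marked with @pytest.mark.asyncio. A generic sketch of that conversion pattern, assuming pytest-asyncio is installed (install_builtin_appliances below is a stand-in for the real appliance manager method):

import pytest

async def install_builtin_appliances():
    # Stand-in for a method that became a coroutine in this change
    return ["my_appliance.gns3a"]

# Before: a plain synchronous test calling the method directly
# def test_appliances():
#     assert install_builtin_appliances()

# After: mark the test for asyncio and await the coroutine
@pytest.mark.asyncio
async def test_appliances_example():
    assert await install_builtin_appliances()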
 BIN  tests/resources/empty100G.qcow2  (new file, binary content not shown)