mirror of https://github.com/GNS3/gns3-server
synced 2025-02-13 08:31:59 +00:00

Merge remote-tracking branch 'origin/3.0' into gh-pages

This commit is contained in:
commit e8107de550

70  .github/workflows/codeql-analysis.yml  (vendored, new file)

@@ -0,0 +1,70 @@
+# For most projects, this workflow file will not need changing; you simply need
+# to commit it to your repository.
+#
+# You may wish to alter this file to override the set of languages analyzed,
+# or to provide custom queries or build logic.
+#
+# ******** NOTE ********
+# We have attempted to detect the languages in your repository. Please check
+# the `language` matrix defined below to confirm you have the correct set of
+# supported CodeQL languages.
+#
+name: "CodeQL"
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    # The branches below must be a subset of the branches above
+    branches: [ master ]
+  schedule:
+    - cron: '44 1 * * 3'
+
+jobs:
+  analyze:
+    name: Analyze
+    runs-on: ubuntu-latest
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+
+    strategy:
+      fail-fast: false
+      matrix:
+        language: [ 'python' ]
+        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ]
+        # Learn more about CodeQL language support at https://git.io/codeql-language-support
+
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v2
+
+    # Initializes the CodeQL tools for scanning.
+    - name: Initialize CodeQL
+      uses: github/codeql-action/init@v1
+      with:
+        languages: ${{ matrix.language }}
+        # If you wish to specify custom queries, you can do so here or in a config file.
+        # By default, queries listed here will override any specified in a config file.
+        # Prefix the list here with "+" to use these queries and those in the config file.
+        # queries: ./path/to/local/query, your-org/your-repo/queries@main
+
+    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
+    # If this step fails, then you should remove it and run the build manually (see below)
+    - name: Autobuild
+      uses: github/codeql-action/autobuild@v1
+
+    # ℹ️ Command-line programs to run using the OS shell.
+    # 📚 https://git.io/JvXDl
+
+    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
+    #    and modify them (or add more) to build your code if your project
+    #    uses a compiled language
+
+    #- run: |
+    #   make bootstrap
+    #   make release
+
+    - name: Perform CodeQL Analysis
+      uses: github/codeql-action/analyze@v1
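In the schedule block above, the cron expression '44 1 * * 3' triggers the scan once a week, on Wednesdays at 01:44 UTC, in addition to the push and pull-request triggers on master.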
@@ -18,7 +18,7 @@ jobs:
           ref: "gh-pages"
       - uses: actions/setup-python@v2
         with:
-          python-version: 3.6
+          python-version: 3.7
       - name: Merge changes from 3.0 branch
         run: |
           git config user.name github-actions
12  CHANGELOG

@@ -1,5 +1,17 @@
 # Change Log
 
+## 2.2.32 27/04/2022
+
+* Docker: load custom interface files from /etc/network/interfaces (commented by default). Ref #2052
+* Release web UI 2.2.32
+* Create `/etc/network/interfaces.d` in Docker container. Fixes #2052
+* Prettify Docker '/etc/network/interfaces' file. Ref #2040
+* Use public DSNs for Sentry
+* Fix VMware Fusion VM does not start on macOS >= 11. Fixes #2027
+* Include conf file in MANIFEST.in Ref #2044
+* Use Python 3.7 to publish API documentation
+* Development on 2.2.32dev1
+
 ## 2.2.31 26/02/2022
 
 * Install setuptools v59.6.0 when using Python 3.6
@@ -3,6 +3,7 @@ include AUTHORS
 include LICENSE
 include MANIFEST.in
 include requirements.txt
+include conf/*.conf
 recursive-include tests *
 recursive-exclude docs *
 recursive-include gns3server *
17  SECURITY.md  (new file)

@@ -0,0 +1,17 @@
+# Security Policy
+
+## Supported Versions
+
+These are the versions of the GNS3 server that are
+currently being supported with security updates.
+
+| Version | Supported          |
+| ------- | ------------------ |
+| 3.x.x   | :white_check_mark: |
+| 2.2.x   | :white_check_mark: |
+| 2.1.x   | :x:                |
+| 1.x.x   | :x:                |
+
+## Reporting a Vulnerability
+
+Please contact us at security@gns3.net
@@ -51,6 +51,11 @@ from . import virtualbox_nodes
 from . import vmware_nodes
 from . import vpcs_nodes
 
+import logging
+
+log = logging.getLogger(__name__)
+
+
 compute_api = FastAPI(
     title="GNS3 compute API",
     dependencies=[Depends(compute_authentication)],
@@ -63,6 +68,7 @@ compute_api.state.controller_host = None
 
 @compute_api.exception_handler(ComputeError)
 async def controller_error_handler(request: Request, exc: ComputeError):
+    log.error(f"Compute error: {exc}")
     return JSONResponse(
         status_code=409,
         content={"message": str(exc)},
@@ -71,6 +77,7 @@ async def controller_error_handler(request: Request, exc: ComputeError):
 
 @compute_api.exception_handler(ComputeTimeoutError)
 async def controller_timeout_error_handler(request: Request, exc: ComputeTimeoutError):
+    log.error(f"Compute timeout error: {exc}")
     return JSONResponse(
         status_code=408,
         content={"message": str(exc)},
@@ -79,6 +86,7 @@ async def controller_timeout_error_handler(request: Request, exc: ComputeTimeoutError):
 
 @compute_api.exception_handler(ComputeUnauthorizedError)
 async def controller_unauthorized_error_handler(request: Request, exc: ComputeUnauthorizedError):
+    log.error(f"Compute unauthorized error: {exc}")
     return JSONResponse(
         status_code=401,
         content={"message": str(exc)},
@@ -87,6 +95,7 @@ async def controller_unauthorized_error_handler(request: Request, exc: ComputeUnauthorizedError):
 
 @compute_api.exception_handler(ComputeForbiddenError)
 async def controller_forbidden_error_handler(request: Request, exc: ComputeForbiddenError):
+    log.error(f"Compute forbidden error: {exc}")
     return JSONResponse(
         status_code=403,
         content={"message": str(exc)},
@@ -95,6 +104,7 @@ async def controller_forbidden_error_handler(request: Request, exc: ComputeForbiddenError):
 
 @compute_api.exception_handler(ComputeNotFoundError)
 async def controller_not_found_error_handler(request: Request, exc: ComputeNotFoundError):
+    log.error(f"Compute not found error: {exc}")
     return JSONResponse(
         status_code=404,
         content={"message": str(exc)},
@@ -103,6 +113,7 @@ async def controller_not_found_error_handler(request: Request, exc: ComputeNotFoundError):
 
 @compute_api.exception_handler(GNS3VMError)
 async def controller_error_handler(request: Request, exc: GNS3VMError):
+    log.error(f"Compute GNS3 VM error: {exc}")
     return JSONResponse(
         status_code=409,
         content={"message": str(exc)},
@@ -111,6 +122,7 @@ async def controller_error_handler(request: Request, exc: GNS3VMError):
 
 @compute_api.exception_handler(ImageMissingError)
 async def image_missing_error_handler(request: Request, exc: ImageMissingError):
+    log.error(f"Compute image missing error: {exc}")
     return JSONResponse(
         status_code=409,
         content={"message": str(exc), "image": exc.image, "exception": exc.__class__.__name__},
@@ -119,6 +131,7 @@ async def image_missing_error_handler(request: Request, exc: ImageMissingError):
 
 @compute_api.exception_handler(NodeError)
 async def node_error_handler(request: Request, exc: NodeError):
+    log.error(f"Compute node error: {exc}")
     return JSONResponse(
         status_code=409,
         content={"message": str(exc), "exception": exc.__class__.__name__},
@@ -127,6 +140,7 @@ async def node_error_handler(request: Request, exc: NodeError):
 
 @compute_api.exception_handler(UbridgeError)
 async def ubridge_error_handler(request: Request, exc: UbridgeError):
+    log.error(f"Compute uBridge error: {exc}")
     return JSONResponse(
         status_code=409,
         content={"message": str(exc), "exception": exc.__class__.__name__},
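Every handler touched in these hunks follows the same two-step pattern: log the compute-side failure, then map the exception onto a JSON error payload. A minimal self-contained sketch of that pattern, using a hypothetical DiskError in place of the GNS3 exception classes:

import logging

from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse

log = logging.getLogger(__name__)
app = FastAPI()


class DiskError(Exception):
    # Hypothetical domain error, standing in for ComputeError and friends.
    pass


@app.exception_handler(DiskError)
async def disk_error_handler(request: Request, exc: DiskError):
    # Log first so the failure is visible server-side, then return the
    # same payload shape the GNS3 handlers use: {"message": ...}.
    log.error(f"Disk error: {exc}")
    return JSONResponse(status_code=409, content={"message": str(exc)})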
@@ -25,7 +25,7 @@ import psutil
 from gns3server.config import Config
 from gns3server.utils.cpu_percent import CpuPercent
 from gns3server.version import __version__
-from gns3server.utils.path import get_default_project_directory
+from gns3server.utils.path import get_default_project_directory, is_safe_path
 from gns3server.compute.port_manager import PortManager
 from gns3server.compute.project_manager import ProjectManager
 from gns3server.utils.interfaces import interfaces
@@ -81,8 +81,7 @@ def compute_version() -> dict:
     Retrieve the server version number.
     """
 
-    local_server = Config.instance().settings.Server.local
-    return {"version": __version__, "local": local_server}
+    return {"version": __version__}
 
 
 @router.get("/statistics")
@@ -145,47 +144,6 @@ async def get_qemu_capabilities() -> dict:
     return capabilities
 
 
-@router.post(
-    "/qemu/img",
-    status_code=status.HTTP_204_NO_CONTENT,
-    responses={403: {"model": schemas.ErrorMessage, "description": "Forbidden to create Qemu image"}},
-)
-async def create_qemu_image(image_data: schemas.QemuImageCreate) -> Response:
-    """
-    Create a Qemu image.
-    """
-
-    if os.path.isabs(image_data.path):
-        if Config.instance().settings.Server.local is False:
-            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
-
-    await Qemu.instance().create_disk(
-        image_data.qemu_img, image_data.path, jsonable_encoder(image_data, exclude_unset=True)
-    )
-
-    return Response(status_code=status.HTTP_204_NO_CONTENT)
-
-
-@router.put(
-    "/qemu/img",
-    status_code=status.HTTP_204_NO_CONTENT,
-    responses={403: {"model": schemas.ErrorMessage, "description": "Forbidden to update Qemu image"}},
-)
-async def update_qemu_image(image_data: schemas.QemuImageUpdate) -> Response:
-    """
-    Update a Qemu image.
-    """
-
-    if os.path.isabs(image_data.path):
-        if Config.instance().settings.Server.local is False:
-            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN)
-
-    if image_data.extend:
-        await Qemu.instance().resize_disk(image_data.qemu_img, image_data.path, image_data.extend)
-
-    return Response(status_code=status.HTTP_204_NO_CONTENT)
-
-
 @router.get("/virtualbox/vms", response_model=List[dict])
 async def get_virtualbox_vms() -> List[dict]:
@@ -26,10 +26,10 @@ from fastapi.responses import StreamingResponse
 from uuid import UUID
 
 from gns3server import schemas
-from gns3server.compute.project_manager import ProjectManager
 from gns3server.compute.qemu import Qemu
 from gns3server.compute.qemu.qemu_vm import QemuVM
 
 
 responses = {404: {"model": schemas.ErrorMessage, "description": "Could not find project or Qemu node"}}
 
 router = APIRouter(responses=responses)
@@ -126,10 +126,55 @@ async def duplicate_qemu_node(
     return new_node.asdict()
 
 
-@router.post("/{node_id}/resize_disk", status_code=status.HTTP_204_NO_CONTENT)
-async def resize_qemu_node_disk(node_data: schemas.QemuDiskResize, node: QemuVM = Depends(dep_node)) -> Response:
+@router.post(
+    "/{node_id}/disk_image/{disk_name}",
+    status_code=status.HTTP_204_NO_CONTENT
+)
+async def create_qemu_disk_image(
+    disk_name: str,
+    disk_data: schemas.QemuDiskImageCreate,
+    node: QemuVM = Depends(dep_node)
+) -> Response:
+    """
+    Create a Qemu disk image.
+    """
 
-    await node.resize_disk(node_data.drive_name, node_data.extend)
+    options = jsonable_encoder(disk_data, exclude_unset=True)
+    await node.create_disk_image(disk_name, options)
+    return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.put(
+    "/{node_id}/disk_image/{disk_name}",
+    status_code=status.HTTP_204_NO_CONTENT
+)
+async def update_qemu_disk_image(
+    disk_name: str,
+    disk_data: schemas.QemuDiskImageUpdate,
+    node: QemuVM = Depends(dep_node)
+) -> Response:
+    """
+    Update a Qemu disk image.
+    """
+
+    if disk_data.extend:
+        await node.resize_disk_image(disk_name, disk_data.extend)
+    return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
+@router.delete(
+    "/{node_id}/disk_image/{disk_name}",
+    status_code=status.HTTP_204_NO_CONTENT
+)
+async def delete_qemu_disk_image(
+    disk_name: str,
+    node: QemuVM = Depends(dep_node)
+) -> Response:
+    """
+    Delete a Qemu disk image.
+    """
+
+    node.delete_disk_image(disk_name)
     return Response(status_code=status.HTTP_204_NO_CONTENT)
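The hunks above replace the single /resize_disk action with create/update/delete routes for named disk images. A hedged client sketch against these compute endpoints; the base URL, the IDs, and the exact request fields (format/size/extend, inferred from the schema names in the diff) are assumptions:

import requests

# Placeholder compute server, project and node IDs.
base = ("http://localhost:3080/v3/compute/projects/"
        "11111111-2222-3333-4444-555555555555/qemu/nodes/"
        "66666666-7777-8888-9999-000000000000")

# Create a 2048 MiB qcow2 disk named disk.qcow2.
r = requests.post(f"{base}/disk_image/disk.qcow2",
                  json={"format": "qcow2", "size": 2048})
assert r.status_code == 204

# Extend the same image by 1024 MiB.
r = requests.put(f"{base}/disk_image/disk.qcow2", json={"extend": 1024})
assert r.status_code == 204

# Delete it.
r = requests.delete(f"{base}/disk_image/disk.qcow2")
assert r.status_code == 204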
@@ -52,12 +52,13 @@ router = APIRouter()
 @router.get("", response_model=List[schemas.Image])
 async def get_images(
     images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
+    image_type: Optional[schemas.ImageType] = None
 ) -> List[schemas.Image]:
     """
     Return all images.
     """
 
-    return await images_repo.get_images()
+    return await images_repo.get_images(image_type)
 
 
 @router.post("/upload/{image_path:path}", response_model=schemas.Image, status_code=status.HTTP_201_CREATED)
@@ -85,7 +86,6 @@ async def upload_image(
     if os.path.commonprefix([base_images_directory, full_path]) != base_images_directory:
         raise ControllerForbiddenError(f"Cannot write image, '{image_path}' is forbidden")
 
-    print(image_path)
     if await images_repo.get_image(image_path):
         raise ControllerBadRequestError(f"Image '{image_path}' already exists")
@ -32,7 +32,7 @@ from gns3server.controller.node import Node
|
|||||||
from gns3server.controller.project import Project
|
from gns3server.controller.project import Project
|
||||||
from gns3server.utils import force_unix_path
|
from gns3server.utils import force_unix_path
|
||||||
from gns3server.utils.http_client import HTTPClient
|
from gns3server.utils.http_client import HTTPClient
|
||||||
from gns3server.controller.controller_error import ControllerForbiddenError
|
from gns3server.controller.controller_error import ControllerForbiddenError, ControllerBadRequestError
|
||||||
from gns3server import schemas
|
from gns3server import schemas
|
||||||
|
|
||||||
import logging
|
import logging
|
||||||
@ -300,6 +300,8 @@ async def auto_idlepc(node: Node = Depends(dep_node)) -> str:
|
|||||||
Compute an Idle-PC value for a Dynamips node
|
Compute an Idle-PC value for a Dynamips node
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
if node.node_type != "dynamips":
|
||||||
|
raise ControllerBadRequestError("Auto Idle-PC is only supported on a Dynamips node")
|
||||||
return await node.dynamips_auto_idlepc()
|
return await node.dynamips_auto_idlepc()
|
||||||
|
|
||||||
|
|
||||||
@ -309,16 +311,55 @@ async def idlepc_proposals(node: Node = Depends(dep_node)) -> List[str]:
|
|||||||
Compute a list of potential idle-pc values for a Dynamips node
|
Compute a list of potential idle-pc values for a Dynamips node
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
if node.node_type != "dynamips":
|
||||||
|
raise ControllerBadRequestError("Idle-PC proposals is only supported on a Dynamips node")
|
||||||
return await node.dynamips_idlepc_proposals()
|
return await node.dynamips_idlepc_proposals()
|
||||||
|
|
||||||
|
|
||||||
@router.post("/{node_id}/resize_disk", status_code=status.HTTP_204_NO_CONTENT)
|
@router.post("/{node_id}/qemu/disk_image/{disk_name}", status_code=status.HTTP_204_NO_CONTENT)
|
||||||
async def resize_disk(resize_data: dict, node: Node = Depends(dep_node)) -> Response:
|
async def create_disk_image(
|
||||||
|
disk_name: str,
|
||||||
|
disk_data: schemas.QemuDiskImageCreate,
|
||||||
|
node: Node = Depends(dep_node)
|
||||||
|
) -> Response:
|
||||||
"""
|
"""
|
||||||
Resize a disk image.
|
Create a Qemu disk image.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
await node.post("/resize_disk", **resize_data)
|
if node.node_type != "qemu":
|
||||||
|
raise ControllerBadRequestError("Creating a disk image is only supported on a Qemu node")
|
||||||
|
await node.post(f"/disk_image/{disk_name}", data=disk_data.dict(exclude_unset=True))
|
||||||
|
return Response(status_code=status.HTTP_204_NO_CONTENT)
|
||||||
|
|
||||||
|
|
||||||
|
@router.put("/{node_id}/qemu/disk_image/{disk_name}", status_code=status.HTTP_204_NO_CONTENT)
|
||||||
|
async def update_disk_image(
|
||||||
|
disk_name: str,
|
||||||
|
disk_data: schemas.QemuDiskImageUpdate,
|
||||||
|
node: Node = Depends(dep_node)
|
||||||
|
) -> Response:
|
||||||
|
"""
|
||||||
|
Update a Qemu disk image.
|
||||||
|
"""
|
||||||
|
|
||||||
|
if node.node_type != "qemu":
|
||||||
|
raise ControllerBadRequestError("Updating a disk image is only supported on a Qemu node")
|
||||||
|
await node.put(f"/disk_image/{disk_name}", data=disk_data.dict(exclude_unset=True))
|
||||||
|
return Response(status_code=status.HTTP_204_NO_CONTENT)
|
||||||
|
|
||||||
|
|
||||||
|
@router.delete("/{node_id}/qemu/disk_image/{disk_name}", status_code=status.HTTP_204_NO_CONTENT)
|
||||||
|
async def delete_disk_image(
|
||||||
|
disk_name: str,
|
||||||
|
node: Node = Depends(dep_node)
|
||||||
|
) -> Response:
|
||||||
|
"""
|
||||||
|
Delete a Qemu disk image.
|
||||||
|
"""
|
||||||
|
|
||||||
|
if node.node_type != "qemu":
|
||||||
|
raise ControllerBadRequestError("Deleting a disk image is only supported on a Qemu node")
|
||||||
|
await node.delete(f"/disk_image/{disk_name}")
|
||||||
return Response(status_code=status.HTTP_204_NO_CONTENT)
|
return Response(status_code=status.HTTP_204_NO_CONTENT)
|
||||||
|
|
||||||
|
|
||||||
|
@@ -21,10 +21,10 @@ API routes for projects.
 import os
 import asyncio
 import tempfile
-import zipfile
 import aiofiles
 import time
 import urllib.parse
+import gns3server.utils.zipfile_zstd as zipfile
 
 import logging
@@ -41,7 +41,7 @@ from pathlib import Path
 from gns3server import schemas
 from gns3server.controller import Controller
 from gns3server.controller.project import Project
-from gns3server.controller.controller_error import ControllerError, ControllerForbiddenError
+from gns3server.controller.controller_error import ControllerError, ControllerBadRequestError
 from gns3server.controller.import_project import import_project as import_controller_project
 from gns3server.controller.export_project import export_project as export_controller_project
 from gns3server.utils.asyncio import aiozipstream
@@ -285,7 +285,8 @@ async def export_project(
     include_snapshots: bool = False,
     include_images: bool = False,
     reset_mac_addresses: bool = False,
-    compression: str = "zip",
+    compression: schemas.ProjectCompression = "zstd",
+    compression_level: int = None,
 ) -> StreamingResponse:
     """
     Export a project as a portable archive.
@@ -294,12 +295,23 @@ async def export_project(
     compression_query = compression.lower()
     if compression_query == "zip":
         compression = zipfile.ZIP_DEFLATED
+        if compression_level is not None and (compression_level < 0 or compression_level > 9):
+            raise ControllerBadRequestError("Compression level must be between 0 and 9 for ZIP compression")
     elif compression_query == "none":
         compression = zipfile.ZIP_STORED
     elif compression_query == "bzip2":
         compression = zipfile.ZIP_BZIP2
+        if compression_level is not None and (compression_level < 1 or compression_level > 9):
+            raise ControllerBadRequestError("Compression level must be between 1 and 9 for BZIP2 compression")
     elif compression_query == "lzma":
         compression = zipfile.ZIP_LZMA
+    elif compression_query == "zstd":
+        compression = zipfile.ZIP_ZSTANDARD
+        if compression_level is not None and (compression_level < 1 or compression_level > 22):
+            raise ControllerBadRequestError("Compression level must be between 1 and 22 for Zstandard compression")
+
+    if compression_level is not None and compression_query in ("none", "lzma"):
+        raise ControllerBadRequestError(f"Compression level is not supported for '{compression_query}' compression method")
 
     try:
         begin = time.time()
@@ -307,8 +319,10 @@ async def export_project(
         working_dir = os.path.abspath(os.path.join(project.path, os.pardir))
 
         async def streamer():
+            log.info(f"Exporting project '{project.name}' with '{compression_query}' compression "
+                     f"(level {compression_level})")
             with tempfile.TemporaryDirectory(dir=working_dir) as tmpdir:
-                with aiozipstream.ZipFile(compression=compression) as zstream:
+                with aiozipstream.ZipFile(compression=compression, compresslevel=compression_level) as zstream:
                     await export_controller_project(
                         zstream,
                         project,
@@ -342,10 +356,10 @@ async def import_project(
     Import a project from a portable archive.
     """
 
-    controller = Controller.instance()
-    if Config.instance().settings.Server.local is False:
-        raise ControllerForbiddenError("The server is not local")
+    #TODO: import project remotely
+    raise NotImplementedError()
 
+    controller = Controller.instance()
     # We write the content to a temporary location and after we extract it all.
     # It could be more optimal to stream this but it is not implemented in Python.
     try:
@@ -385,16 +399,9 @@ async def duplicate_project(
     Duplicate a project.
     """
 
-    if project_data.path:
-        if Config.instance().settings.Server.local is False:
-            raise ControllerForbiddenError("The server is not a local server")
-        location = project_data.path
-    else:
-        location = None
-
     reset_mac_addresses = project_data.reset_mac_addresses
     new_project = await project.duplicate(
-        name=project_data.name, location=location, reset_mac_addresses=reset_mac_addresses
+        name=project_data.name, reset_mac_addresses=reset_mac_addresses
     )
     await rbac_repo.add_permission_to_user_with_path(current_user.user_id, f"/projects/{new_project.id}/*")
     return new_project.asdict()
@@ -423,7 +430,7 @@ async def get_file(file_path: str, project: Project = Depends(dep_project)) -> FileResponse:
 @router.post("/{project_id}/files/{file_path:path}", status_code=status.HTTP_204_NO_CONTENT)
 async def write_file(file_path: str, request: Request, project: Project = Depends(dep_project)) -> Response:
     """
-    Write a file from a project.
+    Write a file to a project.
     """
 
     file_path = urllib.parse.unquote(file_path)
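The export changes make Zstandard the default archive format and add a compression_level query parameter with per-method bounds (0-9 for zip, 1-9 for bzip2, 1-22 for zstd, none for 'none'/'lzma'). A sketch of a client driving the endpoint; the controller URL, project ID, and token are placeholders:

import requests

url = ("http://localhost:3080/v3/projects/"
       "11111111-2222-3333-4444-555555555555/export")
headers = {"Authorization": "Bearer <token>"}

# Ask for a Zstandard archive at level 12 (must fall within 1-22 per the diff).
with requests.get(url, headers=headers,
                  params={"compression": "zstd", "compression_level": 12},
                  stream=True) as r:
    r.raise_for_status()
    with open("project.gns3project", "wb") as f:
        for chunk in r.iter_content(chunk_size=65536):
            f.write(chunk)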
@@ -75,7 +75,7 @@ async def authenticate(
 ) -> schemas.Token:
     """
     Alternative authentication method using json.
-    Example: curl http://host:port/v3/users/authenticate -d '{"username": "admin", "password": "admin"} -H "Content-Type: application/json" '
+    Example: curl http://host:port/v3/users/authenticate -d '{"username": "admin", "password": "admin"}' -H "Content-Type: application/json"
     """
 
     user = await users_repo.authenticate_user(username=user_credentials.username, password=user_credentials.password)
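The docstring fix moves the stray quote so the JSON body is terminated before the -H flag. The same request in Python, using the documented example credentials and an assumed host and port:

import requests

r = requests.post("http://localhost:3080/v3/users/authenticate",
                  json={"username": "admin", "password": "admin"})
r.raise_for_status()
print(r.json())  # a schemas.Token payload containing the access token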
@@ -34,6 +34,7 @@ from gns3server.controller.controller_error import (
     ControllerTimeoutError,
     ControllerForbiddenError,
     ControllerUnauthorizedError,
+    ComputeConflictError
 )
 
 from gns3server.api.routes import controller, index
@@ -138,6 +139,15 @@ async def controller_bad_request_error_handler(request: Request, exc: ControllerBadRequestError):
     )
 
 
+@app.exception_handler(ComputeConflictError)
+async def compute_conflict_error_handler(request: Request, exc: ComputeConflictError):
+    log.error(f"Controller received error from compute for request '{exc.url()}': {exc}")
+    return JSONResponse(
+        status_code=409,
+        content={"message": str(exc)},
+    )
+
+
 # make sure the content key is "message", not "detail" per default
 @app.exception_handler(HTTPException)
 async def http_exception_handler(request: Request, exc: HTTPException):
@@ -156,12 +166,14 @@ async def sqlalchemry_error_handler(request: Request, exc: SQLAlchemyError):
         content={"message": "Database error detected, please check logs to find details"},
     )
 
+
+# FIXME: do not use this middleware since it creates issue when using StreamingResponse
+# see https://starlette-context.readthedocs.io/en/latest/middleware.html#why-are-there-two-middlewares-that-do-the-same-thing
 
-@app.middleware("http")
-async def add_extra_headers(request: Request, call_next):
-    start_time = time.time()
-    response = await call_next(request)
-    process_time = time.time() - start_time
-    response.headers["X-Process-Time"] = str(process_time)
-    response.headers["X-GNS3-Server-Version"] = f"{__version__}"
-    return response
+# @app.middleware("http")
+# async def add_extra_headers(request: Request, call_next):
+#     start_time = time.time()
+#     response = await call_next(request)
+#     process_time = time.time() - start_time
+#     response.headers["X-Process-Time"] = str(process_time)
+#     response.headers["X-GNS3-Server-Version"] = f"{__version__}"
+#     return response
@@ -30,6 +30,13 @@
         "process_priority": "normal"
     },
     "images": [
+        {
+            "filename": "arubaoscx-disk-image-genericx86-p4-20220223012712.vmdk",
+            "version": "10.09.1000",
+            "md5sum": "7b47c4442d825562e73e3f09b2f1f999",
+            "filesize": 556828672,
+            "download_url": "https://asp.arubanetworks.com/"
+        },
         {
             "filename": "arubaoscx-disk-image-genericx86-p4-20211206170615.vmdk",
             "version": "10.09.0002",
@@ -81,6 +88,12 @@
         }
     ],
     "versions": [
+        {
+            "name": "10.09.1000",
+            "images": {
+                "hda_disk_image": "arubaoscx-disk-image-genericx86-p4-20220223012712.vmdk"
+            }
+        },
         {
             "name": "10.09.0002",
             "images": {
@@ -26,6 +26,13 @@
         "kvm": "require"
     },
     "images": [
+        {
+            "filename": "asav9-16-2.qcow2",
+            "version": "9.16.2",
+            "md5sum": "c3aa2b73b029146ec345bf888dd54eab",
+            "filesize": 264896512,
+            "download_url": "https://software.cisco.com/download/home/286119613/type/280775065/release/9.16.2"
+        },
         {
             "filename": "asav9-15-1.qcow2",
             "version": "9.15.1",
@@ -105,6 +112,12 @@
         }
     ],
     "versions": [
+        {
+            "name": "9.16.2",
+            "images": {
+                "hda_disk_image": "asav9-16-2.qcow2"
+            }
+        },
         {
             "name": "9.15.1",
             "images": {
@@ -32,6 +32,13 @@
             "download_url": "https://sourceforge.net/projects/gns-3/files",
             "direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/IOSv_startup_config.img/download"
         },
+        {
+            "filename": "vios-adventerprisek9-m.spa.159-3.m4.qcow2",
+            "version": "15.9(3)M4",
+            "md5sum": "355b13ab821e64e2939fd7008d6304d7",
+            "filesize": 57297920,
+            "download_url": "https://learningnetworkstore.cisco.com/myaccount"
+        },
         {
             "filename": "vios-adventerprisek9-m.spa.159-3.m3.qcow2",
             "version": "15.9(3)M3",
@@ -83,6 +90,13 @@
         }
     ],
     "versions": [
+        {
+            "name": "15.9(3)M4",
+            "images": {
+                "hda_disk_image": "vios-adventerprisek9-m.spa.159-3.m4.qcow2",
+                "hdb_disk_image": "IOSv_startup_config.img"
+            }
+        },
         {
             "name": "15.9(3)M3",
             "images": {
46  gns3server/appliances/clavister-netsheild.gns3a  (new file)

@@ -0,0 +1,46 @@
+{
+    "appliance_id": "39c6b8db-8dc3-4b04-8727-7d0b414be7c8",
+    "name": "Clavister NetShield",
+    "category": "firewall",
+    "description": "Clavister NetShield (cOS Stream) Virtual Appliance offers the same functionality as the Clavister NetShield physical NGappliances FWs in a virtual environment.",
+    "vendor_name": "Clavister",
+    "vendor_url": "https://www.clavister.com/",
+    "documentation_url": "https://kb.clavister.com",
+    "product_name": "NetShield",
+    "product_url": "https://www.clavister.com/products/netshield/",
+    "registry_version": 4,
+    "status": "stable",
+    "availability": "free-to-try",
+    "maintainer": "Mattias Nordlund",
+    "maintainer_email": "mattias.nordlund@clavister.com",
+    "usage": "No configuration by default, oen console to set IPs and activate configuration.",
+    "port_name_format": "if{0}",
+    "qemu": {
+        "adapter_type": "virtio-net-pci",
+        "adapters": 4,
+        "ram": 1024,
+        "hda_disk_interface": "virtio",
+        "arch": "x86_64",
+        "console_type": "telnet",
+        "boot_priority": "c",
+        "kvm": "allow",
+        "options": "-cpu Nehalem"
+    },
+    "images": [
+        {
+            "filename": "clavister-cos-stream-3.80.09.01-virtual-x64-generic.qcow2",
+            "version": "cOS Stream 3.80.09",
+            "md5sum": "b57d8e0f1a3cdd4b2c96ffbc7d7c4f05",
+            "filesize": 134217728,
+            "download_url": "https://my.clavister.com/download/c44639bf-b082-ec11-8308-005056956b6b"
+        }
+    ],
+    "versions": [
+        {
+            "images": {
+                "hda_disk_image": "clavister-cos-stream-3.80.09.01-virtual-x64-generic.qcow2"
+            },
+            "name": "cOS Stream 3.80.09"
+        }
+    ]
+}
58  gns3server/appliances/clavister-netwall.gns3a  (new file)

@@ -0,0 +1,58 @@
+{
+    "appliance_id": "68ddb1dc-e55b-4bcc-9c18-27a9eb5e7413",
+    "name": "Clavister NetWall",
+    "category": "firewall",
+    "description": "Clavister NetWall (cOS Core) Virtual Appliance offers the same functionality as the Clavister NetWall physical NGFWs in a virtual environment.",
+    "vendor_name": "Clavister",
+    "vendor_url": "https://www.clavister.com/",
+    "documentation_url": "https://kb.clavister.com",
+    "product_name": "NetWall",
+    "product_url": "https://www.clavister.com/products/ngfw/",
+    "registry_version": 4,
+    "status": "stable",
+    "availability": "free-to-try",
+    "maintainer": "Mattias Nordlund",
+    "maintainer_email": "mattias.nordlund@clavister.com",
+    "usage": "DHCP enabled on all interfaces by default, WebUI/SSH access enabled on the local network connected to If1.",
+    "port_name_format": "If{0}",
+    "qemu": {
+        "adapter_type": "e1000",
+        "adapters": 4,
+        "ram": 512,
+        "hda_disk_interface": "virtio",
+        "arch": "x86_64",
+        "console_type": "telnet",
+        "boot_priority": "c",
+        "kvm": "allow"
+    },
+    "images": [
+        {
+            "filename": "clavister-cos-core-14.00.01.13-kvm-en.img",
+            "version": "cOS Core 14.00.01 (x86)",
+            "md5sum": "6c72eb0bb13d191912ca930b72071d07",
+            "filesize": 134217728,
+            "download_url": "https://my.clavister.com/download/ee3ecb2f-7662-ec11-8308-005056956b6b"
+        },
+        {
+            "filename": "clavister-cos-core-14.00.00.12-kvm-en.img",
+            "version": "cOS Core 14.00.00 (x86)",
+            "md5sum": "496ddd494b226e3508563db837643910",
+            "filesize": 134217728,
+            "download_url": "https://my.clavister.com/download/b2b7bce8-4449-ec11-8308-005056956b6b"
+        }
+    ],
+    "versions": [
+        {
+            "images": {
+                "hda_disk_image": "clavister-cos-core-14.00.01.13-kvm-en.img"
+            },
+            "name": "cOS Core 14.00.01 (x86)"
+        },
+        {
+            "images": {
+                "hda_disk_image": "clavister-cos-core-14.00.00.12-kvm-en.img"
+            },
+            "name": "cOS Core 14.00.00 (x86)"
+        }
+    ]
+}
@@ -24,20 +24,20 @@
     },
     "images": [
         {
-            "filename": "debian-11-genericcloud-amd64-20211220-862.qcow2",
-            "version": "11.2",
-            "md5sum": "3bdc52b0b3622a72095efdd001780a45",
-            "filesize": 253231104,
+            "filename": "debian-11-genericcloud-amd64-20220328-962.qcow2",
+            "version": "11.3",
+            "md5sum": "7cf51e23747898485971a656ac2eb96d",
+            "filesize": 253296640,
             "download_url": "https://cloud.debian.org/images/cloud/bullseye/",
-            "direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20211220-862/debian-11-genericcloud-amd64-20211220-862.qcow2"
+            "direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20220328-962/debian-11-genericcloud-amd64-20220328-962.qcow2"
         },
         {
-            "filename": "debian-10-genericcloud-amd64-20211011-792.qcow2",
-            "version": "10.11",
-            "md5sum": "ea4de19b17d114b6db813ee64a6b8284",
+            "filename": "debian-10-genericcloud-amd64-20220328-962.qcow2",
+            "version": "10.12",
+            "md5sum": "e92dfa1fc779fff807856f6ea6876e42",
             "filesize": 232980480,
             "download_url": "https://cloud.debian.org/images/cloud/buster/",
-            "direct_download_url": "https://cloud.debian.org/images/cloud/buster/20211011-792/debian-10-genericcloud-amd64-20211011-792.qcow2"
+            "direct_download_url": "https://cloud.debian.org/images/cloud/buster/20220328-962/debian-10-genericcloud-amd64-20220328-962.qcow2"
         },
         {
             "filename": "debian-cloud-init-data.iso",
@@ -49,16 +49,16 @@
     ],
     "versions": [
         {
-            "name": "11.2",
+            "name": "11.3",
             "images": {
-                "hda_disk_image": "debian-11-genericcloud-amd64-20211220-862.qcow2",
+                "hda_disk_image": "debian-11-genericcloud-amd64-20220328-962.qcow2",
                 "cdrom_image": "debian-cloud-init-data.iso"
             }
         },
         {
-            "name": "10.11",
+            "name": "10.12",
             "images": {
-                "hda_disk_image": "debian-10-genericcloud-amd64-20211011-792.qcow2",
+                "hda_disk_image": "debian-10-genericcloud-amd64-20220328-962.qcow2",
                 "cdrom_image": "debian-cloud-init-data.iso"
             }
         }
@@ -22,6 +22,14 @@
         "kvm": "allow"
     },
     "images": [
+        {
+            "filename": "frr-8.1.0.qcow2",
+            "version": "8.1.0",
+            "md5sum": "836d6a207f63f99a4039378f2b0c6123",
+            "filesize": 54063616,
+            "download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
+            "direct_download_url": "http://downloads.sourceforge.net/project/gns-3/Qemu%20Appliances/frr-8.1.0.qcow2"
+        },
         {
             "filename": "frr-7.5.1.qcow2",
             "version": "7.5.1",
@@ -40,6 +48,12 @@
         }
     ],
     "versions": [
+        {
+            "name": "8.1.0",
+            "images": {
+                "hda_disk_image": "frr-8.1.0.qcow2"
+            }
+        },
         {
             "name": "7.5.1",
             "images": {
@@ -25,6 +25,13 @@
         "kvm": "require"
     },
     "images": [
+        {
+            "filename": "OPNsense-22.1.2-OpenSSL-nano-amd64.img",
+            "version": "22.1.2",
+            "md5sum": "3109030a214301b89a47eb9466e8b656",
+            "filesize": 3221225472,
+            "download_url": "https://opnsense.c0urier.net/releases/22.1/"
+        },
         {
             "filename": "OPNsense-21.7.1-OpenSSL-nano-amd64.img",
             "version": "21.7.1",
@@ -48,6 +55,12 @@
         }
     ],
     "versions": [
+        {
+            "name": "22.1.2",
+            "images": {
+                "hda_disk_image": "OPNsense-22.1.2-OpenSSL-nano-amd64.img"
+            }
+        },
         {
             "name": "21.7.1",
             "images": {
18  gns3server/appliances/ovs-snmp.gns3a  (new file)

@@ -0,0 +1,18 @@
+{
+    "appliance_id": "bfafb392-bb2b-4078-8817-29c55273fff6",
+    "name": "Open vSwitch with SNMP",
+    "category": "multilayer_switch",
+    "description": "Customized Open vSwtich with SNMP support",
+    "vendor_name": "Open vSwitch",
+    "vendor_url": "http://openvswitch.org/",
+    "product_name": "Open vSwitch",
+    "registry_version": 3,
+    "status": "stable",
+    "maintainer": "GNS3 Team",
+    "maintainer_email": "developers@gns3.net",
+    "usage": "",
+    "docker": {
+        "adapters": 8,
+        "image": "gns3/ovs-snmp:latest"
+    }
+}
@@ -24,6 +24,13 @@
         "process_priority": "normal"
     },
     "images": [
+        {
+            "filename": "pfSense-CE-2.6.0-RELEASE-amd64.iso",
+            "version": "2.6.0",
+            "md5sum": "5ca6d4cb89977022d2e76c9158eeeb67",
+            "filesize": 767463424,
+            "download_url": "https://www.pfsense.org/download/mirror.php?section=downloads"
+        },
         {
             "filename": "pfSense-CE-2.5.2-RELEASE-amd64.iso",
             "version": "2.5.2",
@@ -62,6 +69,13 @@
         }
     ],
     "versions": [
+        {
+            "name": "2.6.0",
+            "images": {
+                "hda_disk_image": "empty100G.qcow2",
+                "cdrom_image": "pfSense-CE-2.6.0-RELEASE-amd64.iso"
+            }
+        },
         {
             "name": "2.5.2",
             "images": {
@@ -24,6 +24,20 @@
         "kvm": "require"
     },
     "images": [
+        {
+            "filename": "VI-18.5.2_MR-2.KVM-380-PRIMARY.qcow2",
+            "version": "18.5.2 MR2",
+            "md5sum": "d3b99cd9519fae06e4ef348af34fef2b",
+            "filesize": 458555392,
+            "download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
+        },
+        {
+            "filename": "VI-18.5.2_MR-2.KVM-380-AUXILIARY.qcow2",
+            "version": "18.5.2 MR2",
+            "md5sum": "9cf2ebc15c92f712e28f8e45a29ee613",
+            "filesize": 11272192,
+            "download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
+        },
         {
             "filename": "VI-17.1.3_MR-3.KVM-250-PRIMARY.qcow2",
             "version": "17.1.3 MR3",
@@ -124,6 +138,13 @@
         }
     ],
    "versions": [
+        {
+            "name": "18.5.2 MR2",
+            "images": {
+                "hda_disk_image": "VI-18.5.2_MR-2.KVM-380-PRIMARY.qcow2",
+                "hdb_disk_image": "VI-18.5.2_MR-2.KVM-380-AUXILIARY.qcow2"
+            }
+        },
         {
             "name": "17.1.3 MR3",
             "images": {
@@ -26,6 +26,13 @@
         "kvm": "allow"
     },
     "images": [
+        {
+            "filename": "vyos-1.3.1-amd64.iso",
+            "version": "1.3.1",
+            "md5sum": "b6f57bd0cf9b60cdafa337b08ba4f2bc",
+            "filesize": 350224384,
+            "download_url": "https://support.vyos.io/en/downloads/files/vyos-1-3-1-generic-iso-image"
+        },
         {
             "filename": "vyos-1.3.0-amd64.iso",
             "version": "1.3.0",
@@ -73,6 +80,13 @@
         }
     ],
     "versions": [
+        {
+            "name": "1.3.1",
+            "images": {
+                "hda_disk_image": "empty8G.qcow2",
+                "cdrom_image": "vyos-1.3.1-amd64.iso"
+            }
+        },
         {
             "name": "1.3.0",
             "images": {
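All of the appliance updates above preserve the same invariant: each entry in versions[*].images must name either a file declared in the top-level images list or a stock blank disk such as empty8G.qcow2 / empty100G.qcow2. A small checker sketch for a .gns3a file; the stock-disk allowance is an assumption about how the registry treats those names:

import json
import sys

# Blank disks GNS3 ships separately rather than listing per appliance (assumption).
STOCK_DISKS = {"empty8G.qcow2", "empty100G.qcow2"}

def check_appliance(path):
    with open(path) as f:
        appliance = json.load(f)
    declared = {image["filename"] for image in appliance.get("images", [])}
    for version in appliance.get("versions", []):
        for slot, filename in version["images"].items():
            if filename not in declared and filename not in STOCK_DISKS:
                sys.exit(f"{version['name']}: {slot} references undeclared image {filename}")
    print(f"{path}: OK")

check_appliance("gns3server/appliances/vyos.gns3a")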
@@ -442,14 +442,6 @@ class BaseManager:
                 return path
             raise ImageMissingError(orig_path)
 
-        # For local server we allow using absolute path outside image directory
-        if Config.instance().settings.Server.local is True:
-            log.debug(f"Searching for '{orig_path}'")
-            path = force_unix_path(path)
-            if os.path.exists(path):
-                return path
-            raise ImageMissingError(orig_path)
-
         # Check to see if path is an absolute path to a valid directory
         path = force_unix_path(path)
         for directory in valid_directory_prefices:
@@ -514,7 +506,7 @@ class BaseManager:
         """
 
         try:
-            return list_images(self._NODE_TYPE)
+            return await list_images(self._NODE_TYPE)
         except OSError as e:
            raise ComputeError(f"Can not list images {e}")
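The second hunk is a genuine bug fix, not a style change: list_images is a coroutine function, so returning it without await hands the caller an unawaited coroutine object instead of the image list, and the except OSError branch can never fire because the body never executes inside this frame. A minimal illustration with stand-in names:

import asyncio

async def list_images(node_type):
    # Stand-in for the real coroutine that scans the image directory.
    return [f"{node_type}-image.qcow2"]

async def broken():
    return list_images("qemu")        # returns a coroutine object, not a list

async def fixed():
    return await list_images("qemu")  # returns the actual list

async def main():
    print(await broken())  # <coroutine object list_images at 0x...>
    print(await fixed())   # ['qemu-image.qcow2']

asyncio.run(main())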
@@ -333,16 +333,17 @@ class DockerVM(BaseNode):
         os.makedirs(os.path.join(path, "if-down.d"), exist_ok=True)
         os.makedirs(os.path.join(path, "if-pre-up.d"), exist_ok=True)
         os.makedirs(os.path.join(path, "if-post-down.d"), exist_ok=True)
+        os.makedirs(os.path.join(path, "interfaces.d"), exist_ok=True)
 
         if not os.path.exists(os.path.join(path, "interfaces")):
             with open(os.path.join(path, "interfaces"), "w+") as f:
-                f.write(
-                    """#
-# This is a sample network config uncomment lines to configure the network
-#
-"""
-                )
+                f.write("""#
+# This is a sample network config, please uncomment lines to configure the network
+#
+
+# Uncomment this line to load custom interface files
+# source /etc/network/interfaces.d/*
+""")
                 for adapter in range(0, self.adapters):
                     f.write(
                         """
@@ -355,11 +356,9 @@ class DockerVM(BaseNode):
 #\tup echo nameserver 192.168.{adapter}.1 > /etc/resolv.conf
 
 # DHCP config for eth{adapter}
-# auto eth{adapter}
-# iface eth{adapter} inet dhcp""".format(
-                        adapter=adapter
-                    )
-                )
+#auto eth{adapter}
+#iface eth{adapter} inet dhcp
+""".format(adapter=adapter))
 
         return path
 
     async def create(self):
@@ -163,7 +163,7 @@ class Router(BaseNode):
             "dynamips_id": self._dynamips_id,
             "platform": self._platform,
             "image": self._image,
-            "image_md5sum": md5sum(self._image),
+            "image_md5sum": md5sum(self._image, self._working_directory),
             "ram": self._ram,
             "nvram": self._nvram,
             "mmap": self._mmap,
@@ -231,7 +231,7 @@ class IOUVM(BaseNode):
             "status": self.status,
             "project_id": self.project.id,
             "path": self.path,
-            "md5sum": gns3server.utils.images.md5sum(self.path),
+            "md5sum": gns3server.utils.images.md5sum(self.path, self.working_path),
             "ethernet_adapters": len(self._ethernet_adapters),
             "serial_adapters": len(self._serial_adapters),
             "ram": self._ram,
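Both md5sum call sites now pass the node's working directory as a second argument. Judging from the call sites, that argument tells the helper where to store its checksum cache instead of writing next to a possibly read-only image; the sketch below encodes that reading, and the real helper in gns3server.utils.images may differ in details:

import hashlib
import os

def md5sum(image_path, working_dir=None):
    # Where the ".md5sum" cache file lives; defaulting to the image's own
    # directory mirrors the old single-argument behaviour (assumed semantics).
    cache_dir = working_dir or os.path.dirname(image_path)
    cache_file = os.path.join(cache_dir, os.path.basename(image_path) + ".md5sum")
    if os.path.exists(cache_file):
        with open(cache_file) as f:
            return f.read().strip()
    digest = hashlib.md5()
    with open(image_path, "rb") as f:
        for block in iter(lambda: f.read(1024 * 1024), b""):
            digest.update(block)
    checksum = digest.hexdigest()
    with open(cache_file, "w") as f:
        f.write(checksum)
    return checksum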
@@ -85,10 +85,6 @@ class Project:
             "variables": self._variables
         }
 
-    def is_local(self):
-
-        return Config.instance().settings.Server.local
-
     @property
     def id(self):
@@ -101,12 +97,12 @@ class Project:
 
     @path.setter
     def path(self, path):
-        check_path_allowed(path)
-
         if hasattr(self, "_path"):
-            if path != self._path and self.is_local() is False:
+            if path != self._path:
                 raise ComputeForbiddenError("Changing the project directory path is not allowed")
 
+        check_path_allowed(path)
         self._path = path
 
     @property
@ -234,68 +234,6 @@ class Qemu(BaseManager):
|
|||||||
|
|
||||||
return os.path.join("qemu", f"vm-{legacy_vm_id}")
|
return os.path.join("qemu", f"vm-{legacy_vm_id}")
|
||||||
|
|
||||||
async def create_disk(self, qemu_img, path, options):
|
|
||||||
"""
|
|
||||||
Create a Qemu disk with qemu-img
|
|
||||||
|
|
||||||
:param qemu_img: qemu-img binary path
|
|
||||||
:param path: Image path
|
|
||||||
:param options: Disk image creation options
|
|
||||||
"""
|
|
||||||
|
|
||||||
try:
|
|
||||||
img_format = options.pop("format")
|
|
||||||
img_size = options.pop("size")
|
|
||||||
|
|
||||||
if not os.path.isabs(path):
|
|
||||||
directory = self.get_images_directory()
|
|
||||||
os.makedirs(directory, exist_ok=True)
|
|
||||||
path = os.path.join(directory, os.path.basename(path))
|
|
||||||
|
|
||||||
try:
|
|
||||||
if os.path.exists(path):
|
|
||||||
raise QemuError(f"Could not create disk image '{path}', file already exists")
|
|
||||||
except UnicodeEncodeError:
|
|
||||||
raise QemuError(
|
|
||||||
"Could not create disk image '{}', "
|
|
||||||
"path contains characters not supported by filesystem".format(path)
|
|
||||||
)
|
|
||||||
|
|
||||||
command = [qemu_img, "create", "-f", img_format]
|
|
||||||
for option in sorted(options.keys()):
|
|
||||||
command.extend(["-o", f"{option}={options[option]}"])
|
|
||||||
command.append(path)
|
|
||||||
command.append(f"{img_size}M")
|
|
||||||
|
|
||||||
process = await asyncio.create_subprocess_exec(*command)
|
|
||||||
await process.wait()
|
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
|
||||||
raise QemuError(f"Could not create disk image {path}:{e}")
|
|
||||||
|
|
||||||
async def resize_disk(self, qemu_img, path, extend):
|
|
||||||
"""
|
|
||||||
Resize a Qemu disk with qemu-img
|
|
||||||
|
|
||||||
:param qemu_img: qemu-img binary path
|
|
||||||
:param path: Image path
|
|
||||||
:param size: size
|
|
||||||
"""
|
|
||||||
|
|
||||||
if not os.path.isabs(path):
|
|
||||||
directory = self.get_images_directory()
|
|
||||||
os.makedirs(directory, exist_ok=True)
|
|
||||||
path = os.path.join(directory, os.path.basename(path))
|
|
||||||
|
|
||||||
try:
|
|
||||||
if not os.path.exists(path):
|
|
||||||
raise QemuError(f"Qemu disk '{path}' does not exist")
|
|
||||||
command = [qemu_img, "resize", path, f"+{extend}M"]
|
|
||||||
process = await asyncio.create_subprocess_exec(*command)
|
|
||||||
await process.wait()
|
|
||||||
log.info(f"Qemu disk '{path}' extended by {extend} MB")
|
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
|
||||||
raise QemuError(f"Could not update disk image {path}:{e}")
|
|
||||||
|
|
||||||
def _init_config_disk(self):
|
def _init_config_disk(self):
|
||||||
"""
|
"""
|
||||||
Initialize the default config disk
|
Initialize the default config disk
|
||||||
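These manager-level helpers are not simply dropped: later in this diff, QemuVM gains create_disk_image()/resize_disk_image() that keep the same qemu-img invocation but scope disks to the VM working directory. The underlying subprocess pattern, as a standalone sketch (function name hypothetical, qemu-img must be installed to run it):

    import asyncio

    async def qemu_img_create(qemu_img, path, img_format="qcow2", size_mb=30, **options):
        # Equivalent command line: qemu-img create -f qcow2 -o key=value ... path 30M
        command = [qemu_img, "create", "-f", img_format]
        for key in sorted(options):
            command.extend(["-o", f"{key}={options[key]}"])
        command += [path, f"{size_mb}M"]
        process = await asyncio.create_subprocess_exec(*command)
        return await process.wait()

    # usage sketch:
    # asyncio.run(qemu_img_create("qemu-img", "test.qcow2", size_mb=64, cluster_size=65536))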
@@ -280,7 +280,7 @@ class QemuVM(BaseNode):
         :param value: New disk value
         """

-        value = self.manager.get_abs_image_path(value, self.project.path)
+        value = self.manager.get_abs_image_path(value, self.working_dir)
         if not self.linked_clone:
             for node in self.manager.nodes:
                 if node != self and getattr(node, variable) == value:

@@ -493,7 +493,7 @@ class QemuVM(BaseNode):
         """

         if cdrom_image:
-            self._cdrom_image = self.manager.get_abs_image_path(cdrom_image, self.project.path)
+            self._cdrom_image = self.manager.get_abs_image_path(cdrom_image, self.working_dir)

         log.info(
             'QEMU VM "{name}" [{id}] has set the QEMU cdrom image path to {cdrom_image}'.format(

@@ -551,7 +551,7 @@ class QemuVM(BaseNode):
         :param bios_image: QEMU bios image path
         """

-        self._bios_image = self.manager.get_abs_image_path(bios_image, self.project.path)
+        self._bios_image = self.manager.get_abs_image_path(bios_image, self.working_dir)
         log.info(
             'QEMU VM "{name}" [{id}] has set the QEMU bios image path to {bios_image}'.format(
                 name=self._name, id=self._id, bios_image=self._bios_image

@@ -923,7 +923,7 @@ class QemuVM(BaseNode):
         :param initrd: QEMU initrd path
         """

-        initrd = self.manager.get_abs_image_path(initrd, self.project.path)
+        initrd = self.manager.get_abs_image_path(initrd, self.working_dir)

         log.info(
             'QEMU VM "{name}" [{id}] has set the QEMU initrd path to {initrd}'.format(

@@ -957,7 +957,7 @@ class QemuVM(BaseNode):
         :param kernel_image: QEMU kernel image path
         """

-        kernel_image = self.manager.get_abs_image_path(kernel_image, self.project.path)
+        kernel_image = self.manager.get_abs_image_path(kernel_image, self.working_dir)
         log.info(
             'QEMU VM "{name}" [{id}] has set the QEMU kernel image path to {kernel_image}'.format(
                 name=self._name, id=self._id, kernel_image=kernel_image

@@ -1057,10 +1057,10 @@ class QemuVM(BaseNode):
         # In case user upload image manually we don't have md5 sums.
         # We need generate hashes at this point, otherwise they will be generated
         # at asdict but not on separate thread.
-        await cancellable_wait_run_in_executor(md5sum, self._hda_disk_image)
-        await cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image)
-        await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image)
-        await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image)
+        await cancellable_wait_run_in_executor(md5sum, self._hda_disk_image, self.working_dir)
+        await cancellable_wait_run_in_executor(md5sum, self._hdb_disk_image, self.working_dir)
+        await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image, self.working_dir)
+        await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image, self.working_dir)

         super().create()
@ -1599,6 +1599,85 @@ class QemuVM(BaseNode):
|
|||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
async def create_disk_image(self, disk_name, options):
|
||||||
|
"""
|
||||||
|
Create a Qemu disk
|
||||||
|
|
||||||
|
:param disk_name: disk name
|
||||||
|
:param options: disk creation options
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
qemu_img_path = self._get_qemu_img()
|
||||||
|
img_format = options.pop("format")
|
||||||
|
img_size = options.pop("size")
|
||||||
|
disk_path = os.path.join(self.working_dir, disk_name)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if os.path.exists(disk_path):
|
||||||
|
raise QemuError(f"Could not create disk image '{disk_name}', file already exists")
|
||||||
|
except UnicodeEncodeError:
|
||||||
|
raise QemuError(
|
||||||
|
f"Could not create disk image '{disk_name}', "
|
||||||
|
"Disk image name contains characters not supported by the filesystem"
|
||||||
|
)
|
||||||
|
|
||||||
|
command = [qemu_img_path, "create", "-f", img_format]
|
||||||
|
for option in sorted(options.keys()):
|
||||||
|
command.extend(["-o", f"{option}={options[option]}"])
|
||||||
|
command.append(disk_path)
|
||||||
|
command.append(f"{img_size}M")
|
||||||
|
retcode = await self._qemu_img_exec(command)
|
||||||
|
if retcode:
|
||||||
|
stdout = self.read_qemu_img_stdout()
|
||||||
|
raise QemuError(f"Could not create '{disk_name}' disk image: qemu-img returned with {retcode}\n{stdout}")
|
||||||
|
else:
|
||||||
|
log.info(f"QEMU VM '{self.name}' [{self.id}]: Qemu disk image'{disk_name}' created")
|
||||||
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
|
stdout = self.read_qemu_img_stdout()
|
||||||
|
raise QemuError(f"Could not create '{disk_name}' disk image: {e}\n{stdout}")
|
||||||
|
|
||||||
|
async def resize_disk_image(self, disk_name, extend):
|
||||||
|
"""
|
||||||
|
Resize a Qemu disk
|
||||||
|
|
||||||
|
:param disk_name: disk name
|
||||||
|
:param extend: new size
|
||||||
|
"""
|
||||||
|
|
||||||
|
try:
|
||||||
|
qemu_img_path = self._get_qemu_img()
|
||||||
|
disk_path = os.path.join(self.working_dir, disk_name)
|
||||||
|
if not os.path.exists(disk_path):
|
||||||
|
raise QemuError(f"Qemu disk image '{disk_name}' does not exist")
|
||||||
|
|
||||||
|
command = [qemu_img_path, "resize", disk_path, f"+{extend}M"]
|
||||||
|
retcode = await self._qemu_img_exec(command)
|
||||||
|
if retcode:
|
||||||
|
stdout = self.read_qemu_img_stdout()
|
||||||
|
raise QemuError(f"Could not update '{disk_name}' disk image: qemu-img returned with {retcode}\n{stdout}")
|
||||||
|
else:
|
||||||
|
log.info(f"QEMU VM '{self.name}' [{self.id}]: Qemu disk image '{disk_name}' extended by {extend} MB")
|
||||||
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
|
stdout = self.read_qemu_img_stdout()
|
||||||
|
raise QemuError(f"Could not update '{disk_name}' disk image: {e}\n{stdout}")
|
||||||
|
|
||||||
|
def delete_disk_image(self, disk_name):
|
||||||
|
"""
|
||||||
|
Delete a Qemu disk
|
||||||
|
|
||||||
|
:param disk_name: disk name
|
||||||
|
"""
|
||||||
|
|
||||||
|
disk_path = os.path.join(self.working_dir, disk_name)
|
||||||
|
if not os.path.exists(disk_path):
|
||||||
|
raise QemuError(f"Qemu disk image '{disk_name}' does not exist")
|
||||||
|
|
||||||
|
try:
|
||||||
|
os.remove(disk_path)
|
||||||
|
except OSError as e:
|
||||||
|
raise QemuError(f"Could not delete '{disk_name}' disk image: {e}")
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def started(self):
|
def started(self):
|
||||||
"""
|
"""
|
||||||
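A usage sketch for the new per-VM methods, assuming a QemuVM instance named vm; note that create_disk_image() pops "format" and "size" out of the options dict and turns every remaining key into a qemu-img -o option:

    async def recreate_scratch_disk(vm):
        """Illustration only: the disk lands in vm.working_dir."""
        await vm.create_disk_image("hdb_disk.qcow2", {
            "format": "qcow2",
            "size": 100,             # MB; popped before the -o options are built
            "cluster_size": 65536,   # forwarded to qemu-img as -o cluster_size=65536
        })
        await vm.resize_disk_image("hdb_disk.qcow2", 50)  # qemu-img resize +50M
        vm.delete_disk_image("hdb_disk.qcow2")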
@ -1791,6 +1870,7 @@ class QemuVM(BaseNode):
|
|||||||
*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir
|
*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir
|
||||||
)
|
)
|
||||||
retcode = await process.wait()
|
retcode = await process.wait()
|
||||||
|
if retcode != 0:
|
||||||
log.info(f"{self._get_qemu_img()} returned with {retcode}")
|
log.info(f"{self._get_qemu_img()} returned with {retcode}")
|
||||||
return retcode
|
return retcode
|
||||||
|
|
||||||
@ -1978,7 +2058,7 @@ class QemuVM(BaseNode):
|
|||||||
drives = ["a", "b", "c", "d"]
|
drives = ["a", "b", "c", "d"]
|
||||||
|
|
||||||
for disk_index, drive in enumerate(drives):
|
for disk_index, drive in enumerate(drives):
|
||||||
# prioritize config disk over harddisk d
|
# prioritize config disk over normal disks
|
||||||
if drive == "d" and self._create_config_disk:
|
if drive == "d" and self._create_config_disk:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
@ -1992,34 +2072,44 @@ class QemuVM(BaseNode):
|
|||||||
interface = "ide"
|
interface = "ide"
|
||||||
setattr(self, f"hd{drive}_disk_interface", interface)
|
setattr(self, f"hd{drive}_disk_interface", interface)
|
||||||
|
|
||||||
disk_name = "hd" + drive
|
disk_name = f"hd{drive}"
|
||||||
if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
|
if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
|
||||||
if os.path.islink(disk_image):
|
if os.path.islink(disk_image):
|
||||||
raise QemuError(
|
raise QemuError(
|
||||||
f"{disk_name} disk image '{disk_image}' linked to '{os.path.realpath(disk_image)}' is not accessible"
|
f"'{disk_name}' disk image linked to "
|
||||||
|
f"'{os.path.realpath(disk_image)}' is not accessible"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
raise QemuError(f"{disk_name} disk image '{disk_image}' is not accessible")
|
raise QemuError(f"'{disk_image}' is not accessible")
|
||||||
else:
|
else:
|
||||||
try:
|
try:
|
||||||
# check for corrupt disk image
|
# check for corrupt disk image
|
||||||
retcode = await self._qemu_img_exec([qemu_img_path, "check", disk_image])
|
retcode = await self._qemu_img_exec([qemu_img_path, "check", disk_image])
|
||||||
|
# ignore retcode == 1, one reason is that the image is encrypted and
|
||||||
|
# there is no encrypt.key-secret available
|
||||||
if retcode == 3:
|
if retcode == 3:
|
||||||
# image has leaked clusters, but is not corrupted, let's try to fix it
|
# image has leaked clusters, but is not corrupted, let's try to fix it
|
||||||
log.warning(f"Qemu image {disk_image} has leaked clusters")
|
log.warning(f"Disk image '{disk_image}' has leaked clusters")
|
||||||
if await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", "{}".format(disk_image)]) == 3:
|
if await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", f"{disk_image}"]) == 3:
|
||||||
self.project.emit("log.warning", {"message": "Qemu image '{}' has leaked clusters and could not be fixed".format(disk_image)})
|
self.project.emit(
|
||||||
|
"log.warning",
|
||||||
|
{"message": f"Disk image '{disk_image}' has leaked clusters and could not be fixed"}
|
||||||
|
)
|
||||||
elif retcode == 2:
|
elif retcode == 2:
|
||||||
# image is corrupted, let's try to fix it
|
# image is corrupted, let's try to fix it
|
||||||
log.warning(f"Qemu image {disk_image} is corrupted")
|
log.warning(f"Disk image '{disk_image}' is corrupted")
|
||||||
if await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", "{}".format(disk_image)]) == 2:
|
if await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", f"{disk_image}"]) == 2:
|
||||||
self.project.emit("log.warning", {"message": "Qemu image '{}' is corrupted and could not be fixed".format(disk_image)})
|
self.project.emit(
|
||||||
# ignore retcode == 1. One reason is that the image is encrypted and there is no encrypt.key-secret available
|
"log.warning",
|
||||||
|
{"message": f"Disk image '{disk_image}' is corrupted and could not be fixed"}
|
||||||
|
)
|
||||||
except (OSError, subprocess.SubprocessError) as e:
|
except (OSError, subprocess.SubprocessError) as e:
|
||||||
stdout = self.read_qemu_img_stdout()
|
stdout = self.read_qemu_img_stdout()
|
||||||
raise QemuError(f"Could not check '{disk_name}' disk image: {e}\n{stdout}")
|
raise QemuError(f"Could not check '{disk_name}' disk image: {e}\n{stdout}")
|
||||||
|
|
||||||
if self.linked_clone:
|
if self.linked_clone and os.path.dirname(disk_image) != self.working_dir:
|
||||||
|
|
||||||
|
#cloned_disk_image = os.path.splitext(os.path.basename(disk_image))
|
||||||
disk = os.path.join(self.working_dir, f"{disk_name}_disk.qcow2")
|
disk = os.path.join(self.working_dir, f"{disk_name}_disk.qcow2")
|
||||||
if not os.path.exists(disk):
|
if not os.path.exists(disk):
|
||||||
# create the disk
|
# create the disk
|
||||||
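The retcode branches above map directly onto the qemu-img check exit codes (0 = clean, 1 = check could not be completed, which this code deliberately ignores because encrypted images report it, 2 = corrupted, 3 = leaked clusters). A compact sketch of that mapping, for reference:

    # qemu-img check exit codes as handled in the hunk above
    QEMU_IMG_CHECK = {
        0: "image is clean",
        1: "check not completed (ignored: e.g. encrypted image without encrypt.key-secret)",
        2: "image is corrupted -> retried with: qemu-img check -r all <image>",
        3: "leaked clusters -> retried with: qemu-img check -r leaks <image>",
    }

    def describe_check(retcode: int) -> str:
        return QEMU_IMG_CHECK.get(retcode, f"unexpected qemu-img exit code {retcode}")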
@ -2027,7 +2117,7 @@ class QemuVM(BaseNode):
|
|||||||
else:
|
else:
|
||||||
backing_file_format = await self._find_disk_file_format(disk_image)
|
backing_file_format = await self._find_disk_file_format(disk_image)
|
||||||
if not backing_file_format:
|
if not backing_file_format:
|
||||||
raise QemuError("Could not detect format for disk image: {}".format(disk_image))
|
raise QemuError(f"Could not detect format for disk image '{disk_image}'")
|
||||||
# Rebase the image. This is in case the base image moved to a different directory,
|
# Rebase the image. This is in case the base image moved to a different directory,
|
||||||
# which will be the case if we imported a portable project. This uses
|
# which will be the case if we imported a portable project. This uses
|
||||||
# get_abs_image_path(hdX_disk_image) and ignores the old base path embedded
|
# get_abs_image_path(hdX_disk_image) and ignores the old base path embedded
|
||||||
@ -2406,20 +2496,30 @@ class QemuVM(BaseNode):
|
|||||||
answer[field] = getattr(self, field)
|
answer[field] = getattr(self, field)
|
||||||
except AttributeError:
|
except AttributeError:
|
||||||
pass
|
pass
|
||||||
answer["hda_disk_image"] = self.manager.get_relative_image_path(self._hda_disk_image, self.project.path)
|
|
||||||
answer["hda_disk_image_md5sum"] = md5sum(self._hda_disk_image)
|
for drive in ["a", "b", "c", "d"]:
|
||||||
answer["hdb_disk_image"] = self.manager.get_relative_image_path(self._hdb_disk_image, self.project.path)
|
disk_image = getattr(self, f"_hd{drive}_disk_image")
|
||||||
answer["hdb_disk_image_md5sum"] = md5sum(self._hdb_disk_image)
|
if not disk_image:
|
||||||
answer["hdc_disk_image"] = self.manager.get_relative_image_path(self._hdc_disk_image, self.project.path)
|
continue
|
||||||
answer["hdc_disk_image_md5sum"] = md5sum(self._hdc_disk_image)
|
answer[f"hd{drive}_disk_image"] = self.manager.get_relative_image_path(disk_image, self.working_dir)
|
||||||
answer["hdd_disk_image"] = self.manager.get_relative_image_path(self._hdd_disk_image, self.project.path)
|
answer[f"hd{drive}_disk_image_md5sum"] = md5sum(disk_image, self.working_dir)
|
||||||
answer["hdd_disk_image_md5sum"] = md5sum(self._hdd_disk_image)
|
|
||||||
answer["cdrom_image"] = self.manager.get_relative_image_path(self._cdrom_image, self.project.path)
|
local_disk = os.path.join(self.working_dir, f"hd{drive}_disk.qcow2")
|
||||||
answer["cdrom_image_md5sum"] = md5sum(self._cdrom_image)
|
if os.path.exists(local_disk):
|
||||||
answer["bios_image"] = self.manager.get_relative_image_path(self._bios_image, self.project.path)
|
try:
|
||||||
answer["bios_image_md5sum"] = md5sum(self._bios_image)
|
qcow2 = Qcow2(local_disk)
|
||||||
answer["initrd"] = self.manager.get_relative_image_path(self._initrd, self.project.path)
|
if qcow2.backing_file:
|
||||||
answer["initrd_md5sum"] = md5sum(self._initrd)
|
answer[f"hd{drive}_disk_image_backed"] = os.path.basename(local_disk)
|
||||||
answer["kernel_image"] = self.manager.get_relative_image_path(self._kernel_image, self.project.path)
|
except (Qcow2Error, OSError) as e:
|
||||||
answer["kernel_image_md5sum"] = md5sum(self._kernel_image)
|
log.error(f"Could not read qcow2 disk image '{local_disk}': {e}")
|
||||||
|
continue
|
||||||
|
|
||||||
|
answer["cdrom_image"] = self.manager.get_relative_image_path(self._cdrom_image, self.working_dir)
|
||||||
|
answer["cdrom_image_md5sum"] = md5sum(self._cdrom_image, self.working_dir)
|
||||||
|
answer["bios_image"] = self.manager.get_relative_image_path(self._bios_image, self.working_dir)
|
||||||
|
answer["bios_image_md5sum"] = md5sum(self._bios_image, self.working_dir)
|
||||||
|
answer["initrd"] = self.manager.get_relative_image_path(self._initrd, self.working_dir)
|
||||||
|
answer["initrd_md5sum"] = md5sum(self._initrd, self.working_dir)
|
||||||
|
answer["kernel_image"] = self.manager.get_relative_image_path(self._kernel_image, self.working_dir)
|
||||||
|
answer["kernel_image_md5sum"] = md5sum(self._kernel_image, self.working_dir)
|
||||||
return answer
|
return answer
|
||||||
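The new hdX_disk_image_backed fields are only reported when the per-project overlay (hdX_disk.qcow2) actually has a backing file. A minimal sketch of the check the Qcow2 helper performs, assuming the header layout of the qcow2 specification (the real parser lives in gns3server; this standalone version is for illustration only):

    import struct

    def qcow2_backing_file(path):
        """Read the backing file name from a qcow2 header, per the qcow2 spec:
        bytes 0-3 magic b'QFI\\xfb', 4-7 version, 8-15 backing file offset,
        16-19 backing file name length."""
        with open(path, "rb") as f:
            magic, _version, bf_offset, bf_size = struct.unpack(">4sIQI", f.read(20))
            if magic != b"QFI\xfb" or bf_offset == 0:
                return None  # not qcow2, or no backing file
            f.seek(bf_offset)
            return f.read(bf_size).decode(errors="replace")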
@@ -311,8 +311,8 @@ class VMware(BaseManager):
             vmnet_interfaces = self._get_vmnet_interfaces_ubridge()
         else:
             vmnet_interfaces = self._get_vmnet_interfaces()
-            vmnet_interfaces = list(vmnet_interfaces.keys())
             self._vmnets_info = vmnet_interfaces.copy()
+            vmnet_interfaces = list(vmnet_interfaces.keys())

         # remove vmnets already in use
         for vmware_vm in self._nodes.values():
@@ -153,8 +153,14 @@ class ApplianceManager:
                             version_images[appliance_key] = image_in_db.filename
                         else:
                             # check if the image is on disk
+                            # FIXME: still necessary? the image should have been discovered and saved in the db already
                             image_path = os.path.join(image_dir, appliance_file)
-                            if os.path.exists(image_path) and await wait_run_in_executor(md5sum, image_path) == image_checksum:
+                            if os.path.exists(image_path) and \
+                                    await wait_run_in_executor(
+                                        md5sum,
+                                        image_path,
+                                        cache_to_md5file=False
+                                    ) == image_checksum:
                                 async with aiofiles.open(image_path, "rb") as f:
                                     await write_image(appliance_file, image_path, f, images_repo)
                             else:
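The md5sum() call now passes cache_to_md5file=False so that verifying an appliance image does not litter the images directory with checksum side files, and wait_run_in_executor pushes the blocking hash off the event loop. A sketch of that helper's likely contract (an assumption for illustration; the real implementation is in gns3server.utils.asyncio):

    import asyncio
    import functools

    async def wait_run_in_executor_sketch(func, *args, **kwargs):
        # run blocking `func` in the default thread pool and await its result
        loop = asyncio.get_running_loop()
        return await loop.run_in_executor(None, functools.partial(func, *args, **kwargs))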
@@ -30,10 +30,13 @@ from ..utils import parse_version
 from ..utils.asyncio import locking
 from ..controller.controller_error import (
     ControllerError,
+    ControllerBadRequestError,
     ControllerNotFoundError,
     ControllerForbiddenError,
     ControllerTimeoutError,
     ControllerUnauthorizedError,
+    ComputeError,
+    ComputeConflictError
 )
 from ..version import __version__, __version_info__

@@ -43,23 +46,6 @@ import logging
 log = logging.getLogger(__name__)


-class ComputeError(ControllerError):
-    pass
-
-
-# FIXME: broken
-class ComputeConflict(ComputeError):
-    """
-    Raise when the compute send a 409 that we can handle
-
-    :param response: The response of the compute
-    """
-
-    def __init__(self, response):
-        super().__init__(response["message"])
-        self.response = response
-
-
 class Compute:
     """
     A GNS3 compute.

@@ -574,7 +560,9 @@ class Compute:
         else:
             msg = ""

-        if response.status == 401:
+        if response.status == 400:
+            raise ControllerBadRequestError(msg)
+        elif response.status == 401:
             raise ControllerUnauthorizedError(f"Invalid authentication for compute '{self.name}' [{self.id}]")
         elif response.status == 403:
             raise ControllerForbiddenError(msg)

@@ -584,7 +572,7 @@ class Compute:
             raise ControllerTimeoutError(f"{method} {path} request timeout")
         elif response.status == 409:
             try:
-                raise ComputeConflict(json.loads(body))
+                raise ComputeConflictError(url, json.loads(body))
             # If the 409 doesn't come from a GNS3 server
             except ValueError:
                 raise ControllerError(msg)

@@ -593,7 +581,7 @@ class Compute:
         elif response.status == 503:
             raise aiohttp.web.HTTPServiceUnavailable(text=f"Service unavailable {url} {body}")
         else:
-            raise NotImplementedError(f"{response.status} status code is not supported for {method} '{url}'")
+            raise NotImplementedError(f"{response.status} status code is not supported for {method} '{url}'\n{body}")
         if body and len(body):
             if raw:
                 response.body = body

@@ -636,16 +624,12 @@ class Compute:
         """
         Return the list of images available for this type on the compute node.
         """
-        images = []
-
         res = await self.http_query("GET", f"/{type}/images", timeout=None)
         images = res.json

         try:
             if type in ["qemu", "dynamips", "iou"]:
-                # for local_image in list_images(type):
-                #    if local_image['filename'] not in [i['filename'] for i in images]:
-                #        images.append(local_image)
                 images = sorted(images, key=itemgetter("filename"))
             else:
                 images = sorted(images, key=itemgetter("image"))

@@ -51,3 +51,27 @@ class ControllerForbiddenError(ControllerError):
 class ControllerTimeoutError(ControllerError):
     def __init__(self, message: str):
         super().__init__(message)
+
+
+class ComputeError(ControllerError):
+    pass
+
+
+class ComputeConflictError(ComputeError):
+    """
+    Raise when the compute sends a 409 that we can handle
+
+    :param request URL: compute URL used for the request
+    :param response: compute JSON response
+    """
+
+    def __init__(self, url, response):
+        super().__init__(response["message"])
+        self._url = url
+        self._response = response
+
+    def url(self):
+        return self._url
+
+    def response(self):
+        return self._response
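Net effect of this refactor: the 409 wrapper moves out of compute.py into controller_error.py (dropping the version marked "# FIXME: broken"), gains the request URL, and exposes the payload through methods instead of an attribute. Callers therefore change from e.response to e.response(); a minimal handling sketch against the new class:

    def handle_conflict(e):
        # e is a ComputeConflictError caught from a compute request
        data = e.response()                 # parsed JSON body of the 409
        if data.get("exception") == "ImageMissingError":
            return data["image"]            # image that should be uploaded
        raise e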
@@ -16,7 +16,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 import os
-import sys
 import json
 import asyncio
 import aiofiles
@@ -20,10 +20,10 @@ import sys
 import json
 import uuid
 import shutil
-import zipfile
 import aiofiles
 import itertools
 import tempfile
+import gns3server.utils.zipfile_zstd as zipfile_zstd

 from .controller_error import ControllerError
 from .topology import load_topology

@@ -60,9 +60,9 @@ async def import_project(controller, project_id, stream, location=None, name=None
         raise ControllerError("The destination path should not contain .gns3")

     try:
-        with zipfile.ZipFile(stream) as zip_file:
+        with zipfile_zstd.ZipFile(stream) as zip_file:
             project_file = zip_file.read("project.gns3").decode()
-    except zipfile.BadZipFile:
+    except zipfile_zstd.BadZipFile:
         raise ControllerError("Cannot import project, not a GNS3 project (invalid zip)")
     except KeyError:
         raise ControllerError("Cannot import project, project.gns3 file could not be found")

@@ -92,9 +92,9 @@ async def import_project(controller, project_id, stream, location=None, name=None
         raise ControllerError("The project name contain non supported or invalid characters")

     try:
-        with zipfile.ZipFile(stream) as zip_file:
+        with zipfile_zstd.ZipFile(stream) as zip_file:
             await wait_run_in_executor(zip_file.extractall, path)
-    except zipfile.BadZipFile:
+    except zipfile_zstd.BadZipFile:
         raise ControllerError("Cannot extract files from GNS3 project (invalid zip)")

     topology = load_topology(os.path.join(path, "project.gns3"))

@@ -264,11 +264,11 @@ async def _import_snapshots(snapshots_path, project_name, project_id):
     # extract everything to a temporary directory
     try:
         with open(snapshot_path, "rb") as f:
-            with zipfile.ZipFile(f) as zip_file:
+            with zipfile_zstd.ZipFile(f) as zip_file:
                 await wait_run_in_executor(zip_file.extractall, tmpdir)
     except OSError as e:
         raise ControllerError(f"Cannot open snapshot '{os.path.basename(snapshot)}': {e}")
-    except zipfile.BadZipFile:
+    except zipfile_zstd.BadZipFile:
         raise ControllerError(
             f"Cannot extract files from snapshot '{os.path.basename(snapshot)}': not a GNS3 project (invalid zip)"
         )

@@ -294,7 +294,7 @@ async def _import_snapshots(snapshots_path, project_name, project_id):

     # write everything back to the original snapshot file
     try:
-        with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
+        with aiozipstream.ZipFile(compression=zipfile_zstd.ZIP_STORED) as zstream:
             for root, dirs, files in os.walk(tmpdir, topdown=True, followlinks=False):
                 for file in files:
                     path = os.path.join(root, file)
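gns3server.utils.zipfile_zstd is used here as a drop-in replacement for the stdlib zipfile: the hunks swap only the module reference while keeping the same ZipFile/BadZipFile/ZIP_STORED names, adding Zstandard support for project archives. A usage sketch, assuming that drop-in API:

    import gns3server.utils.zipfile_zstd as zipfile_zstd

    def read_project_file(stream):
        # identical shape to the stdlib code it replaces; only the module differs
        try:
            with zipfile_zstd.ZipFile(stream) as zip_file:
                return zip_file.read("project.gns3").decode()
        except zipfile_zstd.BadZipFile:
            raise RuntimeError("not a GNS3 project (invalid zip)")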
@@ -21,8 +21,12 @@ import copy
 import uuid
 import os

-from .compute import ComputeConflict, ComputeError
-from .controller_error import ControllerError, ControllerTimeoutError
+from .controller_error import (
+    ControllerError,
+    ControllerTimeoutError,
+    ComputeError,
+    ComputeConflictError
+)
 from .ports.port_factory import PortFactory, StandardPortFactory, DynamipsPortFactory
 from ..utils.images import images_directories
 from ..config import Config

@@ -400,9 +404,10 @@ class Node:
             response = await self._compute.post(
                 f"/projects/{self._project.id}/{self._node_type}/nodes", data=data, timeout=timeout
             )
-        except ComputeConflict as e:
-            if e.response.get("exception") == "ImageMissingError":
-                res = await self._upload_missing_image(self._node_type, e.response["image"])
+        except ComputeConflictError as e:
+            response = e.response()
+            if response.get("exception") == "ImageMissingError":
+                res = await self._upload_missing_image(self._node_type, response["image"])
                 if not res:
                     raise e
             else:
@@ -1038,7 +1038,7 @@ class Project:
         while self._loading:
             await asyncio.sleep(0.5)

-    async def duplicate(self, name=None, location=None, reset_mac_addresses=True):
+    async def duplicate(self, name=None, reset_mac_addresses=True):
         """
         Duplicate a project

@@ -1047,7 +1047,6 @@ class Project:
         It's a little slower but we have only one implementation to maintain.

         :param name: Name of the new project. A new one will be generated in case of conflicts
-        :param location: Parent directory of the new project
         :param reset_mac_addresses: Reset MAC addresses for the new project
         """
         # If the project was not open we open it temporary

@@ -1062,10 +1061,7 @@ class Project:

         # use the parent directory of the project we are duplicating as a
         # temporary directory to avoid no space left issues when '/tmp'
-        # is location on another partition.
-        if location:
-            working_dir = os.path.abspath(os.path.join(location, os.pardir))
-        else:
-            working_dir = os.path.abspath(os.path.join(self.path, os.pardir))
+        # is located on another partition.
+        working_dir = os.path.abspath(os.path.join(self.path, os.pardir))

         with tempfile.TemporaryDirectory(dir=working_dir) as tmpdir:

@@ -1090,7 +1086,11 @@ class Project:
             # import the temporary project
             with open(project_path, "rb") as f:
                 project = await import_project(
-                    self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True
+                    self._controller,
+                    str(uuid.uuid4()),
+                    f,
+                    name=name,
+                    keep_compute_id=True
                 )

         log.info(f"Project '{project.name}' duplicated in {time.time() - begin:.4f} seconds")
@@ -224,7 +224,7 @@ def _convert_2_1_0(topo, topo_path):
         if node["node_type"] in ("qemu", "vmware", "virtualbox"):
             if "acpi_shutdown" in node["properties"]:
                 if node["properties"]["acpi_shutdown"] is True:
-                    node["properties"]["on_close"] = "save_vm_sate"
+                    node["properties"]["on_close"] = "save_vm_state"
                 else:
                     node["properties"]["on_close"] = "power_off"
                 del node["properties"]["acpi_shutdown"]
@@ -15,7 +15,6 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

-import sys
 import asyncio

 from typing import Callable

@@ -25,7 +24,8 @@ from gns3server.controller import Controller
 from gns3server.compute import MODULES
 from gns3server.compute.port_manager import PortManager
 from gns3server.utils.http_client import HTTPClient
-from gns3server.db.tasks import connect_to_db, get_computes
+from gns3server.db.tasks import connect_to_db, get_computes, disconnect_from_db, discover_images_on_filesystem


 import logging

@@ -60,7 +60,9 @@ def create_startup_handler(app: FastAPI) -> Callable:
         # computing with server start
         from gns3server.compute.qemu import Qemu

-        asyncio.ensure_future(Qemu.instance().list_images())
+        # Start the discovering new images on file system 5 seconds after the server has started
+        # to give it a chance to process API requests
+        loop.call_later(5, asyncio.create_task, discover_images_on_filesystem(app))

         for module in MODULES:
             log.debug(f"Loading module {module.__name__}")
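loop.call_later() takes a plain callback, not a coroutine, so the hunk passes asyncio.create_task as the callback and the coroutine object as its argument; after 5 seconds the loop calls create_task(coro) and the discovery starts. The same trick in isolation:

    import asyncio

    async def delayed_job():
        print("started after the delay")

    async def main():
        loop = asyncio.get_running_loop()
        # schedule: in 5 seconds, call asyncio.create_task(delayed_job())
        loop.call_later(5, asyncio.create_task, delayed_job())
        await asyncio.sleep(6)  # keep the loop alive long enough for the task

    asyncio.run(main())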
@ -90,4 +92,6 @@ def create_shutdown_handler(app: FastAPI) -> Callable:
|
|||||||
if PortManager.instance().udp_ports:
|
if PortManager.instance().udp_ports:
|
||||||
log.warning(f"UDP ports are still used {PortManager.instance().udp_ports}")
|
log.warning(f"UDP ports are still used {PortManager.instance().udp_ports}")
|
||||||
|
|
||||||
|
await disconnect_from_db(app)
|
||||||
|
|
||||||
return shutdown_handler
|
return shutdown_handler
|
||||||
@@ -59,7 +59,7 @@ class CrashReport:
     Report crash to a third party service
     """

-    DSN = "https://8f474628c1e44d0799140ccf05c486b8:f952ab1783d3427188fd81cc37da323c@o19455.ingest.sentry.io/38482"
+    DSN = "https://57f6b1102b6a4985a8e93aed51e19b8b@o19455.ingest.sentry.io/38482"
     _instance = None

     def __init__(self):
@@ -59,11 +59,14 @@ class ImagesRepository(BaseRepository):
         result = await self._db_session.execute(query)
         return result.scalars().first()

-    async def get_images(self) -> List[models.Image]:
+    async def get_images(self, image_type=None) -> List[models.Image]:
         """
         Get all images.
         """

-        query = select(models.Image)
+        if image_type:
+            query = select(models.Image).where(models.Image.image_type == image_type)
+        else:
+            query = select(models.Image)
         result = await self._db_session.execute(query)
         return result.scalars().all()
@@ -15,11 +15,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

+import asyncio
+import signal
 import os

 from fastapi import FastAPI
-from fastapi.encoders import jsonable_encoder
 from pydantic import ValidationError
+from watchfiles import awatch, Change

 from typing import List
 from sqlalchemy import event

@@ -27,6 +29,8 @@ from sqlalchemy.engine import Engine
 from sqlalchemy.exc import SQLAlchemyError
 from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
 from gns3server.db.repositories.computes import ComputesRepository
+from gns3server.db.repositories.images import ImagesRepository
+from gns3server.utils.images import discover_images, check_valid_image_header, read_image_info, InvalidImageError
 from gns3server import schemas

 from .models import Base
@ -51,6 +55,14 @@ async def connect_to_db(app: FastAPI) -> None:
|
|||||||
log.fatal(f"Error while connecting to database '{db_url}: {e}")
|
log.fatal(f"Error while connecting to database '{db_url}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
async def disconnect_from_db(app: FastAPI) -> None:
|
||||||
|
|
||||||
|
# dispose of the connection pool used by the database engine
|
||||||
|
if getattr(app.state, "_db_engine"):
|
||||||
|
await app.state._db_engine.dispose()
|
||||||
|
log.info(f"Disconnected from database")
|
||||||
|
|
||||||
|
|
||||||
@event.listens_for(Engine, "connect")
|
@event.listens_for(Engine, "connect")
|
||||||
def set_sqlite_pragma(dbapi_connection, connection_record):
|
def set_sqlite_pragma(dbapi_connection, connection_record):
|
||||||
|
|
||||||
@ -74,3 +86,94 @@ async def get_computes(app: FastAPI) -> List[dict]:
|
|||||||
continue
|
continue
|
||||||
computes.append(compute)
|
computes.append(compute)
|
||||||
return computes
|
return computes
|
||||||
|
|
||||||
|
|
||||||
|
def image_filter(change: Change, path: str) -> bool:
|
||||||
|
|
||||||
|
if change == Change.added:
|
||||||
|
header_magic_len = 7
|
||||||
|
with open(path, "rb") as f:
|
||||||
|
image_header = f.read(header_magic_len) # read the first 7 bytes of the file
|
||||||
|
if len(image_header) >= header_magic_len:
|
||||||
|
try:
|
||||||
|
check_valid_image_header(image_header)
|
||||||
|
except InvalidImageError as e:
|
||||||
|
log.debug(f"New image '{path}' added: {e}")
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
log.debug(f"New image '{path}' added: size is too small to be valid")
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
# FIXME: should we support image deletion?
|
||||||
|
# elif change == Change.deleted:
|
||||||
|
# return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
async def monitor_images_on_filesystem(app: FastAPI):
|
||||||
|
|
||||||
|
server_config = Config.instance().settings.Server
|
||||||
|
images_dir = os.path.expanduser(server_config.images_path)
|
||||||
|
|
||||||
|
try:
|
||||||
|
async for changes in awatch(
|
||||||
|
images_dir,
|
||||||
|
watch_filter=image_filter,
|
||||||
|
raise_interrupt=True
|
||||||
|
):
|
||||||
|
async with AsyncSession(app.state._db_engine) as db_session:
|
||||||
|
images_repository = ImagesRepository(db_session)
|
||||||
|
for change in changes:
|
||||||
|
change_type, image_path = change
|
||||||
|
if change_type == Change.added:
|
||||||
|
try:
|
||||||
|
image = await read_image_info(image_path)
|
||||||
|
except InvalidImageError as e:
|
||||||
|
log.warning(str(e))
|
||||||
|
continue
|
||||||
|
try:
|
||||||
|
if await images_repository.get_image(image_path):
|
||||||
|
continue
|
||||||
|
await images_repository.add_image(**image)
|
||||||
|
log.info(f"Discovered image '{image_path}' has been added to the database")
|
||||||
|
except SQLAlchemyError as e:
|
||||||
|
log.warning(f"Error while adding image '{image_path}' to the database: {e}")
|
||||||
|
# if change_type == Change.deleted:
|
||||||
|
# try:
|
||||||
|
# if await images_repository.get_image(image_path):
|
||||||
|
# success = await images_repository.delete_image(image_path)
|
||||||
|
# if not success:
|
||||||
|
# log.warning(f"Could not delete image '{image_path}' from the database")
|
||||||
|
# else:
|
||||||
|
# log.info(f"Image '{image_path}' has been deleted from the database")
|
||||||
|
# except SQLAlchemyError as e:
|
||||||
|
# log.warning(f"Error while deleting image '{image_path}' from the database: {e}")
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
# send SIGTERM to the server PID so uvicorn can shutdown the process
|
||||||
|
os.kill(os.getpid(), signal.SIGTERM)
|
||||||
|
|
||||||
|
|
||||||
|
async def discover_images_on_filesystem(app: FastAPI):
|
||||||
|
|
||||||
|
async with AsyncSession(app.state._db_engine) as db_session:
|
||||||
|
images_repository = ImagesRepository(db_session)
|
||||||
|
db_images = await images_repository.get_images()
|
||||||
|
existing_image_paths = []
|
||||||
|
for db_image in db_images:
|
||||||
|
try:
|
||||||
|
image = schemas.Image.from_orm(db_image)
|
||||||
|
existing_image_paths.append(image.path)
|
||||||
|
except ValidationError as e:
|
||||||
|
log.error(f"Could not load image '{db_image.filename}' from database: {e}")
|
||||||
|
continue
|
||||||
|
for image_type in ("qemu", "ios", "iou"):
|
||||||
|
discovered_images = await discover_images(image_type, existing_image_paths)
|
||||||
|
for image in discovered_images:
|
||||||
|
log.info(f"Adding discovered image '{image['path']}' to the database")
|
||||||
|
try:
|
||||||
|
await images_repository.add_image(**image)
|
||||||
|
except SQLAlchemyError as e:
|
||||||
|
log.warning(f"Error while adding image '{image['path']}' to the database: {e}")
|
||||||
|
|
||||||
|
# monitor if images have been manually added
|
||||||
|
asyncio.create_task(monitor_images_on_filesystem(app))
|
||||||
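watchfiles.awatch() yields sets of (Change, path) tuples and lets a watch_filter veto events before they are reported, which is how image_filter above keeps non-image files out of the database path. Stripped to its bones:

    import asyncio
    from watchfiles import awatch, Change

    async def watch_dir(path):
        async for changes in awatch(path):          # changes: set[tuple[Change, str]]
            for change_type, changed_path in changes:
                if change_type == Change.added:
                    print(f"new file: {changed_path}")

    # asyncio.run(watch_dir("/tmp/images"))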
@@ -28,7 +28,7 @@ from .controller.appliances import ApplianceVersion, Appliance
 from .controller.drawings import Drawing
 from .controller.gns3vm import GNS3VM
 from .controller.nodes import NodeCreate, NodeUpdate, NodeDuplicate, NodeCapture, Node
-from .controller.projects import ProjectCreate, ProjectUpdate, ProjectDuplicate, Project, ProjectFile
+from .controller.projects import ProjectCreate, ProjectUpdate, ProjectDuplicate, Project, ProjectFile, ProjectCompression
 from .controller.users import UserCreate, UserUpdate, LoggedInUserUpdate, User, Credentials, UserGroupCreate, UserGroupUpdate, UserGroup
 from .controller.rbac import RoleCreate, RoleUpdate, Role, PermissionCreate, PermissionUpdate, Permission
 from .controller.tokens import Token
@ -73,9 +73,12 @@ from .compute.dynamips_nodes import DynamipsCreate, DynamipsUpdate, Dynamips
|
|||||||
from .compute.ethernet_hub_nodes import EthernetHubCreate, EthernetHubUpdate, EthernetHub
|
from .compute.ethernet_hub_nodes import EthernetHubCreate, EthernetHubUpdate, EthernetHub
|
||||||
from .compute.ethernet_switch_nodes import EthernetSwitchCreate, EthernetSwitchUpdate, EthernetSwitch
|
from .compute.ethernet_switch_nodes import EthernetSwitchCreate, EthernetSwitchUpdate, EthernetSwitch
|
||||||
from .compute.frame_relay_switch_nodes import FrameRelaySwitchCreate, FrameRelaySwitchUpdate, FrameRelaySwitch
|
from .compute.frame_relay_switch_nodes import FrameRelaySwitchCreate, FrameRelaySwitchUpdate, FrameRelaySwitch
|
||||||
from .compute.qemu_nodes import QemuCreate, QemuUpdate, QemuImageCreate, QemuImageUpdate, QemuDiskResize, Qemu
|
from .compute.qemu_nodes import QemuCreate, QemuUpdate, Qemu
|
||||||
from .compute.iou_nodes import IOUCreate, IOUUpdate, IOUStart, IOU
|
from .compute.iou_nodes import IOUCreate, IOUUpdate, IOUStart, IOU
|
||||||
from .compute.nat_nodes import NATCreate, NATUpdate, NAT
|
from .compute.nat_nodes import NATCreate, NATUpdate, NAT
|
||||||
from .compute.vpcs_nodes import VPCSCreate, VPCSUpdate, VPCS
|
from .compute.vpcs_nodes import VPCSCreate, VPCSUpdate, VPCS
|
||||||
from .compute.vmware_nodes import VMwareCreate, VMwareUpdate, VMware
|
from .compute.vmware_nodes import VMwareCreate, VMwareUpdate, VMware
|
||||||
from .compute.virtualbox_nodes import VirtualBoxCreate, VirtualBoxUpdate, VirtualBox
|
from .compute.virtualbox_nodes import VirtualBoxCreate, VirtualBoxUpdate, VirtualBox
|
||||||
|
|
||||||
|
# Schemas for both controller and compute
|
||||||
|
from .qemu_disk_image import QemuDiskImageCreate, QemuDiskImageUpdate
|
||||||
|
@ -15,7 +15,7 @@
|
|||||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from pydantic import BaseModel, Field
|
from pydantic import BaseModel, Field
|
||||||
from typing import Optional, Union
|
from typing import Optional
|
||||||
from enum import Enum
|
from enum import Enum
|
||||||
|
|
||||||
|
|
||||||
|
@@ -166,15 +166,19 @@ class QemuBase(BaseModel):
     aux: Optional[int] = Field(None, gt=0, le=65535, description="Auxiliary console TCP port")
     aux_type: Optional[QemuConsoleType] = Field(None, description="Auxiliary console type")
     hda_disk_image: Optional[str] = Field(None, description="QEMU hda disk image path")
+    hda_disk_image_backed: Optional[str] = Field(None, description="QEMU hda backed disk image path")
     hda_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hda disk image checksum")
     hda_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hda interface")
     hdb_disk_image: Optional[str] = Field(None, description="QEMU hdb disk image path")
+    hdb_disk_image_backed: Optional[str] = Field(None, description="QEMU hdb backed disk image path")
     hdb_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdb disk image checksum")
     hdb_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdb interface")
     hdc_disk_image: Optional[str] = Field(None, description="QEMU hdc disk image path")
+    hdc_disk_image_backed: Optional[str] = Field(None, description="QEMU hdc backed disk image path")
     hdc_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdc disk image checksum")
     hdc_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdc interface")
     hdd_disk_image: Optional[str] = Field(None, description="QEMU hdd disk image path")
+    hdd_disk_image_backed: Optional[str] = Field(None, description="QEMU hdd backed disk image path")
     hdd_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdd disk image checksum")
     hdd_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdd interface")
     cdrom_image: Optional[str] = Field(None, description="QEMU cdrom image path")
@ -232,113 +236,7 @@ class Qemu(QemuBase):
|
|||||||
status: NodeStatus = Field(..., description="Container status (read only)")
|
status: NodeStatus = Field(..., description="Container status (read only)")
|
||||||
|
|
||||||
|
|
||||||
class QemuDriveName(str, Enum):
|
|
||||||
"""
|
|
||||||
Supported Qemu drive names.
|
|
||||||
"""
|
|
||||||
|
|
||||||
hda = "hda"
|
|
||||||
hdb = "hdb"
|
|
||||||
hdc = "hdc"
|
|
||||||
hdd = "hdd"
|
|
||||||
|
|
||||||
|
|
||||||
class QemuDiskResize(BaseModel):
|
|
||||||
"""
|
|
||||||
Properties to resize a Qemu disk.
|
|
||||||
"""
|
|
||||||
|
|
||||||
drive_name: QemuDriveName = Field(..., description="Qemu drive name")
|
|
||||||
extend: int = Field(..., description="Number of Megabytes to extend the image")
|
|
||||||
|
|
||||||
|
|
||||||
class QemuBinaryPath(BaseModel):
|
class QemuBinaryPath(BaseModel):
|
||||||
|
|
||||||
path: str
|
path: str
|
||||||
version: str
|
version: str
|
||||||
|
|
||||||
|
|
||||||
class QemuImageFormat(str, Enum):
|
|
||||||
"""
|
|
||||||
Supported Qemu image formats.
|
|
||||||
"""
|
|
||||||
|
|
||||||
qcow2 = "qcow2"
|
|
||||||
qcow = "qcow"
|
|
||||||
vpc = "vpc"
|
|
||||||
vdi = "vdi"
|
|
||||||
vdmk = "vdmk"
|
|
||||||
raw = "raw"
|
|
||||||
|
|
||||||
|
|
||||||
class QemuImagePreallocation(str, Enum):
|
|
||||||
"""
|
|
||||||
Supported Qemu image preallocation options.
|
|
||||||
"""
|
|
||||||
|
|
||||||
off = "off"
|
|
||||||
metadata = "metadata"
|
|
||||||
falloc = "falloc"
|
|
||||||
full = "full"
|
|
||||||
|
|
||||||
|
|
||||||
class QemuImageOnOff(str, Enum):
|
|
||||||
"""
|
|
||||||
Supported Qemu image on/off options.
|
|
||||||
"""
|
|
||||||
|
|
||||||
on = "off"
|
|
||||||
off = "off"
|
|
||||||
|
|
||||||
|
|
||||||
class QemuImageSubformat(str, Enum):
|
|
||||||
"""
|
|
||||||
Supported Qemu image preallocation options.
|
|
||||||
"""
|
|
||||||
|
|
||||||
dynamic = "dynamic"
|
|
||||||
fixed = "fixed"
|
|
||||||
stream_optimized = "streamOptimized"
|
|
||||||
two_gb_max_extent_sparse = "twoGbMaxExtentSparse"
|
|
||||||
two_gb_max_extent_flat = "twoGbMaxExtentFlat"
|
|
||||||
monolithic_sparse = "monolithicSparse"
|
|
||||||
monolithic_flat = "monolithicFlat"
|
|
||||||
|
|
||||||
|
|
||||||
class QemuImageAdapterType(str, Enum):
|
|
||||||
"""
|
|
||||||
Supported Qemu image on/off options.
|
|
||||||
"""
|
|
||||||
|
|
||||||
ide = "ide"
|
|
||||||
lsilogic = "lsilogic"
|
|
||||||
buslogic = "buslogic"
|
|
||||||
legacy_esx = "legacyESX"
|
|
||||||
|
|
||||||
|
|
||||||
class QemuImageBase(BaseModel):
|
|
||||||
|
|
||||||
qemu_img: str = Field(..., description="Path to the qemu-img binary")
|
|
||||||
path: str = Field(..., description="Absolute or relative path of the image")
|
|
||||||
format: QemuImageFormat = Field(..., description="Image format type")
|
|
||||||
size: int = Field(..., description="Image size in Megabytes")
|
|
||||||
preallocation: Optional[QemuImagePreallocation]
|
|
||||||
cluster_size: Optional[int]
|
|
||||||
refcount_bits: Optional[int]
|
|
||||||
lazy_refcounts: Optional[QemuImageOnOff]
|
|
||||||
subformat: Optional[QemuImageSubformat]
|
|
||||||
static: Optional[QemuImageOnOff]
|
|
||||||
zeroed_grain: Optional[QemuImageOnOff]
|
|
||||||
adapter_type: Optional[QemuImageAdapterType]
|
|
||||||
|
|
||||||
|
|
||||||
class QemuImageCreate(QemuImageBase):
|
|
||||||
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class QemuImageUpdate(QemuImageBase):
|
|
||||||
|
|
||||||
format: Optional[QemuImageFormat] = Field(None, description="Image format type")
|
|
||||||
size: Optional[int] = Field(None, description="Image size in Megabytes")
|
|
||||||
extend: Optional[int] = Field(None, description="Number of Megabytes to extend the image")
|
|
||||||
|
@ -102,3 +102,15 @@ class ProjectFile(BaseModel):
|
|||||||
|
|
||||||
path: str = Field(..., description="File path")
|
path: str = Field(..., description="File path")
|
||||||
md5sum: str = Field(..., description="File checksum")
|
md5sum: str = Field(..., description="File checksum")
|
||||||
|
|
||||||
|
|
||||||
|
class ProjectCompression(str, Enum):
|
||||||
|
"""
|
||||||
|
Supported project compression.
|
||||||
|
"""
|
||||||
|
|
||||||
|
none = "none"
|
||||||
|
zip = "zip"
|
||||||
|
bzip2 = "bzip2"
|
||||||
|
lzma = "lzma"
|
||||||
|
zstd = "zstd"
|
||||||
|
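For orientation, these names line up with the compression methods wired up by the zipfile_zstd patch later in this commit; the mapping below is illustrative only, not part of the diff:

import zipfile

# Illustrative mapping from ProjectCompression values to zipfile constants.
# Method 93 (zstandard) only exists once the zipfile_zstd patch is imported.
COMPRESSION_TO_ZIP = {
    "none": zipfile.ZIP_STORED,
    "zip": zipfile.ZIP_DEFLATED,
    "bzip2": zipfile.ZIP_BZIP2,
    "lzma": zipfile.ZIP_LZMA,
    "zstd": 93,
}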
103
gns3server/schemas/qemu_disk_image.py
Normal file
@@ -0,0 +1,103 @@
#
# Copyright (C) 2022 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from pydantic import BaseModel, Field
from typing import Optional
from enum import Enum


class QemuDiskImageFormat(str, Enum):
    """
    Supported Qemu disk image formats.
    """

    qcow2 = "qcow2"
    qcow = "qcow"
    vpc = "vpc"
    vdi = "vdi"
    vdmk = "vdmk"
    raw = "raw"


class QemuDiskImagePreallocation(str, Enum):
    """
    Supported Qemu disk image pre-allocation options.
    """

    off = "off"
    metadata = "metadata"
    falloc = "falloc"
    full = "full"


class QemuDiskImageOnOff(str, Enum):
    """
    Supported Qemu image on/off options.
    """

    on = "on"
    off = "off"


class QemuDiskImageSubformat(str, Enum):
    """
    Supported Qemu disk image sub-format options.
    """

    dynamic = "dynamic"
    fixed = "fixed"
    stream_optimized = "streamOptimized"
    two_gb_max_extent_sparse = "twoGbMaxExtentSparse"
    two_gb_max_extent_flat = "twoGbMaxExtentFlat"
    monolithic_sparse = "monolithicSparse"
    monolithic_flat = "monolithicFlat"


class QemuDiskImageAdapterType(str, Enum):
    """
    Supported Qemu disk image adapter types.
    """

    ide = "ide"
    lsilogic = "lsilogic"
    buslogic = "buslogic"
    legacy_esx = "legacyESX"


class QemuDiskImageBase(BaseModel):

    format: QemuDiskImageFormat = Field(..., description="Image format type")
    size: int = Field(..., description="Image size in Megabytes")
    preallocation: Optional[QemuDiskImagePreallocation]
    cluster_size: Optional[int]
    refcount_bits: Optional[int]
    lazy_refcounts: Optional[QemuDiskImageOnOff]
    subformat: Optional[QemuDiskImageSubformat]
    static: Optional[QemuDiskImageOnOff]
    zeroed_grain: Optional[QemuDiskImageOnOff]
    adapter_type: Optional[QemuDiskImageAdapterType]


class QemuDiskImageCreate(QemuDiskImageBase):

    pass


class QemuDiskImageUpdate(QemuDiskImageBase):

    format: Optional[QemuDiskImageFormat] = Field(None, description="Image format type")
    size: Optional[int] = Field(None, description="Image size in Megabytes")
    extend: Optional[int] = Field(None, description="Number of Megabytes to extend the image")
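The Create/Update pair follows the usual pydantic pattern: the update model re-declares the required fields as Optional so a partial body validates. A small sketch (values invented):

# Only the supplied fields are set; exclude_unset drops the rest (pydantic v1).
update = QemuDiskImageUpdate(extend=512)
print(update.dict(exclude_unset=True))  # {'extend': 512}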
@@ -111,7 +111,7 @@ class Server:
         )
         parser.add_argument("-q", "--quiet", default=False, action="store_true", help="do not show logs on stdout")
         parser.add_argument("-d", "--debug", default=False, action="store_true", help="show debug logs")
-        parser.add_argument("--logfile", help="send output to logfile instead of console")
+        parser.add_argument("--logfile", "--log", help="send output to logfile instead of console")
         parser.add_argument("--logmaxsize", default=10000000, help="maximum logfile size in bytes (default is 10MB)")
         parser.add_argument(
             "--logbackupcount", default=10, help="number of historical log files to keep (default is 10)"
@@ -255,9 +255,6 @@ class Server:
         self._set_config_defaults_from_command_line(args)
         config = Config.instance().settings

-        if config.Server.local:
-            log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
-
         if not config.Server.compute_password.get_secret_value():
             alphabet = string.ascii_letters + string.digits + string.punctuation
             generated_password = ''.join(secrets.choice(alphabet) for _ in range(16))
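The fallback password above leans on the stdlib secrets module; the same technique in isolation, as a sketch:

import secrets
import string

# 16 characters drawn via a CSPRNG, mirroring the server fallback above.
alphabet = string.ascii_letters + string.digits + string.punctuation
password = ''.join(secrets.choice(alphabet) for _ in range(16))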
File diff suppressed because one or more lines are too long
@@ -1,9 +1,10 @@
 GNS3 WebUI is web implementation of user interface for GNS3 software.

-Current version: 2.2.24
+Current version: 2.2.32

 Bug Fixes & enhancements
-- security fixes
+- Fixed generated capture file is not valid
+- Fixed Docker additional directories

 Current version: 2020.4.0-beta.1
@@ -46,6 +46,6 @@
     gtag('config', 'G-5D6FZL9923');
   </script>
-<script src="runtime.445d8d501d6ed9a85ab9.js" defer></script><script src="polyfills-es5.4eb1fdd946638b823036.js" nomodule defer></script><script src="polyfills.519e27ac94450b1b7e67.js" defer></script><script src="main.e8a548933524bbeda197.js" defer></script>
+<script src="runtime.91a209cf21f6fb848205.js" defer></script><script src="polyfills-es5.865074f5cd9a121111a2.js" nomodule defer></script><script src="polyfills.2f91a039d848e57ff02e.js" defer></script><script src="main.4b4883543cd4ccdf0202.js" defer></script>

 </body></html>
File diff suppressed because one or more lines are too long
@@ -1 +0,0 @@
-[minified webpack runtime bundle omitted]
1
gns3server/static/web-ui/runtime.91a209cf21f6fb848205.js
Normal file
@@ -0,0 +1 @@
+[minified webpack runtime bundle omitted]
@@ -43,26 +43,38 @@ from zipfile import (
     stringEndArchive64Locator,
 )

+ZIP_ZSTANDARD = 93  # zstandard is supported by WinZIP v24 and later, PowerArchiver 2021 and 7-Zip-zstd
+ZSTANDARD_VERSION = 20
 stringDataDescriptor = b"PK\x07\x08"  # magic number for data descriptor


-def _get_compressor(compress_type):
+def _get_compressor(compress_type, compresslevel=None):
     """
     Return the compressor.
     """

     if compress_type == zipfile.ZIP_DEFLATED:
         from zipfile import zlib
+        if compresslevel is not None:
+            return zlib.compressobj(compresslevel, zlib.DEFLATED, -15)
         return zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
     elif compress_type == zipfile.ZIP_BZIP2:
         from zipfile import bz2
+        if compresslevel is not None:
+            return bz2.BZ2Compressor(compresslevel)
         return bz2.BZ2Compressor()
+    # compresslevel is ignored for ZIP_LZMA
     elif compress_type == zipfile.ZIP_LZMA:
         from zipfile import LZMACompressor

         return LZMACompressor()
+    elif compress_type == ZIP_ZSTANDARD:
+        import zstandard as zstd
+        if compresslevel is not None:
+            #params = zstd.ZstdCompressionParameters.from_level(compresslevel, threads=-1, enable_ldm=True, window_log=31)
+            #return zstd.ZstdCompressor(compression_params=params).compressobj()
+            return zstd.ZstdCompressor(level=compresslevel).compressobj()
+        return zstd.ZstdCompressor().compressobj()
     else:
         return None
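A quick sketch of the new compresslevel path, assuming the zstandard package is installed; the input is deliberately repetitive so the assertion holds:

# Compress one chunk through the zstd branch added above.
cmpr = _get_compressor(ZIP_ZSTANDARD, compresslevel=19)
data = cmpr.compress(b"x" * 65536) + cmpr.flush()
assert len(data) < 65536  # repetitive input compresses well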
@@ -129,7 +141,15 @@ class ZipInfo(zipfile.ZipInfo):


 class ZipFile(zipfile.ZipFile):
-    def __init__(self, fileobj=None, mode="w", compression=zipfile.ZIP_STORED, allowZip64=True, chunksize=32768):
+    def __init__(
+        self,
+        fileobj=None,
+        mode="w",
+        compression=zipfile.ZIP_STORED,
+        allowZip64=True,
+        compresslevel=None,
+        chunksize=32768
+    ):
         """Open the ZIP file with mode write "w"."""

         if mode not in ("w",):
@@ -138,7 +158,13 @@ class ZipFile(zipfile.ZipFile):
             fileobj = PointerIO()

         self._comment = b""
-        zipfile.ZipFile.__init__(self, fileobj, mode=mode, compression=compression, allowZip64=allowZip64)
+        zipfile.ZipFile.__init__(
+            self, fileobj,
+            mode=mode,
+            compression=compression,
+            compresslevel=compresslevel,
+            allowZip64=allowZip64
+        )
         self._chunksize = chunksize
         self.paths_to_write = []
@@ -195,23 +221,33 @@ class ZipFile(zipfile.ZipFile):
         for chunk in self._close():
             yield chunk

-    def write(self, filename, arcname=None, compress_type=None):
+    def write(self, filename, arcname=None, compress_type=None, compresslevel=None):
         """
         Write a file to the archive under the name `arcname`.
         """

-        kwargs = {"filename": filename, "arcname": arcname, "compress_type": compress_type}
+        kwargs = {
+            "filename": filename,
+            "arcname": arcname,
+            "compress_type": compress_type,
+            "compresslevel": compresslevel
+        }
         self.paths_to_write.append(kwargs)

-    def write_iter(self, arcname, iterable, compress_type=None):
+    def write_iter(self, arcname, iterable, compress_type=None, compresslevel=None):
         """
         Write the bytes iterable `iterable` to the archive under the name `arcname`.
         """

-        kwargs = {"arcname": arcname, "iterable": iterable, "compress_type": compress_type}
+        kwargs = {
+            "arcname": arcname,
+            "iterable": iterable,
+            "compress_type": compress_type,
+            "compresslevel": compresslevel
+        }
         self.paths_to_write.append(kwargs)

-    def writestr(self, arcname, data, compress_type=None):
+    def writestr(self, arcname, data, compress_type=None, compresslevel=None):
         """
         Writes a str into ZipFile by wrapping data as a generator
         """
@@ -219,9 +255,9 @@ class ZipFile(zipfile.ZipFile):
         def _iterable():
             yield data

-        return self.write_iter(arcname, _iterable(), compress_type=compress_type)
+        return self.write_iter(arcname, _iterable(), compress_type=compress_type, compresslevel=compresslevel)

-    async def _write(self, filename=None, iterable=None, arcname=None, compress_type=None):
+    async def _write(self, filename=None, iterable=None, arcname=None, compress_type=None, compresslevel=None):
         """
         Put the bytes from filename into the archive under the name `arcname`.
         """
@@ -256,6 +292,11 @@ class ZipFile(zipfile.ZipFile):
         else:
             zinfo.compress_type = compress_type

+        if compresslevel is None:
+            zinfo._compresslevel = self.compresslevel
+        else:
+            zinfo._compresslevel = compresslevel
+
         if st:
             zinfo.file_size = st[6]
         else:
@@ -279,7 +320,7 @@ class ZipFile(zipfile.ZipFile):
             yield self.fp.write(zinfo.FileHeader(False))
             return

-        cmpr = _get_compressor(zinfo.compress_type)
+        cmpr = _get_compressor(zinfo.compress_type, zinfo._compresslevel)

         # Must overwrite CRC and sizes with correct data later
         zinfo.CRC = CRC = 0
@@ -369,6 +410,8 @@ class ZipFile(zipfile.ZipFile):
             min_version = max(zipfile.BZIP2_VERSION, min_version)
         elif zinfo.compress_type == zipfile.ZIP_LZMA:
             min_version = max(zipfile.LZMA_VERSION, min_version)
+        elif zinfo.compress_type == ZIP_ZSTANDARD:
+            min_version = max(ZSTANDARD_VERSION, min_version)

         extract_version = max(min_version, zinfo.extract_version)
         create_version = max(min_version, zinfo.create_version)
@@ -20,19 +20,20 @@ import stat
 import aiofiles
 import shutil

-from typing import AsyncGenerator
+from typing import List, AsyncGenerator
 from ..config import Config
 from . import force_unix_path

 import gns3server.db.models as models
 from gns3server.db.repositories.images import ImagesRepository
+from gns3server.utils.asyncio import wait_run_in_executor

 import logging

 log = logging.getLogger(__name__)


-def list_images(image_type):
+async def list_images(image_type):
     """
     Scan directories for available image for a given type.
@@ -59,7 +60,6 @@ def list_images(image_type):
         directory = os.path.normpath(directory)
         for root, _, filenames in _os_walk(directory, recurse=recurse):
             for filename in filenames:
-                path = os.path.join(root, filename)
                 if filename not in files:
                     if filename.endswith(".md5sum") or filename.startswith("."):
                         continue
@@ -92,7 +92,7 @@ def list_images(image_type):
                         {
                             "filename": filename,
                             "path": force_unix_path(path),
-                            "md5sum": md5sum(os.path.join(root, filename)),
+                            "md5sum": await wait_run_in_executor(md5sum, os.path.join(root, filename)),
                             "filesize": os.stat(os.path.join(root, filename)).st_size,
                         }
                     )
@@ -101,6 +101,59 @@ def list_images(image_type):
     return images


+async def read_image_info(path: str, expected_image_type: str = None) -> dict:
+
+    header_magic_len = 7
+    try:
+        async with aiofiles.open(path, "rb") as f:
+            image_header = await f.read(header_magic_len)  # read the first 7 bytes of the file
+            if len(image_header) >= header_magic_len:
+                detected_image_type = check_valid_image_header(image_header)
+                if expected_image_type and detected_image_type != expected_image_type:
+                    raise InvalidImageError(f"Detected image type for '{path}' is {detected_image_type}, "
+                                            f"expected type is {expected_image_type}")
+            else:
+                raise InvalidImageError(f"Image '{path}' is too small to be valid")
+    except OSError as e:
+        raise InvalidImageError(f"Cannot read image '{path}': {e}")
+
+    image_info = {
+        "image_name": os.path.basename(path),
+        "image_type": detected_image_type,
+        "image_size": os.stat(path).st_size,
+        "path": path,
+        "checksum": await wait_run_in_executor(md5sum, path, cache_to_md5file=False),
+        "checksum_algorithm": "md5",
+    }
+    return image_info
+
+
+async def discover_images(image_type: str, skip_image_paths: list = None) -> List[dict]:
+    """
+    Scan directories for available images
+    """
+
+    files = set()
+    images = []
+
+    for directory in images_directories(image_type):
+        for root, _, filenames in os.walk(os.path.normpath(directory)):
+            for filename in filenames:
+                if filename.endswith(".md5sum") or filename.startswith("."):
+                    continue
+                path = os.path.join(root, filename)
+                if not os.path.isfile(path) or skip_image_paths and path in skip_image_paths or path in files:
+                    continue
+                files.add(path)
+
+                try:
+                    images.append(await read_image_info(path, image_type))
+                except InvalidImageError as e:
+                    log.debug(str(e))
+                    continue
+    return images
+
+
 def _os_walk(directory, recurse=True, **kwargs):
     """
     Work like os.walk but if recurse is False just list current directory
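A hedged usage sketch for the new coroutine, assuming an asyncio entry point and configured image directories:

import asyncio

async def main():
    # Every valid QEMU image found on disk, with checksum metadata.
    for info in await discover_images("qemu"):
        print(info["image_name"], info["checksum"])

asyncio.run(main())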
@@ -133,18 +186,18 @@ def default_images_directory(image_type):
     raise NotImplementedError(f"%s node type is not supported", image_type)


-def images_directories(type):
+def images_directories(image_type):
     """
     Return all directories where we will look for images
     by priority

-    :param type: Type of emulator
+    :param image_type: Type of emulator
     """

     server_config = Config.instance().settings.Server
     paths = []
     img_dir = os.path.expanduser(server_config.images_path)
-    type_img_directory = default_images_directory(type)
+    type_img_directory = default_images_directory(image_type)
     try:
         os.makedirs(type_img_directory, exist_ok=True)
         paths.append(type_img_directory)
@@ -158,11 +211,12 @@ def images_directories(type):
     return [force_unix_path(p) for p in paths if os.path.exists(p)]


-def md5sum(path, stopped_event=None):
+def md5sum(path, working_dir=None, stopped_event=None, cache_to_md5file=True):
     """
     Return the md5sum of an image and cache it on disk

     :param path: Path to the image
+    :param working_dir: where to store .md5sum files
     :param stopped_event: In case you execute this function on thread and would like to have possibility
     to cancel operation pass the `threading.Event`
     :returns: Digest of the image
@@ -171,8 +225,13 @@ def md5sum(path, stopped_event=None):
     if path is None or len(path) == 0 or not os.path.exists(path):
         return None

+    if working_dir:
+        md5sum_file = os.path.join(working_dir, os.path.basename(path) + ".md5sum")
+    else:
+        md5sum_file = path + ".md5sum"
+
     try:
-        with open(path + ".md5sum") as f:
+        with open(md5sum_file) as f:
             md5 = f.read().strip()
             if len(md5) == 32:
                 return md5
@@ -187,7 +246,7 @@ def md5sum(path, stopped_event=None):
                 if stopped_event is not None and stopped_event.is_set():
                     log.error(f"MD5 sum calculation of `{path}` has stopped due to cancellation")
                     return
-                buf = f.read(128)
+                buf = f.read(1024)
                 if not buf:
                     break
                 m.update(buf)
@@ -196,8 +255,9 @@ def md5sum(path, stopped_event=None):
         log.error("Can't create digest of %s: %s", path, str(e))
         return None

+    if cache_to_md5file:
     try:
-        with open(f"{path}.md5sum", "w+") as f:
+        with open(md5sum_file, "w+") as f:
             f.write(digest)
     except OSError as e:
         log.error("Can't write digest of %s: %s", path, str(e))
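With the new parameters the digest cache can be redirected or skipped entirely; a sketch with invented paths:

# Store the .md5sum cache next to the project instead of the image.
digest = md5sum("/data/images/ios.image", working_dir="/tmp/project")

# One-off digest without writing a cache file.
digest = md5sum("/data/images/ios.image", cache_to_md5file=False)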
@@ -231,10 +291,11 @@ def check_valid_image_header(data: bytes) -> str:
         # for IOS images: file must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
         return "ios"
     elif data[:7] == b'\x7fELF\x01\x01\x01' or data[:7] == b'\x7fELF\x02\x01\x01':
-        # for IOU images file must start with the ELF magic number, be 32-bit or 64-bit, little endian and
+        # for IOU images: file must start with the ELF magic number, be 32-bit or 64-bit, little endian and
         # have an ELF version of 1 (normal IOS images are big endian!)
         return "iou"
-    elif data[:4] != b'QFI\xfb' or data[:4] != b'KDMV':
+    elif data[:4] == b'QFI\xfb' or data[:4] == b'KDMV':
+        # for Qemu images: file must be QCOW2 or VMDK
         return "qemu"
     else:
         raise InvalidImageError("Could not detect image type, please make sure it is a valid image")
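The detection relies on well-known magic numbers; a minimal standalone check of the QCOW2 case (file name invented):

# QCOW2 files start with b'QFI\xfb'; VMDK descriptors with b'KDMV'.
with open("disk.qcow2", "rb") as f:
    assert f.read(4) == b'QFI\xfb'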
@@ -274,13 +335,23 @@ async def write_image(
         if duplicate_image and os.path.dirname(duplicate_image.path) == os.path.dirname(image_path):
             raise InvalidImageError(f"Image {duplicate_image.filename} with "
                                     f"same checksum already exists in the same directory")
-    except InvalidImageError:
-        os.remove(tmp_path)
-        raise
-    os.chmod(tmp_path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
     if not image_dir:
         directory = default_images_directory(image_type)
         os.makedirs(directory, exist_ok=True)
         image_path = os.path.abspath(os.path.join(directory, image_filename))
     shutil.move(tmp_path, image_path)
-    return await images_repo.add_image(image_name, image_type, image_size, image_path, checksum, checksum_algorithm="md5")
+        os.chmod(image_path, stat.S_IWRITE | stat.S_IREAD | stat.S_IEXEC)
+    finally:
+        try:
+            os.remove(tmp_path)
+        except OSError:
+            log.warning(f"Could not remove '{tmp_path}'")
+
+    return await images_repo.add_image(
+        image_name,
+        image_type,
+        image_size,
+        image_path,
+        checksum,
+        checksum_algorithm="md5"
+    )
@@ -60,8 +60,7 @@ def check_path_allowed(path: str):
     if len(os.path.commonprefix([project_directory, path])) == len(project_directory):
         return

-    if Config.instance().settings.Server.local is False:
-        raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"The path {path} is not allowed")
+    raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="The path is not allowed")


 def get_mountpoint(path: str):
10
gns3server/utils/zipfile_zstd/__init__.py
Normal file
@@ -0,0 +1,10 @@
# NOTE: this patches the standard zipfile module
from . import _zipfile

from zipfile import *
from zipfile import (
    ZIP_ZSTANDARD,
    ZSTANDARD_VERSION,
)
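Importing the package is enough to register method 93 with the stdlib zipfile, so extraction of zstd-compressed members works transparently; a sketch (archive name invented):

import gns3server.utils.zipfile_zstd as zipfile_zstd

with zipfile_zstd.ZipFile("project.zip") as zf:
    print(zf.namelist())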
20
gns3server/utils/zipfile_zstd/_patcher.py
Normal file
@@ -0,0 +1,20 @@
import functools


class patch:

    originals = {}

    def __init__(self, host, name):
        self.host = host
        self.name = name

    def __call__(self, func):
        original = getattr(self.host, self.name)
        self.originals[self.name] = original

        functools.update_wrapper(func, original)
        setattr(self.host, self.name, func)

        return func
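The decorator replaces a module attribute while keeping the original callable reachable through patch.originals; a tiny self-contained demonstration against the math module:

import math

@patch(math, 'sqrt')
def loud_sqrt(x):
    print(f"sqrt({x})")                  # observe the call
    return patch.originals['sqrt'](x)    # then delegate to the original

math.sqrt(9.0)  # prints sqrt(9.0) and returns 3.0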
64
gns3server/utils/zipfile_zstd/_zipfile.py
Normal file
@@ -0,0 +1,64 @@
import zipfile
import zstandard as zstd
import inspect

from ._patcher import patch


zipfile.ZIP_ZSTANDARD = 93
zipfile.compressor_names[zipfile.ZIP_ZSTANDARD] = 'zstandard'
zipfile.ZSTANDARD_VERSION = 20


@patch(zipfile, '_check_compression')
def zstd_check_compression(compression):
    if compression == zipfile.ZIP_ZSTANDARD:
        pass
    else:
        patch.originals['_check_compression'](compression)


class ZstdDecompressObjWrapper:
    def __init__(self, o):
        self.o = o

    def __getattr__(self, attr):
        if attr == 'eof':
            return False
        return getattr(self.o, attr)


@patch(zipfile, '_get_decompressor')
def zstd_get_decompressor(compress_type):
    if compress_type == zipfile.ZIP_ZSTANDARD:
        return ZstdDecompressObjWrapper(zstd.ZstdDecompressor(max_window_size=2147483648).decompressobj())
    else:
        return patch.originals['_get_decompressor'](compress_type)


if 'compresslevel' in inspect.signature(zipfile._get_compressor).parameters:
    @patch(zipfile, '_get_compressor')
    def zstd_get_compressor(compress_type, compresslevel=None):
        if compress_type == zipfile.ZIP_ZSTANDARD:
            if compresslevel is None:
                compresslevel = 3
            return zstd.ZstdCompressor(level=compresslevel, threads=12).compressobj()
        else:
            return patch.originals['_get_compressor'](compress_type, compresslevel=compresslevel)
else:
    @patch(zipfile, '_get_compressor')
    def zstd_get_compressor(compress_type, compresslevel=None):
        if compress_type == zipfile.ZIP_ZSTANDARD:
            if compresslevel is None:
                compresslevel = 3
            return zstd.ZstdCompressor(level=compresslevel, threads=12).compressobj()
        else:
            return patch.originals['_get_compressor'](compress_type)


@patch(zipfile.ZipInfo, 'FileHeader')
def zstd_FileHeader(self, zip64=None):
    if self.compress_type == zipfile.ZIP_ZSTANDARD:
        self.create_version = max(self.create_version, zipfile.ZSTANDARD_VERSION)
        self.extract_version = max(self.extract_version, zipfile.ZSTANDARD_VERSION)
    return patch.originals['FileHeader'](self, zip64=zip64)
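The inspect-based branch exists because zipfile._get_compressor only grew a compresslevel parameter in Python 3.7; the same feature probe, in isolation:

import inspect
import zipfile

# Feature-detect the optional parameter instead of pinning a Python version.
print('compresslevel' in inspect.signature(zipfile._get_compressor).parameters)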
@@ -1,18 +1,20 @@
 uvicorn==0.17.6
-fastapi==0.75.0
+fastapi==0.78.0
 python-multipart==0.0.5
-websockets==10.2
+websockets==10.3
 aiohttp==3.8.1
 async-timeout==4.0.2
 aiofiles==0.8.0
 Jinja2==3.0.3
-sentry-sdk==1.5.7
+sentry-sdk==1.5.12
-psutil==5.9.0
+psutil==5.9.1
 distro==1.7.0
 py-cpuinfo==8.0.0
-sqlalchemy==1.4.32
+sqlalchemy==1.4.36
 aiosqlite===0.17.0
 passlib[bcrypt]==1.7.4
 python-jose==3.3.0
-email-validator==1.1.3
+email-validator==1.2.1
+watchfiles==0.14.1
+zstandard==0.17.0
 setuptools==60.6.0  # don't upgrade because of https://github.com/pypa/setuptools/issues/3084
@ -76,7 +76,7 @@ if [ "$CUSTOM_REPO" = false ] ; then
|
|||||||
git fetch --tags
|
git fetch --tags
|
||||||
git pull
|
git pull
|
||||||
|
|
||||||
if [[ ! -z "$TAG" ]]
|
if [[ -n "$TAG" ]]
|
||||||
then
|
then
|
||||||
echo "Switching to tag: ${TAG}"
|
echo "Switching to tag: ${TAG}"
|
||||||
git checkout "tags/${TAG}"
|
git checkout "tags/${TAG}"
|
||||||
|
2
setup.py
@@ -86,7 +86,7 @@ setup(
     include_package_data=True,
     zip_safe=False,
     platforms="any",
-    python_requires='>=3.6.0',
+    python_requires='>=3.7.0',
     setup_requires=["setuptools>=17.1"],
     classifiers=[
         "Development Status :: 5 - Production/Stable",
@@ -45,7 +45,7 @@ async def test_version_output(app: FastAPI, compute_client: AsyncClient) -> None

     response = await compute_client.get(app.url_path_for("compute:compute_version"))
     assert response.status_code == status.HTTP_200_OK
-    assert response.json() == {'local': True, 'version': __version__}
+    assert response.json() == {'version': __version__}


 async def test_compute_authentication(app: FastAPI, compute_client: AsyncClient) -> None:
@@ -36,35 +36,13 @@ def base_params(tmpdir) -> dict:

     params = {
         "name": "test",
-        "path": str(tmpdir),
         "project_id": str(uuid.uuid4())
     }
     return params


-async def test_create_project_with_path(app: FastAPI, compute_client: AsyncClient, base_params: dict) -> None:
-
-    with patch("gns3server.compute.project.Project.is_local", return_value=True):
-        response = await compute_client.post(app.url_path_for("compute:create_compute_project"), json=base_params)
-        assert response.status_code == status.HTTP_201_CREATED
-        assert response.json()["project_id"] == base_params["project_id"]
-
-
-async def test_create_project_with_path_and_empty_variables(app: FastAPI,
-                                                            compute_client: AsyncClient,
-                                                            base_params: dict) -> None:
-
-    base_params["variables"] = None
-    with patch("gns3server.compute.project.Project.is_local", return_value=True):
-
-        response = await compute_client.post(app.url_path_for("compute:create_compute_project"), json=base_params)
-        assert response.status_code == status.HTTP_201_CREATED
-        assert response.json()["project_id"] == base_params["project_id"]
-
-
 async def test_create_project_without_dir(app: FastAPI, compute_client: AsyncClient, base_params: dict) -> None:

-    del base_params["path"]
     response = await compute_client.post(app.url_path_for("compute:create_compute_project"), json=base_params)
     assert response.status_code == status.HTTP_201_CREATED
     assert response.json()["project_id"] == base_params["project_id"]
@@ -158,9 +136,8 @@ async def test_close_project_invalid_uuid(app: FastAPI, compute_client: AsyncCli
     assert response.status_code == status.HTTP_404_NOT_FOUND


-async def test_get_file(app: FastAPI, compute_client: AsyncClient, config, tmpdir) -> None:
+async def test_get_file(app: FastAPI, compute_client: AsyncClient) -> None:

-    config.settings.Server.projects_path = str(tmpdir)
     project = ProjectManager.instance().create_project(project_id="01010203-0405-0607-0809-0a0b0c0d0e0b")

     with open(os.path.join(project.path, "hello"), "w+") as f:
@@ -17,13 +17,13 @@

 import pytest
 import os
-import sys
 import stat
+import shutil

 from fastapi import FastAPI, status
 from httpx import AsyncClient
 from tests.utils import asyncio_patch
-from unittest.mock import patch
+from unittest.mock import patch, MagicMock

 from gns3server.compute.project import Project
@@ -52,6 +52,16 @@ def fake_qemu_vm(images_dir) -> str:
     return bin_path


+@pytest.fixture
+def fake_qemu_img_binary(tmpdir):
+
+    bin_path = str(tmpdir / "qemu-img")
+    with open(bin_path, "w+") as f:
+        f.write("1")
+    os.chmod(bin_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+    return bin_path
+
+
 @pytest.fixture
 def base_params(tmpdir, fake_qemu_bin) -> dict:
     """Return standard parameters"""
@@ -60,9 +70,12 @@ def base_params(tmpdir, fake_qemu_bin) -> dict:


 @pytest.fixture
-async def vm(app: FastAPI, compute_client: AsyncClient, compute_project: Project, base_params: dict) -> None:
+async def qemu_vm(app: FastAPI, compute_client: AsyncClient, compute_project: Project, base_params: dict) -> None:

-    response = await compute_client.post(app.url_path_for("compute:create_qemu_node", project_id=compute_project.id), json=base_params)
+    response = await compute_client.post(
+        app.url_path_for("compute:create_qemu_node", project_id=compute_project.id),
+        json=base_params
+    )
     assert response.status_code == status.HTTP_201_CREATED
     return response.json()
@ -116,99 +129,107 @@ async def test_qemu_create_with_params(app: FastAPI,
|
|||||||
assert response.json()["hda_disk_image_md5sum"] == "c4ca4238a0b923820dcc509a6f75849b"
|
assert response.json()["hda_disk_image_md5sum"] == "c4ca4238a0b923820dcc509a6f75849b"
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_create_with_project_file(app: FastAPI,
|
# async def test_qemu_create_with_project_file(app: FastAPI,
|
||||||
compute_client: AsyncClient,
|
# compute_client: AsyncClient,
|
||||||
compute_project: Project,
|
# compute_project: Project,
|
||||||
base_params: dict,
|
# base_params: dict,
|
||||||
fake_qemu_vm: str) -> None:
|
# fake_qemu_vm: str) -> None:
|
||||||
|
#
|
||||||
response = await compute_client.post(app.url_path_for("compute:write_compute_project_file",
|
# response = await compute_client.post(
|
||||||
project_id=compute_project.id,
|
# app.url_path_for("compute:write_compute_project_file", project_id=compute_project.id, file_path="hello.img"),
|
||||||
file_path="hello.img"), content=b"world")
|
# content=b"world"
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
# )
|
||||||
params = base_params
|
# assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
params["hda_disk_image"] = "hello.img"
|
# params = base_params
|
||||||
response = await compute_client.post(app.url_path_for("compute:create_qemu_node", project_id=compute_project.id), json=params)
|
# params["hda_disk_image"] = "hello.img"
|
||||||
assert response.status_code == status.HTTP_201_CREATED
|
# response = await compute_client.post(
|
||||||
assert response.json()["hda_disk_image"] == "hello.img"
|
# app.url_path_for("compute:create_qemu_node", project_id=compute_project.id),
|
||||||
assert response.json()["hda_disk_image_md5sum"] == "7d793037a0760186574b0282f2f435e7"
|
# json=params
|
||||||
|
# )
|
||||||
|
# assert response.status_code == status.HTTP_201_CREATED
|
||||||
|
# assert response.json()["hda_disk_image"] == "hello.img"
|
||||||
|
# assert response.json()["hda_disk_image_md5sum"] == "7d793037a0760186574b0282f2f435e7"
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_get(app: FastAPI, compute_client: AsyncClient, compute_project: Project, vm: dict):
|
async def test_qemu_get(app: FastAPI, compute_client: AsyncClient, compute_project: Project, qemu_vm: dict):
|
||||||
|
|
||||||
response = await compute_client.get(app.url_path_for("compute:get_qemu_node", project_id=vm["project_id"], node_id=vm["node_id"]))
|
response = await compute_client.get(
|
||||||
|
app.url_path_for("compute:get_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"])
|
||||||
|
)
|
||||||
assert response.status_code == status.HTTP_200_OK
|
assert response.status_code == status.HTTP_200_OK
|
||||||
assert response.json()["name"] == "PC TEST 1"
|
assert response.json()["name"] == "PC TEST 1"
|
||||||
assert response.json()["project_id"] == compute_project.id
|
assert response.json()["project_id"] == compute_project.id
|
||||||
assert response.json()["node_directory"] == os.path.join(compute_project.path,
|
assert response.json()["node_directory"] == os.path.join(
|
||||||
|
compute_project.path,
|
||||||
"project-files",
|
"project-files",
|
||||||
"qemu",
|
"qemu",
|
||||||
vm["node_id"])
|
qemu_vm["node_id"]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_start(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
|
async def test_qemu_start(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
|
||||||
|
|
||||||
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.start", return_value=True) as mock:
|
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.start", return_value=True) as mock:
|
||||||
response = await compute_client.post(app.url_path_for("compute:start_qemu_node",
|
response = await compute_client.post(
|
||||||
project_id=vm["project_id"],
|
app.url_path_for("compute:start_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"])
|
||||||
node_id=vm["node_id"]))
|
)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_stop(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
|
async def test_qemu_stop(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
|
||||||
|
|
||||||
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.stop", return_value=True) as mock:
|
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.stop", return_value=True) as mock:
|
||||||
response = await compute_client.post(app.url_path_for("compute:stop_qemu_node",
|
response = await compute_client.post(
|
||||||
project_id=vm["project_id"],
|
app.url_path_for("compute:stop_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"])
|
||||||
node_id=vm["node_id"]))
|
)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_reload(app: FastAPI, compute_client: AsyncClient, vm) -> None:
|
async def test_qemu_reload(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
|
||||||
|
|
||||||
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.reload", return_value=True) as mock:
|
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.reload", return_value=True) as mock:
|
||||||
response = await compute_client.post(app.url_path_for("compute:reload_qemu_node",
|
response = await compute_client.post(
|
||||||
project_id=vm["project_id"],
|
app.url_path_for("compute:reload_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"])
|
||||||
node_id=vm["node_id"]))
|
)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_suspend(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
|
async def test_qemu_suspend(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
|
||||||
|
|
||||||
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.suspend", return_value=True) as mock:
|
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.suspend", return_value=True) as mock:
|
||||||
response = await compute_client.post(app.url_path_for("compute:suspend_qemu_node",
|
response = await compute_client.post(
|
||||||
project_id=vm["project_id"],
|
app.url_path_for("compute:suspend_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"])
|
||||||
node_id=vm["node_id"]))
|
)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_resume(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
|
async def test_qemu_resume(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
|
||||||
|
|
||||||
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.resume", return_value=True) as mock:
|
with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.resume", return_value=True) as mock:
|
||||||
response = await compute_client.post(app.url_path_for("compute:resume_qemu_node",
|
response = await compute_client.post(
|
||||||
project_id=vm["project_id"],
|
app.url_path_for("compute:resume_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"])
|
||||||
node_id=vm["node_id"]))
|
)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_delete(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
|
async def test_qemu_delete(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
|
||||||
|
|
||||||
with asyncio_patch("gns3server.compute.qemu.Qemu.delete_node", return_value=True) as mock:
|
with asyncio_patch("gns3server.compute.qemu.Qemu.delete_node", return_value=True) as mock:
|
||||||
response = await compute_client.delete(app.url_path_for("compute:delete_qemu_node",
|
response = await compute_client.delete(
|
||||||
project_id=vm["project_id"],
|
app.url_path_for("compute:delete_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"])
|
||||||
node_id=vm["node_id"]))
|
)
|
||||||
assert mock.called
|
assert mock.called
|
||||||
assert response.status_code == status.HTTP_204_NO_CONTENT
|
assert response.status_code == status.HTTP_204_NO_CONTENT
|
||||||
|
|
||||||
|
|
||||||
async def test_qemu_update(app: FastAPI,
|
async def test_qemu_update(app: FastAPI,
|
||||||
compute_client: AsyncClient,
|
compute_client: AsyncClient,
|
||||||
vm: dict,
|
qemu_vm: dict,
|
||||||
free_console_port: int,
|
free_console_port: int,
|
||||||
fake_qemu_vm: str) -> None:
|
fake_qemu_vm: str) -> None:
|
||||||
|
|
||||||
@@ -219,9 +240,10 @@ async def test_qemu_update(app: FastAPI,
         "hdb_disk_image": "linux载.img"
     }
 
-    response = await compute_client.put(app.url_path_for("compute:update_qemu_node",
-                                        project_id=vm["project_id"],
-                                        node_id=vm["node_id"]), json=params)
+    response = await compute_client.put(
+        app.url_path_for("compute:update_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"]),
+        json=params
+    )
     assert response.status_code == status.HTTP_200_OK
     assert response.json()["name"] == "test"
     assert response.json()["console"] == free_console_port
@@ -229,7 +251,7 @@ async def test_qemu_update(app: FastAPI,
     assert response.json()["ram"] == 1024
 
 
-async def test_qemu_nio_create_udp(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
+async def test_qemu_nio_create_udp(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
 
     params = {
         "type": "nio_udp",
@@ -239,21 +261,25 @@ async def test_qemu_nio_create_udp(app: FastAPI, compute_client: AsyncClient, vm
     }
 
     with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.add_ubridge_udp_connection"):
-        await compute_client.put(app.url_path_for("compute:update_qemu_node",
-                                 project_id=vm["project_id"],
-                                 node_id=vm["node_id"]), json={"adapters": 2})
+        await compute_client.put(
+            app.url_path_for("compute:update_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"]),
+            json={"adapters": 2}
+        )
 
-    url = app.url_path_for("compute:create_qemu_node_nio",
-                           project_id=vm["project_id"],
-                           node_id=vm["node_id"],
-                           adapter_number="1",
-                           port_number="0")
+    url = app.url_path_for(
+        "compute:create_qemu_node_nio",
+        project_id=qemu_vm["project_id"],
+        node_id=qemu_vm["node_id"],
+        adapter_number="1",
+        port_number="0"
+    )
     response = await compute_client.post(url, json=params)
 
     assert response.status_code == status.HTTP_201_CREATED
     assert response.json()["type"] == "nio_udp"
 
 
-async def test_qemu_nio_update_udp(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
+async def test_qemu_nio_update_udp(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
 
     params = {
         "type": "nio_udp",
@@ -262,31 +288,35 @@ async def test_qemu_nio_update_udp(app: FastAPI, compute_client: AsyncClient, vm
         "rhost": "127.0.0.1"
     }
 
-    await compute_client.put(app.url_path_for("compute:update_qemu_node",
-                             project_id=vm["project_id"],
-                             node_id=vm["node_id"]), json={"adapters": 2})
+    await compute_client.put(
+        app.url_path_for("compute:update_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"]),
+        json={"adapters": 2}
+    )
 
-    url = app.url_path_for("compute:create_qemu_node_nio",
-                           project_id=vm["project_id"],
-                           node_id=vm["node_id"],
-                           adapter_number="1",
-                           port_number="0")
+    url = app.url_path_for(
+        "compute:create_qemu_node_nio",
+        project_id=qemu_vm["project_id"],
+        node_id=qemu_vm["node_id"],
+        adapter_number="1",
+        port_number="0"
+    )
 
     await compute_client.post(url, json=params)
 
     params["filters"] = {}
 
-    url = app.url_path_for("compute:update_qemu_node_nio",
-                           project_id=vm["project_id"],
-                           node_id=vm["node_id"],
-                           adapter_number="1",
-                           port_number="0")
+    url = app.url_path_for(
+        "compute:update_qemu_node_nio",
+        project_id=qemu_vm["project_id"],
+        node_id=qemu_vm["node_id"],
+        adapter_number="1",
+        port_number="0"
+    )
     response = await compute_client.put(url, json=params)
     assert response.status_code == status.HTTP_201_CREATED
     assert response.json()["type"] == "nio_udp"
 
 
-async def test_qemu_delete_nio(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
+async def test_qemu_delete_nio(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict) -> None:
 
     params = {
         "type": "nio_udp",
@@ -296,27 +326,32 @@ async def test_qemu_delete_nio(app: FastAPI, compute_client: AsyncClient, vm: di
     }
 
     with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM._ubridge_send"):
-        await compute_client.put(app.url_path_for("compute:update_qemu_node",
-                                 project_id=vm["project_id"],
-                                 node_id=vm["node_id"]), json={"adapters": 2})
+        await compute_client.put(
+            app.url_path_for("compute:update_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"]),
+            json={"adapters": 2}
+        )
 
-    url = app.url_path_for("compute:create_qemu_node_nio",
-                           project_id=vm["project_id"],
-                           node_id=vm["node_id"],
-                           adapter_number="1",
-                           port_number="0")
+    url = app.url_path_for(
+        "compute:create_qemu_node_nio",
+        project_id=qemu_vm["project_id"],
+        node_id=qemu_vm["node_id"],
+        adapter_number="1",
+        port_number="0"
+    )
     await compute_client.post(url, json=params)
 
-    url = app.url_path_for("compute:delete_qemu_node_nio",
-                           project_id=vm["project_id"],
-                           node_id=vm["node_id"],
-                           adapter_number="1",
-                           port_number="0")
+    url = app.url_path_for(
+        "compute:delete_qemu_node_nio",
+        project_id=qemu_vm["project_id"],
+        node_id=qemu_vm["node_id"],
+        adapter_number="1",
+        port_number="0"
+    )
     response = await compute_client.delete(url)
     assert response.status_code == status.HTTP_204_NO_CONTENT
 
 
-async def test_qemu_list_binaries(app: FastAPI, compute_client: AsyncClient, vm: dict) -> None:
+async def test_qemu_list_binaries(app: FastAPI, compute_client: AsyncClient) -> None:
 
     ret = [{"path": "/tmp/1", "version": "2.2.0"},
            {"path": "/tmp/2", "version": "2.1.0"}]
@@ -417,57 +452,57 @@ async def test_upload_image_permission_denied(app: FastAPI, compute_client: Asyn
     assert response.status_code == status.HTTP_409_CONFLICT
 
 
-@pytest.mark.asyncio
-async def test_create_img_relative(app: FastAPI, compute_client: AsyncClient):
-
-    params = {
-        "qemu_img": "/tmp/qemu-img",
-        "path": "hda.qcow2",
-        "format": "qcow2",
-        "preallocation": "metadata",
-        "cluster_size": 64,
-        "refcount_bits": 12,
-        "lazy_refcounts": "off",
-        "size": 100
-    }
-    with asyncio_patch("gns3server.compute.Qemu.create_disk"):
-        response = await compute_client.post(app.url_path_for("compute:create_qemu_image"), json=params)
-    assert response.status_code == status.HTTP_204_NO_CONTENT
-
-
-async def test_create_img_absolute_non_local(app: FastAPI, compute_client: AsyncClient, config) -> None:
-
-    config.settings.Server.local = False
-    params = {
-        "qemu_img": "/tmp/qemu-img",
-        "path": "/tmp/hda.qcow2",
-        "format": "qcow2",
-        "preallocation": "metadata",
-        "cluster_size": 64,
-        "refcount_bits": 12,
-        "lazy_refcounts": "off",
-        "size": 100
-    }
-    with asyncio_patch("gns3server.compute.Qemu.create_disk"):
-        response = await compute_client.post(app.url_path_for("compute:create_qemu_image"), json=params)
-    assert response.status_code == 403
-
-
-async def test_create_img_absolute_local(app: FastAPI, compute_client: AsyncClient, config) -> None:
-
-    params = {
-        "qemu_img": "/tmp/qemu-img",
-        "path": "/tmp/hda.qcow2",
-        "format": "qcow2",
-        "preallocation": "metadata",
-        "cluster_size": 64,
-        "refcount_bits": 12,
-        "lazy_refcounts": "off",
-        "size": 100
-    }
-    with asyncio_patch("gns3server.compute.Qemu.create_disk"):
-        response = await compute_client.post(app.url_path_for("compute:create_qemu_image"), json=params)
-    assert response.status_code == status.HTTP_204_NO_CONTENT
+# @pytest.mark.asyncio
+# async def test_create_img_relative(app: FastAPI, compute_client: AsyncClient):
+#
+#     params = {
+#         "qemu_img": "/tmp/qemu-img",
+#         "path": "hda.qcow2",
+#         "format": "qcow2",
+#         "preallocation": "metadata",
+#         "cluster_size": 64,
+#         "refcount_bits": 12,
+#         "lazy_refcounts": "off",
+#         "size": 100
+#     }
+#     with asyncio_patch("gns3server.compute.Qemu.create_disk"):
+#         response = await compute_client.post(app.url_path_for("compute:create_qemu_image"), json=params)
+#     assert response.status_code == status.HTTP_204_NO_CONTENT
+#
+#
+# async def test_create_img_absolute_non_local(app: FastAPI, compute_client: AsyncClient, config) -> None:
+#
+#     config.settings.Server.local = False
+#     params = {
+#         "qemu_img": "/tmp/qemu-img",
+#         "path": "/tmp/hda.qcow2",
+#         "format": "qcow2",
+#         "preallocation": "metadata",
+#         "cluster_size": 64,
+#         "refcount_bits": 12,
+#         "lazy_refcounts": "off",
+#         "size": 100
+#     }
+#     with asyncio_patch("gns3server.compute.Qemu.create_disk"):
+#         response = await compute_client.post(app.url_path_for("compute:create_qemu_image"), json=params)
+#     assert response.status_code == 403
+#
+#
+# async def test_create_img_absolute_local(app: FastAPI, compute_client: AsyncClient, config) -> None:
+#
+#     params = {
+#         "qemu_img": "/tmp/qemu-img",
+#         "path": "/tmp/hda.qcow2",
+#         "format": "qcow2",
+#         "preallocation": "metadata",
+#         "cluster_size": 64,
+#         "refcount_bits": 12,
+#         "lazy_refcounts": "off",
+#         "size": 100
+#     }
+#     with asyncio_patch("gns3server.compute.Qemu.create_disk"):
+#         response = await compute_client.post(app.url_path_for("compute:create_qemu_image"), json=params)
+#     assert response.status_code == status.HTTP_204_NO_CONTENT
 
 
 async def test_capabilities(app: FastAPI, compute_client: AsyncClient) -> None:
@@ -480,33 +515,216 @@ async def test_capabilities(app: FastAPI, compute_client: AsyncClient) -> None:
 async def test_qemu_duplicate(app: FastAPI,
                               compute_client: AsyncClient,
                               compute_project: Project,
-                              vm: dict,
+                              qemu_vm: dict,
                               base_params: dict) -> None:
 
     # create destination node first
-    response = await compute_client.post(app.url_path_for("compute:create_qemu_node",
-                                         project_id=vm["project_id"]), json=base_params)
+    response = await compute_client.post(
+        app.url_path_for("compute:create_qemu_node", project_id=qemu_vm["project_id"]),
+        json=base_params
+    )
 
     assert response.status_code == status.HTTP_201_CREATED
     params = {"destination_node_id": response.json()["node_id"]}
-    response = await compute_client.post(app.url_path_for("compute:duplicate_qemu_node",
-                                         project_id=vm["project_id"], node_id=vm["node_id"]), json=params)
+    response = await compute_client.post(
+        app.url_path_for("compute:duplicate_qemu_node", project_id=qemu_vm["project_id"], node_id=qemu_vm["node_id"]),
+        json=params
+    )
     assert response.status_code == status.HTTP_201_CREATED
 
 
+async def test_qemu_create_disk_image(
+    app: FastAPI,
+    compute_client: AsyncClient,
+    compute_project: Project,
+    fake_qemu_img_binary: str,
+    qemu_vm: dict,
+):
+
+    options = {
+        "format": "qcow2",
+        "preallocation": "metadata",
+        "cluster_size": 64,
+        "refcount_bits": 12,
+        "lazy_refcounts": "off",
+        "size": 30
+    }
+
+    with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as qemu_img:
+        response = await compute_client.post(
+            app.url_path_for(
+                "compute:create_qemu_disk_image",
+                project_id=qemu_vm["project_id"],
+                node_id=qemu_vm["node_id"],
+                disk_name="disk.qcow2"
+            ),
+            json=options
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+
+        args, kwargs = qemu_img.call_args
+        assert args == (
+            fake_qemu_img_binary,
+            "create",
+            "-f",
+            "qcow2",
+            "-o",
+            "cluster_size=64",
+            "-o",
+            "lazy_refcounts=off",
+            "-o",
+            "preallocation=metadata",
+            "-o",
+            "refcount_bits=12",
+            os.path.join(qemu_vm["node_directory"], "disk.qcow2"),
+            "30M"
+        )
+
+
+async def test_qemu_create_disk_image_already_exists(
+    app: FastAPI,
+    compute_client: AsyncClient,
+    compute_project: Project,
+    fake_qemu_img_binary: str,
+    qemu_vm: dict,
+):
+
+    node = compute_project.get_node(qemu_vm["node_id"])
+    shutil.copy("tests/resources/empty8G.qcow2", os.path.join(node.working_dir, "disk.qcow2"))
+
+    options = {
+        "format": "qcow2",
+        "size": 100
+    }
+
+    response = await compute_client.post(
+        app.url_path_for(
+            "compute:create_qemu_disk_image",
+            project_id=qemu_vm["project_id"],
+            node_id=qemu_vm["node_id"],
+            disk_name="disk.qcow2"
+        ),
+        json=options
+    )
+    assert response.status_code == status.HTTP_409_CONFLICT
+
+
+# async def test_qemu_create_disk_image_with_not_supported_characters_by_filesystem(
+#     app: FastAPI,
+#     compute_client: AsyncClient,
+#     compute_project: Project,
+#     fake_qemu_img_binary: str,
+#     qemu_vm: dict,
+# ):
+#
+#     node = compute_project.get_node(qemu_vm["node_id"])
+#     shutil.copy("tests/resources/empty8G.qcow2", os.path.join(node.working_dir, "disk.qcow2"))
+#
+#     options = {
+#         "format": "qcow2",
+#         "size": 100
+#     }
+#
+#     with patch("os.path.exists", side_effect=UnicodeEncodeError('error', u"", 1, 2, 'Emulated Unicode Err')):
+#         response = await compute_client.post(
+#             app.url_path_for(
+#                 "compute:create_qemu_disk_image",
+#                 project_id=qemu_vm["project_id"],
+#                 node_id=qemu_vm["node_id"],
+#                 disk_name=u"\u2019"
+#             ),
+#             json=options
+#         )
+#         assert response.status_code == status.HTTP_409_CONFLICT
+
+
+async def test_qemu_update_disk_image(
+    app: FastAPI,
+    compute_client: AsyncClient,
+    compute_project: Project,
+    fake_qemu_img_binary: str,
+    qemu_vm: dict,
+) -> None:
+
+    node = compute_project.get_node(qemu_vm["node_id"])
+    shutil.copy("tests/resources/empty8G.qcow2", os.path.join(node.working_dir, "disk.qcow2"))
+
+    with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as qemu_img:
+        response = await compute_client.put(
+            app.url_path_for(
+                "compute:update_qemu_disk_image",
+                project_id=qemu_vm["project_id"],
+                node_id=qemu_vm["node_id"],
+                disk_name="disk.qcow2"
+            ),
+            json={"extend": 10}
+        )
+        assert response.status_code == status.HTTP_204_NO_CONTENT
+
+        assert qemu_img.called
+        args, kwargs = qemu_img.call_args
+        assert args == (
+            fake_qemu_img_binary,
+            "resize",
+            os.path.join(qemu_vm["node_directory"], "disk.qcow2"),
+            "+10M"
+        )
+
+
+async def test_qemu_delete_disk_image(
+    app: FastAPI,
+    compute_client: AsyncClient,
+    compute_project: Project,
+    qemu_vm: dict,
+) -> None:
+
+    node = compute_project.get_node(qemu_vm["node_id"])
+    shutil.copy("tests/resources/empty8G.qcow2", os.path.join(node.working_dir, "disk.qcow2"))
+
+    response = await compute_client.delete(
+        app.url_path_for(
+            "compute:delete_qemu_disk_image",
+            project_id=qemu_vm["project_id"],
+            node_id=qemu_vm["node_id"],
+            disk_name="disk.qcow2"
+        )
+    )
+    assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+async def test_qemu_delete_disk_image_missing_image(
+    app: FastAPI,
+    compute_client: AsyncClient,
+    compute_project: Project,
+    qemu_vm: dict,
+) -> None:
+
+    response = await compute_client.delete(
+        app.url_path_for(
+            "compute:delete_qemu_disk_image",
+            project_id=qemu_vm["project_id"],
+            node_id=qemu_vm["node_id"],
+            disk_name="unknown_image.qcow2"
+        )
+    )
+    assert response.status_code == status.HTTP_409_CONFLICT
+
+
 @pytest.mark.asyncio
-async def test_qemu_start_capture(app: FastAPI, compute_client: AsyncClient, vm):
+async def test_qemu_start_capture(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict):
 
     params = {
         "capture_file_name": "test.pcap",
         "data_link_type": "DLT_EN10MB"
     }
 
-    url = app.url_path_for("compute:start_qemu_node_capture",
-                           project_id=vm["project_id"],
-                           node_id=vm["node_id"],
-                           adapter_number="0",
-                           port_number="0")
+    url = app.url_path_for(
+        "compute:start_qemu_node_capture",
+        project_id=qemu_vm["project_id"],
+        node_id=qemu_vm["node_id"],
+        adapter_number="0",
+        port_number="0"
+    )
 
     with patch("gns3server.compute.qemu.qemu_vm.QemuVM.is_running", return_value=True):
         with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.start_capture") as mock:
@@ -517,13 +735,15 @@ async def test_qemu_start_capture(app: FastAPI, compute_client: AsyncClient, vm)
 
 
 @pytest.mark.asyncio
-async def test_qemu_stop_capture(app: FastAPI, compute_client: AsyncClient, vm):
+async def test_qemu_stop_capture(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict):
 
-    url = app.url_path_for("compute:stop_qemu_node_capture",
-                           project_id=vm["project_id"],
-                           node_id=vm["node_id"],
-                           adapter_number="0",
-                           port_number="0")
+    url = app.url_path_for(
+        "compute:stop_qemu_node_capture",
+        project_id=qemu_vm["project_id"],
+        node_id=qemu_vm["node_id"],
+        adapter_number="0",
+        port_number="0"
+    )
 
     with patch("gns3server.compute.qemu.qemu_vm.QemuVM.is_running", return_value=True):
         with asyncio_patch("gns3server.compute.qemu.qemu_vm.QemuVM.stop_capture") as mock:
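
Note: the new disk-image tests above pin down an exact qemu-img command line. As a rough sketch of the convention those assertions encode (options emitted as alphabetically sorted "-o key=value" pairs, size suffixed with "M"), with a hypothetical helper name that is not part of gns3-server:

# Hypothetical sketch only; build_qemu_img_create_args is not a gns3-server function.
def build_qemu_img_create_args(qemu_img: str, path: str, options: dict) -> tuple:
    extra = {k: v for k, v in options.items() if k not in ("format", "size")}
    args = [qemu_img, "create", "-f", options["format"]]
    for key in sorted(extra):  # sorted keys match the asserted "-o" order above
        args += ["-o", f"{key}={extra[key]}"]
    args += [path, f"{options['size']}M"]  # size is interpreted as megabytes
    return tuple(args)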
@@ -30,6 +30,7 @@ pytestmark = pytest.mark.asyncio
 async def test_shutdown_local(app: FastAPI, client: AsyncClient, config: Config) -> None:
 
     os.kill = MagicMock()
+    config.settings.Server.local = True
     response = await client.post(app.url_path_for("shutdown"))
     assert response.status_code == status.HTTP_204_NO_CONTENT
     assert os.kill.called
@@ -37,7 +38,6 @@ async def test_shutdown_local(app: FastAPI, client: AsyncClient, config: Config)
 
 async def test_shutdown_non_local(app: FastAPI, client: AsyncClient, config: Config) -> None:
 
-    config.settings.Server.local = False
     response = await client.post(app.url_path_for("shutdown"))
     assert response.status_code == status.HTTP_403_FORBIDDEN
 
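
Note: both shutdown tests now set or rely on the Server.local default per test rather than a global conftest override. A minimal sketch of the kind of guard the 403 case implies (illustrative only, not the actual gns3-server handler):

# Illustrative guard only; not the actual gns3-server shutdown handler.
from fastapi import HTTPException, status

def ensure_local_server(local: bool) -> None:
    if not local:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only a local server can be shut down"
        )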
@@ -223,33 +223,161 @@ async def test_dynamips_idle_pc(
     client: AsyncClient,
     project: Project,
     compute: Compute,
-    node: Node) -> None:
+    node: Node
+) -> None:
 
     response = MagicMock()
     response.json = {"idlepc": "0x60606f54"}
     compute.get = AsyncioMagicMock(return_value=response)
 
+    node._node_type = "dynamips"  # force Dynamips node type
     response = await client.get(app.url_path_for("auto_idlepc", project_id=project.id, node_id=node.id))
     assert response.status_code == status.HTTP_200_OK
     assert response.json()["idlepc"] == "0x60606f54"
 
 
+async def test_dynamips_idle_pc_wrong_node_type(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = await client.get(app.url_path_for("auto_idlepc", project_id=project.id, node_id=node.id))
+    assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
 async def test_dynamips_idlepc_proposals(
     app: FastAPI,
     client: AsyncClient,
     project: Project,
     compute: Compute,
-    node: Node) -> None:
+    node: Node
+) -> None:
 
     response = MagicMock()
     response.json = ["0x60606f54", "0x33805a22"]
     compute.get = AsyncioMagicMock(return_value=response)
 
+    node._node_type = "dynamips"  # force Dynamips node type
     response = await client.get(app.url_path_for("idlepc_proposals", project_id=project.id, node_id=node.id))
     assert response.status_code == status.HTTP_200_OK
     assert response.json() == ["0x60606f54", "0x33805a22"]
 
 
+async def test_dynamips_idlepc_proposals_wrong_node_type(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = await client.get(app.url_path_for("idlepc_proposals", project_id=project.id, node_id=node.id))
+    assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+async def test_qemu_disk_image_create(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = MagicMock()
+    compute.post = AsyncioMagicMock(return_value=response)
+
+    node._node_type = "qemu"  # force Qemu node type
+    response = await client.post(
+        app.url_path_for("create_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2"),
+        json={"format": "qcow2", "size": 30}
+    )
+    assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+async def test_qemu_disk_image_create_wrong_node_type(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = await client.post(
+        app.url_path_for("create_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2"),
+        json={"format": "qcow2", "size": 30}
+    )
+    assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+async def test_qemu_disk_image_update(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = MagicMock()
+    compute.put = AsyncioMagicMock(return_value=response)
+
+    node._node_type = "qemu"  # force Qemu node type
+    response = await client.put(
+        app.url_path_for("update_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2"),
+        json={"extend": 10}
+    )
+    assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+async def test_qemu_disk_image_update_wrong_node_type(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = await client.put(
+        app.url_path_for("update_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2"),
+        json={"extend": 10}
+    )
+    assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
+async def test_qemu_disk_image_delete(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = MagicMock()
+    compute.delete = AsyncioMagicMock(return_value=response)
+
+    node._node_type = "qemu"  # force Qemu node type
+    response = await client.delete(
+        app.url_path_for("delete_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2")
+    )
+    assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+async def test_qemu_disk_image_delete_wrong_node_type(
+    app: FastAPI,
+    client: AsyncClient,
+    project: Project,
+    compute: Compute,
+    node: Node
+) -> None:
+
+    response = await client.delete(
+        app.url_path_for("delete_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2")
+    )
+    assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
 async def test_get_file(app: FastAPI, client: AsyncClient, project: Project, compute: Compute, node: Node) -> None:
 
     response = MagicMock()
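
Note: every *_wrong_node_type test above expects HTTP 400, i.e. the idle-PC and disk-image routes validate the node type before forwarding the request to the compute. A minimal sketch of such a check (illustrative only, not the server's actual route code):

# Illustrative only; mirrors the 400 the *_wrong_node_type tests expect.
from fastapi import HTTPException, status

def ensure_node_type(node_type: str, expected: str) -> None:
    if node_type != expected:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Node type {node_type} not supported for this endpoint"
        )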
@@ -17,7 +17,6 @@
 
 import uuid
 import os
-import zipfile
 import json
 import pytest
 
@@ -26,6 +25,7 @@ from httpx import AsyncClient
 from unittest.mock import patch, MagicMock
 from tests.utils import asyncio_patch
 
+import gns3server.utils.zipfile_zstd as zipfile_zstd
 from gns3server.controller import Controller
 from gns3server.controller.project import Project
 
@@ -41,9 +41,9 @@ async def project(app: FastAPI, client: AsyncClient, controller: Controller) ->
     return controller.get_project(u)
 
 
-async def test_create_project_with_path(app: FastAPI, client: AsyncClient, controller: Controller, tmpdir) -> None:
+async def test_create_project_with_path(app: FastAPI, client: AsyncClient, controller: Controller, config) -> None:
 
-    params = {"name": "test", "path": str(tmpdir), "project_id": "00010203-0405-0607-0809-0a0b0c0d0e0f"}
+    params = {"name": "test", "path": str(config.settings.Server.projects_path), "project_id": "00010203-0405-0607-0809-0a0b0c0d0e0f"}
     response = await client.post(app.url_path_for("create_project"), json=params)
     assert response.status_code == status.HTTP_201_CREATED
     assert response.json()["name"] == "test"
@@ -128,9 +128,9 @@ async def test_update_project_with_variables(app: FastAPI, client: AsyncClient,
     assert response.json()["variables"] == variables
 
 
-async def test_list_projects(app: FastAPI, client: AsyncClient, controller: Controller, tmpdir) -> None:
+async def test_list_projects(app: FastAPI, client: AsyncClient, controller: Controller) -> None:
 
-    params = {"name": "test", "path": str(tmpdir), "project_id": "00010203-0405-0607-0809-0a0b0c0d0e0f"}
+    params = {"name": "test", "project_id": "00010203-0405-0607-0809-0a0b0c0d0e0f"}
     await client.post(app.url_path_for("create_project"), json=params)
     response = await client.get(app.url_path_for("get_projects"))
     assert response.status_code == status.HTTP_200_OK
@@ -261,7 +261,7 @@ async def test_export_with_images(app: FastAPI, client: AsyncClient, tmpdir, pro
     with open(str(tmpdir / 'project.zip'), 'wb+') as f:
         f.write(response.content)
 
-    with zipfile.ZipFile(str(tmpdir / 'project.zip')) as myzip:
+    with zipfile_zstd.ZipFile(str(tmpdir / 'project.zip')) as myzip:
         with myzip.open("a") as myfile:
             content = myfile.read()
             assert content == b"hello"
@@ -304,7 +304,7 @@ async def test_export_without_images(app: FastAPI, client: AsyncClient, tmpdir,
     with open(str(tmpdir / 'project.zip'), 'wb+') as f:
         f.write(response.content)
 
-    with zipfile.ZipFile(str(tmpdir / 'project.zip')) as myzip:
+    with zipfile_zstd.ZipFile(str(tmpdir / 'project.zip')) as myzip:
         with myzip.open("a") as myfile:
             content = myfile.read()
             assert content == b"hello"
@@ -313,6 +313,67 @@ async def test_export_without_images(app: FastAPI, client: AsyncClient, tmpdir,
             myzip.getinfo("images/IOS/test.image")
 
 
+@pytest.mark.parametrize(
+    "compression, compression_level, status_code",
+    (
+        ("none", None, status.HTTP_200_OK),
+        ("none", 4, status.HTTP_400_BAD_REQUEST),
+        ("zip", None, status.HTTP_200_OK),
+        ("zip", 1, status.HTTP_200_OK),
+        ("zip", 12, status.HTTP_400_BAD_REQUEST),
+        ("bzip2", None, status.HTTP_200_OK),
+        ("bzip2", 1, status.HTTP_200_OK),
+        ("bzip2", 13, status.HTTP_400_BAD_REQUEST),
+        ("lzma", None, status.HTTP_200_OK),
+        ("lzma", 1, status.HTTP_400_BAD_REQUEST),
+        ("zstd", None, status.HTTP_200_OK),
+        ("zstd", 12, status.HTTP_200_OK),
+        ("zstd", 23, status.HTTP_400_BAD_REQUEST),
+    )
+)
+async def test_export_compression(
+    app: FastAPI,
+    client: AsyncClient,
+    tmpdir,
+    project: Project,
+    compression: str,
+    compression_level: int,
+    status_code: int
+) -> None:
+
+    project.dump = MagicMock()
+    os.makedirs(project.path, exist_ok=True)
+
+    topology = {
+        "topology": {
+            "nodes": [
+                {
+                    "node_type": "qemu"
+                }
+            ]
+        }
+    }
+    with open(os.path.join(project.path, "test.gns3"), 'w+') as f:
+        json.dump(topology, f)
+
+    params = {"compression": compression}
+    if compression_level:
+        params["compression_level"] = compression_level
+    response = await client.get(app.url_path_for("export_project", project_id=project.id), params=params)
+    assert response.status_code == status_code
+
+    if response.status_code == status.HTTP_200_OK:
+        assert response.headers['CONTENT-TYPE'] == 'application/gns3project'
+        assert response.headers['CONTENT-DISPOSITION'] == 'attachment; filename="{}.gns3project"'.format(project.name)
+
+        with open(str(tmpdir / 'project.zip'), 'wb+') as f:
+            f.write(response.content)
+
+        with zipfile_zstd.ZipFile(str(tmpdir / 'project.zip')) as myzip:
+            with myzip.open("project.gns3") as myfile:
+                myfile.read()
+
+
 async def test_get_file(app: FastAPI, client: AsyncClient, project: Project) -> None:
 
     os.makedirs(project.path, exist_ok=True)
@@ -371,21 +432,21 @@ async def test_write_and_get_file_with_leading_slashes_in_filename(
     assert response.status_code == status.HTTP_403_FORBIDDEN
 
 
-async def test_import(app: FastAPI, client: AsyncClient, tmpdir, controller: Controller) -> None:
-
-    with zipfile.ZipFile(str(tmpdir / "test.zip"), 'w') as myzip:
-        myzip.writestr("project.gns3", b'{"project_id": "c6992992-ac72-47dc-833b-54aa334bcd05", "version": "2.0.0", "name": "test"}')
-        myzip.writestr("demo", b"hello")
-
-    project_id = str(uuid.uuid4())
-    with open(str(tmpdir / "test.zip"), "rb") as f:
-        response = await client.post(app.url_path_for("import_project", project_id=project_id), content=f.read())
-    assert response.status_code == status.HTTP_201_CREATED
-
-    project = controller.get_project(project_id)
-    with open(os.path.join(project.path, "demo")) as f:
-        content = f.read()
-    assert content == "hello"
+# async def test_import(app: FastAPI, client: AsyncClient, tmpdir, controller: Controller) -> None:
+#
+#     with zipfile.ZipFile(str(tmpdir / "test.zip"), 'w') as myzip:
+#         myzip.writestr("project.gns3", b'{"project_id": "c6992992-ac72-47dc-833b-54aa334bcd05", "version": "2.0.0", "name": "test"}')
+#         myzip.writestr("demo", b"hello")
+#
+#     project_id = str(uuid.uuid4())
+#     with open(str(tmpdir / "test.zip"), "rb") as f:
+#         response = await client.post(app.url_path_for("import_project", project_id=project_id), content=f.read())
+#     assert response.status_code == status.HTTP_201_CREATED
+#
+#     project = controller.get_project(project_id)
+#     with open(os.path.join(project.path, "demo")) as f:
+#         content = f.read()
+#     assert content == "hello"
 
 
 async def test_duplicate(app: FastAPI, client: AsyncClient, project: Project) -> None:
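
Note: the parametrized cases above encode per-method bounds on compression_level: "none" and "lzma" accept no level at all, "zip" rejects 12, "bzip2" rejects 13, and "zstd" accepts 12 but rejects 23. A hedged sketch of a validator consistent with that table (names hypothetical, not the server's implementation):

# Hypothetical validator consistent with the parametrized table above.
ALLOWED_LEVELS = {
    "none": None,        # no level may be supplied
    "zip": range(0, 10),
    "bzip2": range(1, 10),
    "lzma": None,        # no level may be supplied
    "zstd": range(1, 23),
}

def is_valid(compression, level=None):
    allowed = ALLOWED_LEVELS.get(compression)
    if level is None:
        return compression in ALLOWED_LEVELS
    return allowed is not None and level in allowed

assert is_valid("zstd", 12) and not is_valid("zstd", 23)
assert is_valid("zip", 1) and not is_valid("zip", 12)
assert not is_valid("none", 4) and not is_valid("lzma", 1)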
@@ -29,7 +29,7 @@ async def test_version_output(app: FastAPI, client: AsyncClient) -> None:
 
     response = await client.get(app.url_path_for("get_version"))
     assert response.status_code == status.HTTP_200_OK
-    assert response.json() == {'controller_host': '127.0.0.1', 'local': True, 'version': __version__}
+    assert response.json() == {'controller_host': '127.0.0.1', 'local': False, 'version': __version__}
 
 
 async def test_version_input(app: FastAPI, client: AsyncClient) -> None:
@@ -230,6 +230,7 @@ def test_path_relative(vm, fake_iou_bin):
 
 def test_path_invalid_bin(vm, tmpdir, config):
 
+    config.settings.Server.images_path = str(tmpdir)
     path = str(tmpdir / "test.bin")
 
     with open(path, "w+") as f:
@@ -17,7 +17,6 @@
 
 import os
 import stat
-import sys
 import pytest
 import platform
 
@@ -113,95 +112,6 @@ def test_get_legacy_vm_workdir():
     assert Qemu.get_legacy_vm_workdir(42, "bla") == os.path.join("qemu", "vm-42")
 
 
-@pytest.mark.asyncio
-async def test_create_image_abs_path(tmpdir, fake_qemu_img_binary):
-
-    options = {
-        "format": "qcow2",
-        "preallocation": "metadata",
-        "cluster_size": 64,
-        "refcount_bits": 12,
-        "lazy_refcounts": "off",
-        "size": 100
-    }
-    with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
-        await Qemu.instance().create_disk(fake_qemu_img_binary, str(tmpdir / "hda.qcow2"), options)
-        args, kwargs = process.call_args
-        assert args == (
-            fake_qemu_img_binary,
-            "create",
-            "-f",
-            "qcow2",
-            "-o",
-            "cluster_size=64",
-            "-o",
-            "lazy_refcounts=off",
-            "-o",
-            "preallocation=metadata",
-            "-o",
-            "refcount_bits=12",
-            str(tmpdir / "hda.qcow2"),
-            "100M"
-        )
-
-
-@pytest.mark.asyncio
-async def test_create_image_relative_path(tmpdir, fake_qemu_img_binary):
-
-    options = {
-        "format": "raw",
-        "size": 100
-    }
-    with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
-        with patch("gns3server.compute.qemu.Qemu.get_images_directory", return_value=str(tmpdir)):
-            await Qemu.instance().create_disk(fake_qemu_img_binary, "hda.qcow2", options)
-            args, kwargs = process.call_args
-            assert args == (
-                fake_qemu_img_binary,
-                "create",
-                "-f",
-                "raw",
-                str(tmpdir / "hda.qcow2"),
-                "100M"
-            )
-
-
-@pytest.mark.asyncio
-async def test_create_image_exist(tmpdir, fake_qemu_img_binary):
-
-    open(str(tmpdir / "hda.qcow2"), "w+").close()
-    options = {
-        "format": "raw",
-        "size": 100
-    }
-    with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process:
-        with patch("gns3server.compute.qemu.Qemu.get_images_directory", return_value=str(tmpdir)):
-            with pytest.raises(QemuError):
-                await Qemu.instance().create_disk(fake_qemu_img_binary, "hda.qcow2", options)
-            assert not process.called
-
-
-@pytest.mark.asyncio
-async def test_create_image_with_not_supported_characters_by_filesystem(tmpdir, fake_qemu_img_binary):
-
-    open(str(tmpdir / "hda.qcow2"), "w+").close()
-
-    options = {
-        "format": "raw",
-        "size": 100
-    }
-
-    # patching os.makedirs is necessary as it depends on already mocked os.path.exists
-    with asyncio_patch("asyncio.create_subprocess_exec", return_value=MagicMock()) as process, \
-        patch("gns3server.compute.qemu.Qemu.get_images_directory", return_value=str(tmpdir)), \
-        patch("os.path.exists", side_effect=UnicodeEncodeError('error', u"", 1, 2, 'Emulated Unicode Err')),\
-        patch("os.makedirs"):
-
-        with pytest.raises(QemuError):
-            await Qemu.instance().create_disk(fake_qemu_img_binary, "hda.qcow2", options)
-        assert not process.called
-
-
 @pytest.mark.asyncio
 async def test_get_kvm_archs_kvm_ok():
 
@@ -18,7 +18,6 @@
 import pytest
 import asyncio
 import os
-import sys
 import stat
 from tests.utils import asyncio_patch, AsyncioMagicMock
 
@@ -95,20 +94,18 @@ async def test_vm(compute_project, manager, fake_qemu_binary):
 
 
 @pytest.mark.asyncio
-async def test_vm_create(tmpdir, compute_project, manager, fake_qemu_binary):
+async def test_vm_create(compute_project, manager, fake_qemu_binary):
 
-    fake_img = str(tmpdir / 'hello')
-
-    with open(fake_img, 'w+') as f:
-        f.write('hello')
-
     vm = QemuVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", compute_project, manager, qemu_path=fake_qemu_binary)
+    fake_img = os.path.join(vm.working_dir, 'hello')
+    with open(fake_img, 'w+') as f:
+        f.write('hello')
     vm._hda_disk_image = fake_img
 
     await vm.create()
 
     # tests if `create` created md5sums
-    assert os.path.exists(str(tmpdir / 'hello.md5sum'))
+    assert os.path.exists(os.path.join(vm.working_dir, 'hello.md5sum'))
 
 
 @pytest.mark.asyncio
@@ -106,16 +106,15 @@ def test_get_abs_image_path_non_local(qemu, tmpdir, config):
 
     # If non local we can't use path outside images directory
     config.settings.Server.images_path = str(tmpdir / "images")
-    config.settings.Server.local = False
     assert qemu.get_abs_image_path(path1) == path1
     with pytest.raises(NodeError):
         qemu.get_abs_image_path(path2)
     with pytest.raises(NodeError):
         qemu.get_abs_image_path("C:\\test2.bin")
 
-    config.settings.Server.images_path = str(tmpdir / "images")
-    config.settings.Server.local = True
-    assert qemu.get_abs_image_path(path2) == path2
+    # config.settings.Server.images_path = str(tmpdir / "images")
+    # config.settings.Server.local = True
+    # assert qemu.get_abs_image_path(path2) == path2
 
 
 def test_get_abs_image_additional_image_paths(qemu, tmpdir, config):
@@ -130,7 +129,6 @@ def test_get_abs_image_additional_image_paths(qemu, tmpdir, config):
 
     config.settings.Server.images_path = str(tmpdir / "images1")
     config.settings.Server.additional_images_paths = "/tmp/null24564;" + str(tmpdir / "images2")
-    config.settings.Server.local = False
 
     assert qemu.get_abs_image_path("test1.bin") == path1
     assert qemu.get_abs_image_path("test2.bin") == path2
@@ -152,7 +150,6 @@ def test_get_abs_image_recursive(qemu, tmpdir, config):
     path2 = force_unix_path(str(path2))
 
     config.settings.Server.images_path = str(tmpdir / "images1")
-    config.settings.Server.local = False
 
     assert qemu.get_abs_image_path("test1.bin") == path1
     assert qemu.get_abs_image_path("test2.bin") == path2
@@ -171,7 +168,6 @@ def test_get_abs_image_recursive_ova(qemu, tmpdir, config):
     path2 = force_unix_path(str(path2))
 
     config.settings.Server.images_path = str(tmpdir / "images1")
-    config.settings.Server.local = False
 
     assert qemu.get_abs_image_path("demo/test.ova/test1.bin") == path1
     assert qemu.get_abs_image_path("test.ova/test2.bin") == path2
@@ -202,7 +198,6 @@ def test_get_relative_image_path(qemu, tmpdir, config):
 
     config.settings.Server.images_path = str(tmpdir / "images1")
     config.settings.Server.additional_images_paths = str(tmpdir / "images2")
-    config.settings.Server.local = True
 
     assert qemu.get_relative_image_path(path1) == "test1.bin"
     assert qemu.get_relative_image_path("test1.bin") == "test1.bin"
@@ -210,6 +205,7 @@ def test_get_relative_image_path(qemu, tmpdir, config):
     assert qemu.get_relative_image_path("test2.bin") == "test2.bin"
     assert qemu.get_relative_image_path("../test1.bin") == "test1.bin"
     assert qemu.get_relative_image_path("test3.bin") == "test3.bin"
+    with pytest.raises(NodeError):
         assert qemu.get_relative_image_path(path4) == path4
     assert qemu.get_relative_image_path(path5) == path5
 
@@ -17,7 +17,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import os
-import sys
 import uuid
 import pytest
 from uuid import uuid4
@@ -28,7 +27,6 @@ from gns3server.compute.project import Project
 from gns3server.compute.notification_manager import NotificationManager
 from gns3server.compute.compute_error import ComputeError, ComputeForbiddenError
 from gns3server.compute.vpcs import VPCS, VPCSVM
-from gns3server.config import Config
 
 
 @pytest.fixture(scope="function")
@@ -76,25 +74,15 @@ async def test_clean_tmp_directory():
 async def test_path(projects_dir):
 
     directory = projects_dir
-    with patch("gns3server.compute.project.Project.is_local", return_value=True):
     with patch("gns3server.utils.path.get_default_project_directory", return_value=directory):
         p = Project(project_id=str(uuid4()))
         assert p.path == os.path.join(directory, p.id)
        assert os.path.exists(os.path.join(directory, p.id))
 
 
-@pytest.mark.asyncio
-async def test_init_path(tmpdir):
-
-    with patch("gns3server.compute.project.Project.is_local", return_value=True):
-        p = Project(path=str(tmpdir), project_id=str(uuid4()))
-        assert p.path == str(tmpdir)
-
-
 @pytest.mark.asyncio
 async def test_changing_path_not_allowed(tmpdir):
 
-    with patch("gns3server.compute.project.Project.is_local", return_value=False):
     with pytest.raises(ComputeForbiddenError):
         p = Project(project_id=str(uuid4()))
         p.path = str(tmpdir)
@@ -135,7 +123,6 @@ async def test_json_with_variables():
 async def test_node_working_directory(node, projects_dir):
 
     directory = projects_dir
-    with patch("gns3server.compute.project.Project.is_local", return_value=True):
     p = Project(project_id=str(uuid4()))
     assert p.node_working_directory(node) == os.path.join(directory, p.id, 'project-files', node.module_name, node.id)
     assert os.path.exists(p.node_working_directory(node))
@@ -145,7 +132,6 @@ async def test_node_working_directory(node, projects_dir):
 async def test_node_working_path(node, projects_dir):
 
     directory = projects_dir
-    with patch("gns3server.compute.project.Project.is_local", return_value=True):
     p = Project(project_id=str(uuid4()))
     assert p.node_working_path(node) == os.path.join(directory, p.id, 'project-files', node.module_name, node.id)
     # after this execution directory structure should not be created
@@ -194,9 +180,8 @@ async def test_project_close(node, compute_project):
 
 
 @pytest.mark.asyncio
-async def test_list_files(tmpdir, config):
+async def test_list_files():
 
-    config.settings.Server.projects_path = str(tmpdir)
     project = Project(project_id=str(uuid4()))
     path = project.path
     os.makedirs(os.path.join(path, "vm-1", "dynamips"))
@@ -359,7 +359,7 @@ def ubridge_path(config):
 
 
 @pytest.fixture(autouse=True)
-def run_around_tests(monkeypatch, config, port_manager):#port_manager, controller, config):
+def run_around_tests(monkeypatch, config, port_manager):
     """
     This setup a temporary project file environment around tests
     """
@@ -392,7 +392,6 @@ def run_around_tests(monkeypatch, config, port_manager):#port_manager, controlle
     config.settings.Server.appliances_path = appliances_dir
 
     config.settings.Server.ubridge_path = os.path.join(tmppath, 'bin', 'ubridge')
-    config.settings.Server.local = True
 
     # Prevent executions of the VM if we forgot to mock something
    config.settings.VirtualBox.vboxmanage_path = tmppath
@@ -20,8 +20,8 @@ import pytest
 from unittest.mock import patch, MagicMock
 
 from gns3server.controller.project import Project
-from gns3server.controller.compute import Compute, ComputeConflict
-from gns3server.controller.controller_error import ControllerError, ControllerNotFoundError
+from gns3server.controller.compute import Compute
+from gns3server.controller.controller_error import ControllerError, ControllerNotFoundError, ComputeConflictError
 from pydantic import SecretStr
 from tests.utils import asyncio_patch, AsyncioMagicMock
 
@@ -212,7 +212,7 @@ async def test_compute_httpQueryConflictError(compute):
     with asyncio_patch("aiohttp.ClientSession.request", return_value=response) as mock:
         response.status = 409
         response.read = AsyncioMagicMock(return_value=b'{"message": "Test"}')
-        with pytest.raises(ComputeConflict):
+        with pytest.raises(ComputeConflictError):
             await compute.post("/projects", {"a": "b"})
         assert mock.called
     await compute.close()
|
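
Note: the rename moves the 409 conflict exception from gns3server.controller.compute (ComputeConflict) into the shared controller_error module (ComputeConflictError). A hedged sketch of the mapping this test exercises; the real logic lives in the Compute HTTP query code and may differ in detail:

import json

class ComputeConflictError(Exception):
    """Stand-in for gns3server.controller.controller_error.ComputeConflictError."""

async def raise_for_conflict(response):
    # the test above mocks aiohttp so that response.status == 409
    # and response.read() returns b'{"message": "Test"}'
    if response.status == 409:
        body = json.loads(await response.read())
        raise ComputeConflictError(body.get("message", "Conflict"))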

@@ -21,6 +21,7 @@ import uuid
 import json
 import zipfile

+from pathlib import Path
 from tests.utils import asyncio_patch, AsyncioMagicMock

 from gns3server.controller.import_project import import_project, _move_files_to_compute

@@ -74,12 +75,13 @@ async def test_import_project(tmpdir, controller):


 @pytest.mark.asyncio
-async def test_import_project_override(tmpdir, controller):
+async def test_import_project_override(projects_dir, controller):
     """
     In the case of snapshot we will import a project for
     override the previous keeping the same project id & location
     """

+    tmpdir = Path(projects_dir)
     project_id = str(uuid.uuid4())
     topology = {
         "project_id": project_id,
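
Note: import_project consumes a .gns3project zip archive whose project.gns3 member holds the topology; because the project_id inside the archive matches an existing project, the import overrides it in place. A small helper in the spirit of these tests (the helper itself is illustrative and not part of the diff; zipfile and json are already imported by the test module):

import json
import zipfile
from io import BytesIO

def make_project_archive(topology):
    """Build an in-memory .gns3project archive containing project.gns3."""
    buf = BytesIO()
    with zipfile.ZipFile(buf, "w") as archive:
        archive.writestr("project.gns3", json.dumps(topology))
    buf.seek(0)
    return buf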

@@ -523,11 +525,12 @@ async def test_move_files_to_compute(tmpdir):


 @pytest.mark.asyncio
-async def test_import_project_name_and_location(tmpdir, controller):
+async def test_import_project_name_and_location(projects_dir, controller):
     """
     Import a project with a different location and name
     """

+    tmpdir = Path(projects_dir)
     project_id = str(uuid.uuid4())
     topology = {
         "project_id": str(uuid.uuid4()),

@@ -128,18 +128,19 @@ def test_path_exist(tmpdir):


 @pytest.mark.asyncio
-async def test_init_path(tmpdir):
+async def test_init_path(projects_dir):

-    p = Project(path=str(tmpdir), project_id=str(uuid4()), name="Test")
-    assert p.path == str(tmpdir)
+    project_id = str(uuid4())
+    p = Project(project_id=project_id, name="Test")
+    assert p.path == os.path.join(projects_dir, project_id)


 @pytest.mark.asyncio
-async def test_changing_path_with_quote_not_allowed(tmpdir):
+async def test_changing_path_with_quote_not_allowed(projects_dir):

     with pytest.raises(ControllerForbiddenError):
         p = Project(project_id=str(uuid4()), name="Test")
-        p.path = str(tmpdir / "project\"53")
+        p.path = os.path.join(projects_dir, "project\"53")


 @pytest.mark.asyncio
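
Note: with explicit path= arguments gone, a controller Project now derives its location as <projects_path>/<project_id>, and paths containing a double quote are rejected (they would break command lines handed to emulators). A hedged sketch of that validation; the real check lives in gns3server.controller.project and raises ControllerForbiddenError:

import os

def resolve_project_path(projects_dir, project_id, override=None):
    # illustrative only: mirrors the two assertions in the hunk above
    path = override or os.path.join(projects_dir, project_id)
    if '"' in path:
        raise ValueError('double quotes are not allowed in project paths')
    return path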

@@ -15,7 +15,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

+import os
 import json
 import pytest

@@ -171,7 +171,7 @@ def demo_topology():


 @pytest.mark.asyncio
-async def test_open(controller, tmpdir):
+async def test_open(controller, projects_dir):

     simple_topology = {
         "auto_close": True,

@@ -192,12 +192,12 @@ async def test_open(controller, tmpdir):
         "version": "2.0.0"
     }

-    with open(str(tmpdir / "demo.gns3"), "w+") as f:
+    with open(os.path.join(projects_dir, "demo.gns3"), "w+") as f:
         json.dump(simple_topology, f)

     project = Project(name="demo",
                       project_id="64ba8408-afbf-4b66-9cdd-1fd854427478",
-                      path=str(tmpdir),
+                      path=str(projects_dir),
                       controller=controller,
                       filename="demo.gns3",
                       status="closed")
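
Note: the simple_topology fixture is only partially visible across these two hunks. Reconstructed from the visible fields, a minimal .gns3 file plausibly looks like the following; the exact field set is an assumption, only auto_close, project_id, name, and version appear in the diff:

import json

simple_topology = {
    "auto_close": True,                                     # from the diff
    "name": "demo",                                         # from the diff
    "project_id": "64ba8408-afbf-4b66-9cdd-1fd854427478",   # from the diff
    "topology": {},                                         # assumed placeholder
    "type": "topology",                                     # assumed
    "version": "2.0.0",                                     # from the diff
}

with open("demo.gns3", "w+") as f:
    json.dump(simple_topology, f)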

@@ -18,6 +18,7 @@
 import os
 import sys
 import threading
+import pytest
 from unittest.mock import patch


@@ -37,7 +38,6 @@ def test_images_directories(tmpdir, config):

     config.settings.Server.images_path = str(tmpdir / "images1")
     config.settings.Server.additional_images_paths = "/tmp/null24564;" + str(tmpdir / "images2")
-    config.settings.Server.local = False

     # /tmp/null24564 is ignored because doesn't exists
     res = images_directories("qemu")
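
Note: as the comment in the hunk says, additional_images_paths is a ";"-separated list and entries that do not exist on disk are skipped. An illustration (the second path below is hypothetical):

import os

additional = "/tmp/null24564;/home/user/GNS3/images2"
candidates = additional.split(";")
# /tmp/null24564 drops out because it does not exist
existing = [p for p in candidates if os.path.exists(p)]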

@@ -111,7 +111,8 @@ def test_remove_checksum(tmpdir):
     remove_checksum(str(tmpdir / 'not_exists'))


-def test_list_images(tmpdir, config):
+@pytest.mark.asyncio
+async def test_list_images(tmpdir, config):

     path1 = tmpdir / "images1" / "IOS" / "test1.image"
     path1.write(b'\x7fELF\x01\x02\x01', ensure=True)
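
Note: this hunk shows the conversion pattern applied across the suite for 3.0: a synchronous test becomes a coroutine marked with @pytest.mark.asyncio, and the now-async helper is awaited. A skeleton of the pattern, assuming the pytest-asyncio plugin is installed and that list_images is imported from gns3server.utils.images as in the test module (the import path is an assumption):

import pytest
from gns3server.utils.images import list_images  # a coroutine in 3.0

@pytest.mark.asyncio
async def test_qemu_images_are_listed():
    images = await list_images("qemu")
    assert isinstance(images, list)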

@@ -140,9 +141,8 @@ def test_list_images(tmpdir, config):

     config.settings.Server.images_path = str(tmpdir / "images1")
     config.settings.Server.additional_images_paths = "/tmp/null24564;" + str(tmpdir / "images2")
-    config.settings.Server.local = False

-    assert list_images("dynamips") == [
+    assert await list_images("dynamips") == [
         {
             'filename': 'test1.image',
             'filesize': 7,

@@ -158,7 +158,7 @@ def test_list_images(tmpdir, config):
     ]

     if sys.platform.startswith("linux"):
-        assert list_images("iou") == [
+        assert await list_images("iou") == [
             {
                 'filename': 'test3.bin',
                 'filesize': 7,

@@ -167,7 +167,7 @@ def test_list_images(tmpdir, config):
             }
         ]

-    assert list_images("qemu") == [
+    assert await list_images("qemu") == [
         {
             'filename': 'test4.qcow2',
             'filesize': 1,

@@ -23,17 +23,11 @@ from fastapi import HTTPException
 from gns3server.utils.path import check_path_allowed, get_default_project_directory


-def test_check_path_allowed(config, tmpdir):
+def test_check_path_allowed():

-    config.settings.Server.local = False
-    config.settings.Server.projects_path = str(tmpdir)
     with pytest.raises(HTTPException):
         check_path_allowed("/private")
-
-    config.settings.Server.local = True
-    check_path_allowed(str(tmpdir / "hello" / "world"))
-    check_path_allowed("/private")


 def test_get_default_project_directory(config):

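Note: with the Server.local option removed, check_path_allowed no longer has a permissive mode; anything outside the configured projects directory is rejected with an HTTP 403. A hedged sketch of the behaviour the remaining assertion expects; the actual implementation lives in gns3server.utils.path and may differ:

import os
from fastapi import HTTPException

def check_path_allowed_sketch(path, projects_path="/var/lib/gns3/projects"):
    # illustrative default; the real code reads Server.projects_path
    abspath = os.path.abspath(path)
    if os.path.commonpath([abspath, projects_path]) != projects_path:
        raise HTTPException(status_code=403, detail=f"{path} is not allowed")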