diff --git a/gns3server/__init__.py b/gns3server/__init__.py
index ccdabda2..66dd93d3 100644
--- a/gns3server/__init__.py
+++ b/gns3server/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/atm_switch_nodes.py b/gns3server/api/routes/compute/atm_switch_nodes.py
index 0b73b1b3..5847ca90 100644
--- a/gns3server/api/routes/compute/atm_switch_nodes.py
+++ b/gns3server/api/routes/compute/atm_switch_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/capabilities.py b/gns3server/api/routes/compute/capabilities.py
index a644ef4d..4acbf39b 100644
--- a/gns3server/api/routes/compute/capabilities.py
+++ b/gns3server/api/routes/compute/capabilities.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/cloud_nodes.py b/gns3server/api/routes/compute/cloud_nodes.py
index e36cc54a..bf7977fe 100644
--- a/gns3server/api/routes/compute/cloud_nodes.py
+++ b/gns3server/api/routes/compute/cloud_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/compute.py b/gns3server/api/routes/compute/compute.py
index 0d1f1a65..2fae74f6 100644
--- a/gns3server/api/routes/compute/compute.py
+++ b/gns3server/api/routes/compute/compute.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/docker_nodes.py b/gns3server/api/routes/compute/docker_nodes.py
index 1284d4ed..e23a7e47 100644
--- a/gns3server/api/routes/compute/docker_nodes.py
+++ b/gns3server/api/routes/compute/docker_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/dynamips_nodes.py b/gns3server/api/routes/compute/dynamips_nodes.py
index 3ef77a68..98e14c20 100644
--- a/gns3server/api/routes/compute/dynamips_nodes.py
+++ b/gns3server/api/routes/compute/dynamips_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -226,7 +225,7 @@ async def start_capture(adapter_number: int,
try:
pcap_file_path.encode('ascii')
except UnicodeEncodeError:
- raise DynamipsError('The capture file path "{}" must only contain ASCII (English) characters'.format(pcap_file_path))
+ raise DynamipsError(f"The capture file path '{pcap_file_path}' must only contain ASCII (English) characters")
await node.start_capture(adapter_number, port_number, pcap_file_path, node_capture_data.data_link_type)
return {"pcap_file_path": pcap_file_path}
diff --git a/gns3server/api/routes/compute/ethernet_hub_nodes.py b/gns3server/api/routes/compute/ethernet_hub_nodes.py
index 23075741..3bb00d94 100644
--- a/gns3server/api/routes/compute/ethernet_hub_nodes.py
+++ b/gns3server/api/routes/compute/ethernet_hub_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/ethernet_switch_nodes.py b/gns3server/api/routes/compute/ethernet_switch_nodes.py
index 2021f6fb..20719ebc 100644
--- a/gns3server/api/routes/compute/ethernet_switch_nodes.py
+++ b/gns3server/api/routes/compute/ethernet_switch_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/frame_relay_switch_nodes.py b/gns3server/api/routes/compute/frame_relay_switch_nodes.py
index da32e668..2a5d2997 100644
--- a/gns3server/api/routes/compute/frame_relay_switch_nodes.py
+++ b/gns3server/api/routes/compute/frame_relay_switch_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/images.py b/gns3server/api/routes/compute/images.py
index af1a42bc..cdb1fb9b 100644
--- a/gns3server/api/routes/compute/images.py
+++ b/gns3server/api/routes/compute/images.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/iou_nodes.py b/gns3server/api/routes/compute/iou_nodes.py
index 98fdb897..59962654 100644
--- a/gns3server/api/routes/compute/iou_nodes.py
+++ b/gns3server/api/routes/compute/iou_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/nat_nodes.py b/gns3server/api/routes/compute/nat_nodes.py
index 3e538815..b2f3b7cb 100644
--- a/gns3server/api/routes/compute/nat_nodes.py
+++ b/gns3server/api/routes/compute/nat_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/notifications.py b/gns3server/api/routes/compute/notifications.py
index fd2ad568..5807a0c4 100644
--- a/gns3server/api/routes/compute/notifications.py
+++ b/gns3server/api/routes/compute/notifications.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -46,7 +45,7 @@ async def notification_ws(websocket: WebSocket):
except (ConnectionClosed, WebSocketDisconnect):
log.info(f"Client {websocket.client.host}:{websocket.client.port} has disconnected from compute WebSocket")
except WebSocketException as e:
- log.warning("Error while sending to controller event to WebSocket client: '{}'".format(e))
+        log.warning(f"Error while sending controller event to WebSocket client: {e}")
finally:
await websocket.close()
diff --git a/gns3server/api/routes/compute/projects.py b/gns3server/api/routes/compute/projects.py
index fa0cea13..e0162a16 100644
--- a/gns3server/api/routes/compute/projects.py
+++ b/gns3server/api/routes/compute/projects.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/qemu_nodes.py b/gns3server/api/routes/compute/qemu_nodes.py
index 184b2e44..5e91b959 100644
--- a/gns3server/api/routes/compute/qemu_nodes.py
+++ b/gns3server/api/routes/compute/qemu_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/virtualbox_nodes.py b/gns3server/api/routes/compute/virtualbox_nodes.py
index 4a80814f..a39eec6f 100644
--- a/gns3server/api/routes/compute/virtualbox_nodes.py
+++ b/gns3server/api/routes/compute/virtualbox_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/vmware_nodes.py b/gns3server/api/routes/compute/vmware_nodes.py
index e9df222c..9a20c717 100644
--- a/gns3server/api/routes/compute/vmware_nodes.py
+++ b/gns3server/api/routes/compute/vmware_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/compute/vpcs_nodes.py b/gns3server/api/routes/compute/vpcs_nodes.py
index 89281143..d15140ee 100644
--- a/gns3server/api/routes/compute/vpcs_nodes.py
+++ b/gns3server/api/routes/compute/vpcs_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/controller/appliances.py b/gns3server/api/routes/controller/appliances.py
index 466cca28..eef6f361 100644
--- a/gns3server/api/routes/controller/appliances.py
+++ b/gns3server/api/routes/controller/appliances.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/controller/computes.py b/gns3server/api/routes/controller/computes.py
index cfffdcc1..77ee10a9 100644
--- a/gns3server/api/routes/controller/computes.py
+++ b/gns3server/api/routes/controller/computes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/controller/controller.py b/gns3server/api/routes/controller/controller.py
index dc4f4ada..ce5a5a9d 100644
--- a/gns3server/api/routes/controller/controller.py
+++ b/gns3server/api/routes/controller/controller.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -61,7 +60,7 @@ async def shutdown():
try:
future.result()
except Exception as e:
- log.error("Could not close project {}".format(e), exc_info=1)
+            log.error(f"Could not close project: {e}", exc_info=True)
continue
# then shutdown the server itself
@@ -94,7 +93,7 @@ def check_version(version: schemas.Version):
print(version.version)
if version.version != __version__:
- raise ControllerError("Client version {} is not the same as server version {}".format(version.version, __version__))
+ raise ControllerError(f"Client version {version.version} is not the same as server version {__version__}")
return {"version": __version__}
@@ -135,7 +134,7 @@ async def statistics():
r = await compute.get("/statistics")
compute_statistics.append({"compute_id": compute.id, "compute_name": compute.name, "statistics": r.json})
except ControllerError as e:
- log.error("Could not retrieve statistics on compute {}: {}".format(compute.name, e))
+ log.error(f"Could not retrieve statistics on compute {compute.name}: {e}")
return compute_statistics
# @Route.post(
diff --git a/gns3server/api/routes/controller/dependencies/authentication.py b/gns3server/api/routes/controller/dependencies/authentication.py
index c4986577..851be2bc 100644
--- a/gns3server/api/routes/controller/dependencies/authentication.py
+++ b/gns3server/api/routes/controller/dependencies/authentication.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/controller/dependencies/database.py b/gns3server/api/routes/controller/dependencies/database.py
index f3f59d88..89860de9 100644
--- a/gns3server/api/routes/controller/dependencies/database.py
+++ b/gns3server/api/routes/controller/dependencies/database.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/controller/drawings.py b/gns3server/api/routes/controller/drawings.py
index bb4b3ec7..e6924fc6 100644
--- a/gns3server/api/routes/controller/drawings.py
+++ b/gns3server/api/routes/controller/drawings.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/controller/links.py b/gns3server/api/routes/controller/links.py
index b02ff5f2..a1cd9ee9 100644
--- a/gns3server/api/routes/controller/links.py
+++ b/gns3server/api/routes/controller/links.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/routes/controller/nodes.py b/gns3server/api/routes/controller/nodes.py
index cd2dd9b8..e1e71272 100644
--- a/gns3server/api/routes/controller/nodes.py
+++ b/gns3server/api/routes/controller/nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -58,7 +57,7 @@ class NodeConcurrency(APIRoute):
project_id = request.path_params.get("project_id")
if node_id and "pcap" not in request.url.path and not request.url.path.endswith("console/ws"):
- lock_key = "{}:{}".format(project_id, node_id)
+ lock_key = f"{project_id}:{node_id}"
node_locks.setdefault(lock_key, {"lock": asyncio.Lock(), "concurrency": 0})
node_locks[lock_key]["concurrency"] += 1
@@ -323,11 +322,9 @@ async def get_file(file_path: str, node: Node = Depends(dep_node)):
raise ControllerForbiddenError("It is forbidden to get a file outside the project directory")
node_type = node.node_type
- path = "/project-files/{}/{}/{}".format(node_type, node.id, path)
+ path = f"/project-files/{node_type}/{node.id}/{path}"
- res = await node.compute.http_query("GET", "/projects/{project_id}/files{path}".format(project_id=node.project.id, path=path),
- timeout=None,
- raw=True)
+ res = await node.compute.http_query("GET", f"/projects/{node.project.id}/files{path}", timeout=None, raw=True)
return Response(res.body, media_type="application/octet-stream")
@@ -345,14 +342,11 @@ async def post_file(file_path: str, request: Request, node: Node = Depends(dep_n
raise ControllerForbiddenError("Cannot write outside the node directory")
node_type = node.node_type
- path = "/project-files/{}/{}/{}".format(node_type, node.id, path)
+ path = f"/project-files/{node_type}/{node.id}/{path}"
data = await request.body() #FIXME: are we handling timeout or large files correctly?
- await node.compute.http_query("POST", "/projects/{project_id}/files{path}".format(project_id=node.project.id, path=path),
- data=data,
- timeout=None,
- raw=True)
+ await node.compute.http_query("POST", f"/projects/{node.project.id}/files{path}", data=data, timeout=None, raw=True)
@router.websocket("/{node_id}/console/ws")
diff --git a/gns3server/api/routes/controller/notifications.py b/gns3server/api/routes/controller/notifications.py
index 2009ad0c..10bb6d4a 100644
--- a/gns3server/api/routes/controller/notifications.py
+++ b/gns3server/api/routes/controller/notifications.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -41,7 +40,7 @@ async def http_notification():
with Controller.instance().notification.controller_queue() as queue:
while True:
msg = await queue.get_json(5)
- yield ("{}\n".format(msg)).encode("utf-8")
+ yield (f"{msg}\n").encode("utf-8")
return StreamingResponse(event_stream(), media_type="application/json")
@@ -62,6 +61,6 @@ async def notification_ws(websocket: WebSocket):
except (ConnectionClosed, WebSocketDisconnect):
log.info(f"Client {websocket.client.host}:{websocket.client.port} has disconnected from controller WebSocket")
except WebSocketException as e:
- log.warning("Error while sending to controller event to WebSocket client: '{}'".format(e))
+        log.warning(f"Error while sending controller event to WebSocket client: {e}")
finally:
await websocket.close()
diff --git a/gns3server/api/routes/controller/projects.py b/gns3server/api/routes/controller/projects.py
index d565e75f..707eee46 100644
--- a/gns3server/api/routes/controller/projects.py
+++ b/gns3server/api/routes/controller/projects.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -180,7 +179,7 @@ async def load_project(path: str = Body(..., embed=True)):
controller = Controller.instance()
dot_gns3_file = path
if Config.instance().settings.Server.local is False:
- log.error("Cannot load '{}' because the server has not been started with the '--local' parameter".format(dot_gns3_file))
+ log.error(f"Cannot load '{dot_gns3_file}' because the server has not been started with the '--local' parameter")
raise ControllerForbiddenError("Cannot load project when server is not local")
project = await controller.load_project(dot_gns3_file,)
return project.__json__()
@@ -195,7 +194,7 @@ async def notification(project_id: UUID):
controller = Controller.instance()
project = controller.get_project(str(project_id))
- log.info("New client has connected to the notification stream for project ID '{}' (HTTP steam method)".format(project.id))
+    log.info(f"New client has connected to the notification stream for project ID '{project.id}' (HTTP stream method)")
async def event_stream():
@@ -203,15 +202,15 @@ async def notification(project_id: UUID):
with controller.notification.project_queue(project.id) as queue:
while True:
msg = await queue.get_json(5)
- yield ("{}\n".format(msg)).encode("utf-8")
+ yield (f"{msg}\n").encode("utf-8")
finally:
- log.info("Client has disconnected from notification for project ID '{}' (HTTP stream method)".format(project.id))
+ log.info(f"Client has disconnected from notification for project ID '{project.id}' (HTTP stream method)")
if project.auto_close:
# To avoid trouble with client connecting disconnecting we sleep few seconds before checking
# if someone else is not connected
await asyncio.sleep(5)
if not controller.notification.project_has_listeners(project.id):
- log.info("Project '{}' is automatically closing due to no client listening".format(project.id))
+ log.info(f"Project '{project.id}' is automatically closing due to no client listening")
await project.close()
return StreamingResponse(event_stream(), media_type="application/json")
@@ -227,16 +226,16 @@ async def notification_ws(project_id: UUID, websocket: WebSocket):
project = controller.get_project(str(project_id))
await websocket.accept()
- log.info("New client has connected to the notification stream for project ID '{}' (WebSocket method)".format(project.id))
+ log.info(f"New client has connected to the notification stream for project ID '{project.id}' (WebSocket method)")
try:
with controller.notification.project_queue(project.id) as queue:
while True:
notification = await queue.get_json(5)
await websocket.send_text(notification)
except (ConnectionClosed, WebSocketDisconnect):
- log.info("Client has disconnected from notification stream for project ID '{}' (WebSocket method)".format(project.id))
+ log.info(f"Client has disconnected from notification stream for project ID '{project.id}' (WebSocket method)")
except WebSocketException as e:
- log.warning("Error while sending to project event to WebSocket client: '{}'".format(e))
+        log.warning(f"Error while sending project event to WebSocket client: {e}")
finally:
await websocket.close()
if project.auto_close:
@@ -244,7 +243,7 @@ async def notification_ws(project_id: UUID, websocket: WebSocket):
# if someone else is not connected
await asyncio.sleep(5)
if not controller.notification.project_has_listeners(project.id):
- log.info("Project '{}' is automatically closing due to no client listening".format(project.id))
+ log.info(f"Project '{project.id}' is automatically closing due to no client listening")
await project.close()
@@ -285,14 +284,14 @@ async def export_project(project: Project = Depends(dep_project),
async for chunk in zstream:
yield chunk
- log.info("Project '{}' exported in {:.4f} seconds".format(project.name, time.time() - begin))
+ log.info(f"Project '{project.name}' exported in {time.time() - begin:.4f} seconds")
# Will be raise if you have no space left or permission issue on your temporary directory
# RuntimeError: something was wrong during the zip process
except (ValueError, OSError, RuntimeError) as e:
- raise ConnectionError("Cannot export project: {}".format(e))
+ raise ConnectionError(f"Cannot export project: {e}")
- headers = {"CONTENT-DISPOSITION": 'attachment; filename="{}.gns3project"'.format(project.name)}
+ headers = {"CONTENT-DISPOSITION": f'attachment; filename="{project.name}.gns3project"'}
return StreamingResponse(streamer(), media_type="application/gns3project", headers=headers)
@@ -325,9 +324,9 @@ async def import_project(project_id: UUID, request: Request, path: Optional[Path
with open(temp_project_path, "rb") as f:
project = await import_controller_project(controller, str(project_id), f, location=path, name=name)
- log.info("Project '{}' imported in {:.4f} seconds".format(project.name, time.time() - begin))
+ log.info(f"Project '{project.name}' imported in {time.time() - begin:.4f} seconds")
except OSError as e:
- raise ControllerError("Could not import the project: {}".format(e))
+ raise ControllerError(f"Could not import the project: {e}")
return project.__json__()
diff --git a/gns3server/api/routes/controller/symbols.py b/gns3server/api/routes/controller/symbols.py
index fa94a1d2..e2f21b1a 100644
--- a/gns3server/api/routes/controller/symbols.py
+++ b/gns3server/api/routes/controller/symbols.py
@@ -54,7 +54,7 @@ async def get_symbol(symbol_id: str):
symbol = controller.symbols.get_path(symbol_id)
return FileResponse(symbol)
except (KeyError, OSError) as e:
- return ControllerNotFoundError("Could not get symbol file: {}".format(e))
+ return ControllerNotFoundError(f"Could not get symbol file: {e}")
@router.get("/{symbol_id:path}/dimensions",
@@ -70,7 +70,7 @@ async def get_symbol_dimensions(symbol_id: str):
symbol_dimensions = {'width': width, 'height': height}
return symbol_dimensions
except (KeyError, OSError, ValueError) as e:
- return ControllerNotFoundError("Could not get symbol file: {}".format(e))
+ return ControllerNotFoundError(f"Could not get symbol file: {e}")
@router.post("/{symbol_id:path}/raw",
@@ -87,7 +87,7 @@ async def upload_symbol(symbol_id: str, request: Request):
with open(path, "wb") as f:
f.write(await request.body())
except (UnicodeEncodeError, OSError) as e:
- raise ControllerError("Could not write symbol file '{}': {}".format(path, e))
+ raise ControllerError(f"Could not write symbol file '{path}': {e}")
# Reset the symbol list
controller.symbols.list()
diff --git a/gns3server/api/routes/controller/templates.py b/gns3server/api/routes/controller/templates.py
index fed65b00..d22a8333 100644
--- a/gns3server/api/routes/controller/templates.py
+++ b/gns3server/api/routes/controller/templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GNS3 Technologies Inc.
#
diff --git a/gns3server/api/server.py b/gns3server/api/server.py
index f63bb3cf..5cbdf8c4 100644
--- a/gns3server/api/server.py
+++ b/gns3server/api/server.py
@@ -153,5 +153,5 @@ async def add_extra_headers(request: Request, call_next):
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
- response.headers["X-GNS3-Server-Version"] = "{}".format(__version__)
+ response.headers["X-GNS3-Server-Version"] = f"{__version__}"
return response
diff --git a/gns3server/compute/__init__.py b/gns3server/compute/__init__.py
index 7de7eac9..04758a5f 100644
--- a/gns3server/compute/__init__.py
+++ b/gns3server/compute/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/adapters/adapter.py b/gns3server/compute/adapters/adapter.py
index 33c916c4..8294a036 100644
--- a/gns3server/compute/adapters/adapter.py
+++ b/gns3server/compute/adapters/adapter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -16,7 +15,7 @@
# along with this program. If not, see .
-class Adapter(object):
+class Adapter:
"""
Base class for adapters.
diff --git a/gns3server/compute/adapters/ethernet_adapter.py b/gns3server/compute/adapters/ethernet_adapter.py
index cffa50a3..1a09c25c 100644
--- a/gns3server/compute/adapters/ethernet_adapter.py
+++ b/gns3server/compute/adapters/ethernet_adapter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/adapters/serial_adapter.py b/gns3server/compute/adapters/serial_adapter.py
index 9305b4fd..989d5be4 100644
--- a/gns3server/compute/adapters/serial_adapter.py
+++ b/gns3server/compute/adapters/serial_adapter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/base_manager.py b/gns3server/compute/base_manager.py
index fd911d0b..e148419e 100644
--- a/gns3server/compute/base_manager.py
+++ b/gns3server/compute/base_manager.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -144,12 +143,12 @@ class BaseManager:
try:
future.result()
except (Exception, GeneratorExit) as e:
- log.error("Could not close node {}".format(e), exc_info=1)
+            log.error(f"Could not close node: {e}", exc_info=True)
continue
if hasattr(BaseManager, "_instance"):
BaseManager._instance = None
- log.debug("Module {} unloaded".format(self.module_name))
+ log.debug(f"Module {self.module_name} unloaded")
def get_node(self, node_id, project_id=None):
"""
@@ -168,15 +167,15 @@ class BaseManager:
try:
UUID(node_id, version=4)
except ValueError:
- raise ComputeError("Node ID {} is not a valid UUID".format(node_id))
+ raise ComputeError(f"Node ID {node_id} is not a valid UUID")
if node_id not in self._nodes:
- raise ComputeNotFoundError("Node ID {} doesn't exist".format(node_id))
+ raise ComputeNotFoundError(f"Node ID {node_id} doesn't exist")
node = self._nodes[node_id]
if project_id:
if node.project.id != project.id:
- raise ComputeNotFoundError("Project ID {} doesn't belong to node {}".format(project_id, node.name))
+            raise ComputeNotFoundError(f"Project ID {project_id} doesn't belong to node {node.name}")
return node
@@ -226,7 +225,7 @@ class BaseManager:
shutil.rmtree(destination_dir)
shutil.copytree(source_node.working_dir, destination_dir, symlinks=True, ignore_dangling_symlinks=True)
except OSError as e:
- raise ComputeError("Cannot duplicate node data: {}".format(e))
+ raise ComputeError(f"Cannot duplicate node data: {e}")
# We force a refresh of the name. This forces the rewrite
# of some configuration files
@@ -326,7 +325,7 @@ class BaseManager:
if struct.unpack(" available_ram:
- message = '"{}" requires {}MB of RAM to run but there is only {}MB - {}% of RAM left on "{}"'.format(self.name,
- requested_ram,
- available_ram,
- percentage_left,
- platform.node())
+ message = '"{}" requires {}MB of RAM to run but there is only {}MB - {}% of RAM left on "{}"'.format(
+ self.name,
+ requested_ram,
+ available_ram,
+ percentage_left,
+ platform.node()
+ )
self.project.emit("log.warning", {"message": message})
def _get_custom_adapter_settings(self, adapter_number):
diff --git a/gns3server/compute/builtin/__init__.py b/gns3server/compute/builtin/__init__.py
index 2bef75c6..4b7a35fb 100644
--- a/gns3server/compute/builtin/__init__.py
+++ b/gns3server/compute/builtin/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/builtin/builtin_node_factory.py b/gns3server/compute/builtin/builtin_node_factory.py
index 093863f3..78815dba 100644
--- a/gns3server/compute/builtin/builtin_node_factory.py
+++ b/gns3server/compute/builtin/builtin_node_factory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -40,6 +39,6 @@ class BuiltinNodeFactory:
def __new__(cls, name, node_id, project, manager, node_type, **kwargs):
if node_type not in BUILTIN_NODES:
- raise NodeError("Unknown node type: {}".format(node_type))
+ raise NodeError(f"Unknown node type: {node_type}")
return BUILTIN_NODES[node_type](name, node_id, project, manager, **kwargs)
diff --git a/gns3server/compute/builtin/nodes/cloud.py b/gns3server/compute/builtin/nodes/cloud.py
index 2a4597c4..8e1705ab 100644
--- a/gns3server/compute/builtin/nodes/cloud.py
+++ b/gns3server/compute/builtin/nodes/cloud.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -213,7 +212,7 @@ class Cloud(BaseNode):
"""
await self.start()
- log.info('Cloud "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ log.info(f'Cloud "{self._name}" [{self._id}] has been created')
async def start(self):
"""
@@ -246,7 +245,7 @@ class Cloud(BaseNode):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
await self._stop_ubridge()
- log.info('Cloud "{name}" [{id}] has been closed'.format(name=self._name, id=self._id))
+ log.info(f'Cloud "{self._name}" [{self._id}] has been closed')
async def _is_wifi_adapter_osx(self, adapter_name):
"""
@@ -256,7 +255,7 @@ class Cloud(BaseNode):
try:
output = await gns3server.utils.asyncio.subprocess_check_output("networksetup", "-listallhardwareports")
except (OSError, subprocess.SubprocessError) as e:
- log.warning("Could not execute networksetup: {}".format(e))
+ log.warning(f"Could not execute networksetup: {e}")
return False
is_wifi = False
@@ -285,17 +284,21 @@ class Cloud(BaseNode):
break
if not port_info:
- raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name,
- port_number=port_number))
+ raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(
+ name=self.name,
+ port_number=port_number)
+ )
- bridge_name = "{}-{}".format(self._id, port_number)
- await self._ubridge_send("bridge create {name}".format(name=bridge_name))
+ bridge_name = f"{self._id}-{port_number}"
+ await self._ubridge_send(f"bridge create {bridge_name}")
if not isinstance(nio, NIOUDP):
raise NodeError("Source NIO is not UDP")
- await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
- lport=nio.lport,
- rhost=nio.rhost,
- rport=nio.rport))
+ await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(
+ name=bridge_name,
+ lport=nio.lport,
+ rhost=nio.rhost,
+ rport=nio.rport)
+ )
await self._ubridge_apply_filters(bridge_name, nio.filters)
if port_info["type"] in ("ethernet", "tap"):
@@ -310,7 +313,7 @@ class Cloud(BaseNode):
if port_info["type"] == "ethernet":
network_interfaces = [interface["name"] for interface in self._interfaces()]
if not port_info["interface"] in network_interfaces:
- raise NodeError("Interface '{}' could not be found on this system, please update '{}'".format(port_info["interface"], self.name))
+ raise NodeError(f"Interface '{port_info['interface']}' could not be found on this system, please update '{self.name}'")
if sys.platform.startswith("linux"):
await self._add_linux_ethernet(port_info, bridge_name)
@@ -320,19 +323,26 @@ class Cloud(BaseNode):
await self._add_windows_ethernet(port_info, bridge_name)
elif port_info["type"] == "tap":
- await self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))
+ await self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(
+ name=bridge_name,
+ interface=port_info["interface"])
+ )
elif port_info["type"] == "udp":
- await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
- lport=port_info["lport"],
- rhost=port_info["rhost"],
- rport=port_info["rport"]))
+ await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(
+ name=bridge_name,
+ lport=port_info["lport"],
+ rhost=port_info["rhost"],
+ rport=port_info["rport"])
+ )
if nio.capturing:
- await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name,
- pcap_file=nio.pcap_output_file))
+ await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(
+ name=bridge_name,
+ pcap_file=nio.pcap_output_file)
+ )
- await self._ubridge_send('bridge start {name}'.format(name=bridge_name))
+ await self._ubridge_send(f"bridge start {bridge_name}")
async def _add_linux_ethernet(self, port_info, bridge_name):
"""
@@ -352,10 +362,19 @@ class Cloud(BaseNode):
break
i += 1
- await self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap))
- await self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface))
+ await self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(
+ name=bridge_name,
+ interface=tap)
+ )
+ await self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(
+ tap=tap,
+ interface=interface)
+ )
else:
- await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface))
+ await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(
+ name=bridge_name,
+ interface=interface)
+ )
async def _add_osx_ethernet(self, port_info, bridge_name):
"""
@@ -367,12 +386,17 @@ class Cloud(BaseNode):
raise NodeError("Connecting to a Wireless adapter is not supported on Mac OS")
if port_info["interface"].startswith("vmnet"):
# Use a special NIO to connect to VMware vmnet interfaces on OSX (libpcap doesn't support them)
- await self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=bridge_name,
- interface=port_info["interface"]))
+ await self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(
+ name=bridge_name,
+ interface=port_info["interface"])
+ )
return
if not gns3server.utils.interfaces.has_netmask(port_info["interface"]):
- raise NodeError("Interface {} has no netmask, interface down?".format(port_info["interface"]))
- await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))
+ raise NodeError(f"Interface {port_info['interface']} has no netmask, interface down?")
+ await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(
+ name=bridge_name,
+ interface=port_info["interface"])
+ )
async def _add_windows_ethernet(self, port_info, bridge_name):
"""
@@ -380,8 +404,11 @@ class Cloud(BaseNode):
"""
if not gns3server.utils.interfaces.has_netmask(port_info["interface"]):
- raise NodeError("Interface {} has no netmask, interface down?".format(port_info["interface"]))
- await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))
+ raise NodeError(f"Interface {port_info['interface']} has no netmask, interface down?")
+ await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(
+ name=bridge_name,
+ interface=port_info["interface"])
+ )
async def add_nio(self, nio, port_number):
"""
@@ -392,7 +419,7 @@ class Cloud(BaseNode):
"""
if port_number in self._nios:
- raise NodeError("Port {} isn't free".format(port_number))
+ raise NodeError(f"Port {port_number} isn't free")
log.info('Cloud "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name,
id=self._id,
@@ -416,7 +443,7 @@ class Cloud(BaseNode):
:param port_number: port to allocate for the NIO
"""
- bridge_name = "{}-{}".format(self._id, port_number)
+ bridge_name = f"{self._id}-{port_number}"
if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
await self._ubridge_apply_filters(bridge_name, nio.filters)
@@ -427,8 +454,8 @@ class Cloud(BaseNode):
:param port_number: adapter number
"""
- bridge_name = "{}-{}".format(self._id, port_number)
- await self._ubridge_send("bridge delete {name}".format(name=bridge_name))
+ bridge_name = f"{self._id}-{port_number}"
+ await self._ubridge_send(f"bridge delete {bridge_name}")
async def remove_nio(self, port_number):
"""
@@ -440,17 +467,19 @@ class Cloud(BaseNode):
"""
if port_number not in self._nios:
- raise NodeError("Port {} is not allocated".format(port_number))
+ raise NodeError(f"Port {port_number} is not allocated")
await self.stop_capture(port_number)
nio = self._nios[port_number]
if isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
- log.info('Cloud "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name,
- id=self._id,
- nio=nio,
- port=port_number))
+ log.info('Cloud "{name}" [{id}]: NIO {nio} removed from port {port}'.format(
+ name=self._name,
+ id=self._id,
+ nio=nio,
+ port=port_number)
+ )
del self._nios[port_number]
if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
@@ -468,11 +497,13 @@ class Cloud(BaseNode):
"""
if not [port["port_number"] for port in self._ports_mapping if port_number == port["port_number"]]:
- raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name,
- port_number=port_number))
+ raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(
+ name=self.name,
+ port_number=port_number)
+ )
if port_number not in self._nios:
- raise NodeError("Port {} is not connected".format(port_number))
+ raise NodeError(f"Port {port_number} is not connected")
nio = self._nios[port_number]
@@ -489,14 +520,18 @@ class Cloud(BaseNode):
nio = self.get_nio(port_number)
if nio.capturing:
- raise NodeError("Packet capture is already activated on port {port_number}".format(port_number=port_number))
+ raise NodeError(f"Packet capture is already activated on port {port_number}")
nio.start_packet_capture(output_file)
- bridge_name = "{}-{}".format(self._id, port_number)
- await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name,
- output_file=output_file))
- log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name,
- id=self.id,
- port_number=port_number))
+ bridge_name = f"{self._id}-{port_number}"
+ await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(
+ name=bridge_name,
+ output_file=output_file)
+ )
+ log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(
+ name=self.name,
+ id=self.id,
+ port_number=port_number)
+ )
async def stop_capture(self, port_number):
"""
@@ -509,9 +544,11 @@ class Cloud(BaseNode):
if not nio.capturing:
return
nio.stop_packet_capture()
- bridge_name = "{}-{}".format(self._id, port_number)
- await self._ubridge_send("bridge stop_capture {name}".format(name=bridge_name))
+ bridge_name = f"{self._id}-{port_number}"
+ await self._ubridge_send(f"bridge stop_capture {bridge_name}")
- log.info("Cloud'{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name,
- id=self.id,
- port_number=port_number))
+ log.info("Cloud'{name}' [{id}]: stopping packet capture on port {port_number}".format(
+ name=self.name,
+ id=self.id,
+ port_number=port_number)
+ )
diff --git a/gns3server/compute/builtin/nodes/ethernet_hub.py b/gns3server/compute/builtin/nodes/ethernet_hub.py
index 015350fb..3c6a2e5d 100644
--- a/gns3server/compute/builtin/nodes/ethernet_hub.py
+++ b/gns3server/compute/builtin/nodes/ethernet_hub.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -51,7 +50,7 @@ class EthernetHub(BaseNode):
"""
super().create()
- log.info('Ethernet hub "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ log.info(f'Ethernet hub "{self._name}" [{self._id}] has been created')
async def delete(self):
"""
diff --git a/gns3server/compute/builtin/nodes/ethernet_switch.py b/gns3server/compute/builtin/nodes/ethernet_switch.py
index 241dac09..623efa22 100644
--- a/gns3server/compute/builtin/nodes/ethernet_switch.py
+++ b/gns3server/compute/builtin/nodes/ethernet_switch.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -51,7 +50,7 @@ class EthernetSwitch(BaseNode):
"""
super().create()
- log.info('Ethernet switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ log.info(f'Ethernet switch "{self._name}" [{self._id}] has been created')
async def delete(self):
"""
diff --git a/gns3server/compute/builtin/nodes/nat.py b/gns3server/compute/builtin/nodes/nat.py
index ee1e0084..2b97cd4c 100644
--- a/gns3server/compute/builtin/nodes/nat.py
+++ b/gns3server/compute/builtin/nodes/nat.py
@@ -40,7 +40,7 @@ class Nat(Cloud):
if not nat_interface:
nat_interface = "virbr0"
if nat_interface not in [interface["name"] for interface in gns3server.utils.interfaces.interfaces()]:
- raise NodeError("NAT interface {} is missing, please install libvirt".format(nat_interface))
+ raise NodeError(f"NAT interface {nat_interface} is missing, please install libvirt")
interface = nat_interface
else:
nat_interface = Config.instance().settings.Server.default_nat_interface
@@ -49,10 +49,11 @@ class Nat(Cloud):
interfaces = list(filter(lambda x: nat_interface in x.lower(),
[interface["name"] for interface in gns3server.utils.interfaces.interfaces()]))
if not len(interfaces):
- raise NodeError("NAT interface {} is missing. You need to install VMware or use the NAT node on GNS3 VM".format(nat_interface))
+ raise NodeError(f"NAT interface {nat_interface} is missing. "
+ f"You need to install VMware or use the NAT node on GNS3 VM")
interface = interfaces[0] # take the first available interface containing the vmnet8 name
- log.info("NAT node '{}' configured to use NAT interface '{}'".format(name, interface))
+ log.info(f"NAT node '{name}' configured to use NAT interface '{interface}'")
ports = [
{
"name": "nat0",
diff --git a/gns3server/compute/docker/__init__.py b/gns3server/compute/docker/__init__.py
index 126311db..7c76a8c8 100644
--- a/gns3server/compute/docker/__init__.py
+++ b/gns3server/compute/docker/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -68,9 +67,8 @@ class Docker(BaseManager):
docker_version = parse_version(version['ApiVersion'])
if docker_version < parse_version(DOCKER_MINIMUM_API_VERSION):
- raise DockerError(
- "Docker version is {}. GNS3 requires a minimum version of {}".format(version["Version"],
- DOCKER_MINIMUM_VERSION))
+ raise DockerError(f"Docker version is {version['Version']}. "
+ f"GNS3 requires a minimum version of {DOCKER_MINIMUM_VERSION}")
preferred_api_version = parse_version(DOCKER_PREFERRED_API_VERSION)
if docker_version >= preferred_api_version:
@@ -150,7 +148,7 @@ class Docker(BaseManager):
headers={"content-type": "application/json", },
timeout=timeout)
except aiohttp.ClientError as e:
- raise DockerError("Docker has returned an error: {}".format(str(e)))
+ raise DockerError(f"Docker has returned an error: {e}")
except (asyncio.TimeoutError):
raise DockerError("Docker timeout " + method + " " + path)
if response.status >= 300:
@@ -159,13 +157,13 @@ class Docker(BaseManager):
body = json.loads(body.decode("utf-8"))["message"]
except ValueError:
pass
- log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
+ log.debug(f"Query Docker {method} {path} params={params} data={data} Response: {body}")
if response.status == 304:
- raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
+ raise DockerHttp304Error(f"Docker has returned an error: {response.status} {body}")
elif response.status == 404:
- raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
+ raise DockerHttp404Error(f"Docker has returned an error: {response.status} {body}")
else:
- raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
+ raise DockerError(f"Docker has returned an error: {response.status} {body}")
return response
async def websocket_query(self, path, params={}):
@@ -191,27 +189,28 @@ class Docker(BaseManager):
"""
try:
- await self.query("GET", "images/{}/json".format(image))
+ await self.query("GET", f"images/{image}/json")
return # We already have the image skip the download
except DockerHttp404Error:
pass
if progress_callback:
- progress_callback("Pulling '{}' from docker hub".format(image))
+ progress_callback(f"Pulling '{image}' from docker hub")
try:
response = await self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
except DockerError as e:
- raise DockerError("Could not pull the '{}' image from Docker Hub, please check your Internet connection (original error: {})".format(image, e))
+ raise DockerError(f"Could not pull the '{image}' image from Docker Hub, "
+ f"please check your Internet connection (original error: {e})")
# The pull api will stream status via an HTTP JSON stream
content = ""
while True:
try:
chunk = await response.content.read(CHUNK_SIZE)
except aiohttp.ServerDisconnectedError:
- log.error("Disconnected from server while pulling Docker image '{}' from docker hub".format(image))
+ log.error(f"Disconnected from server while pulling Docker image '{image}' from docker hub")
break
except asyncio.TimeoutError:
- log.error("Timeout while pulling Docker image '{}' from docker hub".format(image))
+ log.error(f"Timeout while pulling Docker image '{image}' from docker hub")
break
if not chunk:
break
@@ -228,7 +227,7 @@ class Docker(BaseManager):
pass
response.close()
if progress_callback:
- progress_callback("Success pulling image {}".format(image))
+ progress_callback(f"Success pulling image {image}")
async def list_images(self):
"""
diff --git a/gns3server/compute/docker/docker_error.py b/gns3server/compute/docker/docker_error.py
index 5d2b9b1d..298b9c3b 100644
--- a/gns3server/compute/docker/docker_error.py
+++ b/gns3server/compute/docker/docker_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/docker/docker_vm.py b/gns3server/compute/docker/docker_vm.py
index b5cb1cfc..1cb52e56 100644
--- a/gns3server/compute/docker/docker_vm.py
+++ b/gns3server/compute/docker/docker_vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -78,7 +77,7 @@ class DockerVM(BaseNode):
# force the latest image if no version is specified
if ":" not in image:
- image = "{}:latest".format(image)
+ image = f"{image}:latest"
self._image = image
self._start_command = start_command
self._environment = environment
@@ -110,9 +109,11 @@ class DockerVM(BaseNode):
else:
self.adapters = adapters
- log.debug("{module}: {name} [{image}] initialized.".format(module=self.manager.module_name,
- name=self.name,
- image=self._image))
+ log.debug("{module}: {name} [{image}] initialized.".format(
+ module=self.manager.module_name,
+ name=self.name,
+ image=self._image)
+ )
def __json__(self):
return {
@@ -148,7 +149,7 @@ class DockerVM(BaseNode):
if not os.path.exists("/tmp/.X11-unix/"):
return display
while True:
- if not os.path.exists("/tmp/.X11-unix/X{}".format(display)):
+ if not os.path.exists(f"/tmp/.X11-unix/X{display}"):
return display
display += 1
@@ -242,7 +243,7 @@ class DockerVM(BaseNode):
"""
try:
- result = await self.manager.query("GET", "containers/{}/json".format(self._cid))
+ result = await self.manager.query("GET", f"containers/{self._cid}/json")
except DockerError:
return "exited"
@@ -257,7 +258,7 @@ class DockerVM(BaseNode):
:returns: Dictionary information about the container image
"""
- result = await self.manager.query("GET", "images/{}/json".format(self._image))
+ result = await self.manager.query("GET", f"images/{self._image}/json")
return result
def _mount_binds(self, image_info):
@@ -267,20 +268,20 @@ class DockerVM(BaseNode):
resources = get_resource("compute/docker/resources")
if not os.path.exists(resources):
- raise DockerError("{} is missing can't start Docker containers".format(resources))
- binds = ["{}:/gns3:ro".format(resources)]
+ raise DockerError(f"{resources} is missing, can't start Docker container")
+ binds = [f"{resources}:/gns3:ro"]
# We mount our own etc/network
try:
self._create_network_config()
except OSError as e:
- raise DockerError("Could not create network config in the container: {}".format(e))
+ raise DockerError(f"Could not create network config in the container: {e}")
volumes = ["/etc/network"]
volumes.extend((image_info.get("Config", {}).get("Volumes") or {}).keys())
for volume in self._extra_volumes:
if not volume.strip() or volume[0] != "/" or volume.find("..") >= 0:
- raise DockerError("Persistent volume '{}' has invalid format. It must start with a '/' and not contain '..'.".format(volume))
+ raise DockerError(f"Persistent volume '{volume}' has invalid format. It must start with a '/' and not contain '..'.")
volumes.extend(self._extra_volumes)
self._volumes = []
@@ -297,7 +298,7 @@ class DockerVM(BaseNode):
for volume in self._volumes:
source = os.path.join(self.working_dir, os.path.relpath(volume, "/"))
os.makedirs(source, exist_ok=True)
- binds.append("{}:/gns3volumes{}".format(source, volume))
+ binds.append(f"{source}:/gns3volumes{volume}")
return binds
@@ -343,16 +344,17 @@ class DockerVM(BaseNode):
try:
image_infos = await self._get_image_information()
except DockerHttp404Error:
- log.info("Image '{}' is missing, pulling it from Docker hub...".format(self._image))
+ log.info(f"Image '{self._image}' is missing, pulling it from Docker hub...")
await self.pull_image(self._image)
image_infos = await self._get_image_information()
if image_infos is None:
- raise DockerError("Cannot get information for image '{}', please try again.".format(self._image))
+ raise DockerError(f"Cannot get information for image '{self._image}', please try again.")
available_cpus = psutil.cpu_count(logical=True)
if self._cpus > available_cpus:
- raise DockerError("You have allocated too many CPUs for the Docker container (max available is {} CPUs)".format(available_cpus))
+ raise DockerError(f"You have allocated too many CPUs for the Docker container "
+ f"(max available is {available_cpus} CPUs)")
params = {
"Hostname": self._name,
@@ -381,7 +383,7 @@ class DockerVM(BaseNode):
try:
params["Cmd"] = shlex.split(self._start_command)
except ValueError as e:
- raise DockerError("Invalid start command '{}': {}".format(self._start_command, e))
+ raise DockerError(f"Invalid start command '{self._start_command}': {e}")
if len(params["Cmd"]) == 0:
params["Cmd"] = image_infos.get("Config", {"Cmd": []}).get("Cmd")
if params["Cmd"] is None:
@@ -391,7 +393,7 @@ class DockerVM(BaseNode):
params["Entrypoint"].insert(0, "/gns3/init.sh") # FIXME /gns3/init.sh is not found?
# Give the information to the container on how many interface should be inside
- params["Env"].append("GNS3_MAX_ETHERNET=eth{}".format(self.adapters - 1))
+ params["Env"].append(f"GNS3_MAX_ETHERNET=eth{self.adapters - 1}")
# Give the information to the container the list of volume path mounted
params["Env"].append("GNS3_VOLUMES={}".format(":".join(self._volumes)))
@@ -412,7 +414,7 @@ class DockerVM(BaseNode):
for e in self._environment.strip().split("\n"):
e = e.strip()
if e.split("=")[0] == "":
- self.project.emit("log.warning", {"message": "{} has invalid environment variable: {}".format(self.name, e)})
+ self.project.emit("log.warning", {"message": f"{self.name} has invalid environment variable: {e}"})
continue
if not e.startswith("GNS3_"):
formatted = self._format_env(variables, e)
@@ -421,17 +423,17 @@ class DockerVM(BaseNode):
if self._console_type == "vnc":
await self._start_vnc()
params["Env"].append("QT_GRAPHICSSYSTEM=native") # To fix a Qt issue: https://github.com/GNS3/gns3-server/issues/556
- params["Env"].append("DISPLAY=:{}".format(self._display))
+ params["Env"].append(f"DISPLAY=:{self._display}")
params["HostConfig"]["Binds"].append("/tmp/.X11-unix/:/tmp/.X11-unix/")
if self._extra_hosts:
extra_hosts = self._format_extra_hosts(self._extra_hosts)
if extra_hosts:
- params["Env"].append("GNS3_EXTRA_HOSTS={}".format(extra_hosts))
+ params["Env"].append(f"GNS3_EXTRA_HOSTS={extra_hosts}")
result = await self.manager.query("POST", "containers/create", data=params)
self._cid = result['Id']
- log.info("Docker container '{name}' [{id}] created".format(name=self._name, id=self._id))
+ log.info(f"Docker container '{self._name}' [{self._id}] created")
return True
def _format_env(self, variables, env):
@@ -450,8 +452,8 @@ class DockerVM(BaseNode):
if hostname and ip:
hosts.append((hostname, ip))
except ValueError:
- raise DockerError("Can't apply `ExtraHosts`, wrong format: {}".format(extra_hosts))
- return "\n".join(["{}\t{}".format(h[1], h[0]) for h in hosts])
+ raise DockerError(f"Can't apply `ExtraHosts`, wrong format: {extra_hosts}")
+ return "\n".join([f"{h[1]}\t{h[0]}" for h in hosts])
async def update(self):
"""
@@ -479,8 +481,10 @@ class DockerVM(BaseNode):
try:
state = await self._get_container_state()
except DockerHttp404Error:
- raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds.".format(name=self.name,
- cid=self._cid))
+ raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds.".format(
+ name=self.name,
+ cid=self._cid)
+ )
if state == "paused":
await self.unpause()
elif state == "running":
@@ -494,7 +498,7 @@ class DockerVM(BaseNode):
await self._clean_servers()
- await self.manager.query("POST", "containers/{}/start".format(self._cid))
+ await self.manager.query("POST", f"containers/{self._cid}/start")
self._namespace = await self._get_namespace()
await self._start_ubridge(require_privileged_access=True)
@@ -524,10 +528,12 @@ class DockerVM(BaseNode):
self._permissions_fixed = False
self.status = "started"
- log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(name=self._name,
- image=self._image,
- console=self.console,
- console_type=self.console_type))
+ log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(
+ name=self._name,
+ image=self._image,
+ console=self.console,
+ console_type=self.console_type)
+ )
async def _start_aux(self):
"""
@@ -543,12 +549,12 @@ class DockerVM(BaseNode):
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)
except OSError as e:
- raise DockerError("Could not start auxiliary console process: {}".format(e))
+ raise DockerError(f"Could not start auxiliary console process: {e}")
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True)
try:
- self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux)))
+ self._telnet_servers.append(await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux))
except OSError as e:
- raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.aux, e))
+ raise DockerError(f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.aux}: {e}")
log.debug(f"Docker container '{self.name}' started listen for auxiliary telnet on {self.aux}")
async def _fix_permissions(self):
@@ -558,10 +564,10 @@ class DockerVM(BaseNode):
"""
state = await self._get_container_state()
- log.info("Docker container '{name}' fix ownership, state = {state}".format(name=self._name, state=state))
+ log.info(f"Docker container '{self._name}' fix ownership, state = {state}")
if state == "stopped" or state == "exited":
# We need to restart it to fix permissions
- await self.manager.query("POST", "containers/{}/start".format(self._cid))
+ await self.manager.query("POST", f"containers/{self._cid}/start")
for volume in self._volumes:
log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
@@ -584,7 +590,7 @@ class DockerVM(BaseNode):
.format(uid=os.getuid(), gid=os.getgid(), path=volume),
)
except OSError as e:
- raise DockerError("Could not fix permissions for {}: {}".format(volume, e))
+ raise DockerError(f"Could not fix permissions for {volume}: {e}")
await process.wait()
self._permissions_fixed = True
@@ -608,13 +614,13 @@ class DockerVM(BaseNode):
"-rfbport", str(self.console),
"-AlwaysShared",
"-SecurityTypes", "None",
- ":{}".format(self._display),
+ f":{self._display}",
stdout=fd, stderr=subprocess.STDOUT)
else:
if restart is False:
self._xvfb_process = await asyncio.create_subprocess_exec("Xvfb",
"-nolisten",
- "tcp", ":{}".format(self._display),
+ "tcp", f":{self._display}",
"-screen", "0",
self._console_resolution + "x16")
@@ -625,7 +631,7 @@ class DockerVM(BaseNode):
"-nopw",
"-shared",
"-geometry", self._console_resolution,
- "-display", "WAIT:{}".format(self._display),
+ "-display", f"WAIT:{self._display}",
"-rfbport", str(self.console),
"-rfbportv6", str(self.console),
"-noncache",
@@ -642,17 +648,17 @@ class DockerVM(BaseNode):
if not (tigervnc_path or shutil.which("Xvfb") and shutil.which("x11vnc")):
raise DockerError("Please install TigerVNC server (recommended) or Xvfb + x11vnc before using VNC support")
await self._start_vnc_process()
- x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
+ x11_socket = os.path.join("/tmp/.X11-unix/", f"X{self._display}")
try:
await wait_for_file_creation(x11_socket)
except asyncio.TimeoutError:
- raise DockerError('x11 socket file "{}" does not exist'.format(x11_socket))
+ raise DockerError(f'x11 socket file "{x11_socket}" does not exist')
if not hasattr(sys, "_called_from_test") or not sys._called_from_test:
# Start vncconfig for tigervnc clipboard support, connection available only after socket creation.
tigervncconfig_path = shutil.which("vncconfig")
if tigervnc_path and tigervncconfig_path:
- self._vncconfig_process = await asyncio.create_subprocess_exec(tigervncconfig_path, "-display", ":{}".format(self._display), "-nowin")
+ self._vncconfig_process = await asyncio.create_subprocess_exec(tigervncconfig_path, "-display", f":{self._display}", "-nowin")
# sometimes the VNC process can crash
monitor_process(self._vnc_process, self._vnc_callback)
@@ -665,7 +671,7 @@ class DockerVM(BaseNode):
"""
if returncode != 0 and self._closing is False:
- self.project.emit("log.error", {"message": "The vnc process has stopped with return code {} for node '{}'. Please restart this node.".format(returncode, self.name)})
+ self.project.emit("log.error", {"message": f"The vnc process has stopped with return code {returncode} for node '{self.name}'. Please restart this node."})
self._vnc_process = None
async def _start_http(self):
@@ -679,15 +685,15 @@ class DockerVM(BaseNode):
# We replace host and port in the server answer otherwise some link could be broken
server = AsyncioRawCommandServer(command, replaces=[
(
- '://127.0.0.1'.encode(), # {{HOST}} mean client host
- '://{{HOST}}'.encode(),
+ b'://127.0.0.1', # {{HOST}} mean client host
+ b'://{{HOST}}',
),
(
- ':{}'.format(self._console_http_port).encode(),
- ':{}'.format(self.console).encode(),
+ f':{self._console_http_port}'.encode(),
+ f':{self.console}'.encode(),
)
])
- self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)))
+ self._telnet_servers.append(await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console))
async def _window_size_changed_callback(self, columns, rows):
"""
@@ -699,7 +705,7 @@ class DockerVM(BaseNode):
"""
# resize the container TTY.
- await self._manager.query("POST", "containers/{}/resize?h={}&w={}".format(self._cid, rows, columns))
+ await self._manager.query("POST", f"containers/{self._cid}/resize?h={rows}&w={columns}")
async def _start_console(self):
@@ -724,11 +730,11 @@ class DockerVM(BaseNode):
input_stream = InputStream()
telnet = AsyncioTelnetServer(reader=output_stream, writer=input_stream, echo=True, naws=True, window_size_changed_callback=self._window_size_changed_callback)
try:
- self._telnet_servers.append((await asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console)))
+ self._telnet_servers.append(await asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console))
except OSError as e:
- raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e))
+ raise DockerError(f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.console}: {e}")
- self._console_websocket = await self.manager.websocket_query("containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid))
+ self._console_websocket = await self.manager.websocket_query(f"containers/{self._cid}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1")
input_stream.ws = self._console_websocket
output_stream.feed_data(self.name.encode() + b" console is now available... Press RETURN to get started.\r\n")
@@ -750,7 +756,7 @@ class DockerVM(BaseNode):
elif msg.type == aiohttp.WSMsgType.BINARY:
out.feed_data(msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
- log.critical("Docker WebSocket Error: {}".format(ws.exception()))
+ log.critical(f"Docker WebSocket Error: {ws.exception()}")
else:
out.feed_eof()
await ws.close()
@@ -785,7 +791,7 @@ class DockerVM(BaseNode):
Restart this Docker container.
"""
- await self.manager.query("POST", "containers/{}/restart".format(self._cid))
+ await self.manager.query("POST", f"containers/{self._cid}/restart")
log.info("Docker container '{name}' [{image}] restarted".format(
name=self._name, image=self._image))
@@ -825,14 +831,14 @@ class DockerVM(BaseNode):
if state != "stopped" or state != "exited":
# t=5 number of seconds to wait before killing the container
try:
- await self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5})
- log.info("Docker container '{name}' [{image}] stopped".format(name=self._name, image=self._image))
+ await self.manager.query("POST", f"containers/{self._cid}/stop", params={"t": 5})
+ log.info(f"Docker container '{self._name}' [{self._image}] stopped")
except DockerHttp304Error:
# Container is already stopped
pass
# Ignore runtime error because when closing the server
except RuntimeError as e:
- log.debug("Docker runtime error when closing: {}".format(str(e)))
+ log.debug(f"Docker runtime error when closing: {str(e)}")
return
self.status = "stopped"
@@ -841,18 +847,18 @@ class DockerVM(BaseNode):
Pauses this Docker container.
"""
- await self.manager.query("POST", "containers/{}/pause".format(self._cid))
+ await self.manager.query("POST", f"containers/{self._cid}/pause")
self.status = "suspended"
- log.info("Docker container '{name}' [{image}] paused".format(name=self._name, image=self._image))
+ log.info(f"Docker container '{self._name}' [{self._image}] paused")
async def unpause(self):
"""
Unpauses this Docker container.
"""
- await self.manager.query("POST", "containers/{}/unpause".format(self._cid))
+ await self.manager.query("POST", f"containers/{self._cid}/unpause")
self.status = "started"
- log.info("Docker container '{name}' [{image}] unpaused".format(name=self._name, image=self._image))
+ log.info(f"Docker container '{self._name}' [{self._image}] unpaused")
async def close(self):
"""
@@ -892,17 +898,17 @@ class DockerVM(BaseNode):
pass
if self._display:
- display = "/tmp/.X11-unix/X{}".format(self._display)
+ display = f"/tmp/.X11-unix/X{self._display}"
try:
if os.path.exists(display):
os.remove(display)
except OSError as e:
- log.warning("Could not remove display {}: {}".format(display, e))
+ log.warning(f"Could not remove display {display}: {e}")
# v – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false.
# force - 1/True/true or 0/False/false, Kill then remove the container. Default false.
try:
- await self.manager.query("DELETE", "containers/{}".format(self._cid), params={"force": 1, "v": 1})
+ await self.manager.query("DELETE", f"containers/{self._cid}", params={"force": 1, "v": 1})
except DockerError:
pass
log.info("Docker container '{name}' [{image}] removed".format(
@@ -916,7 +922,7 @@ class DockerVM(BaseNode):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
# Ignore runtime error because when closing the server
except (DockerHttp404Error, RuntimeError) as e:
- log.debug("Docker error when closing: {}".format(str(e)))
+ log.debug(f"Docker error when closing: {str(e)}")
return
async def _add_ubridge_connection(self, nio, adapter_number):
@@ -934,14 +940,14 @@ class DockerVM(BaseNode):
adapter_number=adapter_number))
for index in range(4096):
- if "tap-gns3-e{}".format(index) not in psutil.net_if_addrs():
- adapter.host_ifc = "tap-gns3-e{}".format(str(index))
+ if f"tap-gns3-e{index}" not in psutil.net_if_addrs():
+ adapter.host_ifc = f"tap-gns3-e{str(index)}"
break
if adapter.host_ifc is None:
raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name,
adapter_number=adapter_number))
- bridge_name = 'bridge{}'.format(adapter_number)
- await self._ubridge_send('bridge create {}'.format(bridge_name))
+ bridge_name = f'bridge{adapter_number}'
+ await self._ubridge_send(f'bridge create {bridge_name}')
self._bridges.add(bridge_name)
await self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number,
hostif=adapter.host_ifc))
@@ -958,12 +964,12 @@ class DockerVM(BaseNode):
async def _get_namespace(self):
- result = await self.manager.query("GET", "containers/{}/json".format(self._cid))
+ result = await self.manager.query("GET", f"containers/{self._cid}/json")
return int(result['State']['Pid'])
async def _connect_nio(self, adapter_number, nio):
- bridge_name = 'bridge{}'.format(adapter_number)
+ bridge_name = f'bridge{adapter_number}'
await self._ubridge_send('bridge add_nio_udp {bridge_name} {lport} {rhost} {rport}'.format(bridge_name=bridge_name,
lport=nio.lport,
rhost=nio.rhost,
@@ -972,7 +978,7 @@ class DockerVM(BaseNode):
if nio.capturing:
await self._ubridge_send('bridge start_capture {bridge_name} "{pcap_file}"'.format(bridge_name=bridge_name,
pcap_file=nio.pcap_output_file))
- await self._ubridge_send('bridge start {bridge_name}'.format(bridge_name=bridge_name))
+ await self._ubridge_send(f'bridge start {bridge_name}')
await self._ubridge_apply_filters(bridge_name, nio.filters)
async def adapter_add_nio_binding(self, adapter_number, nio):
@@ -1007,7 +1013,7 @@ class DockerVM(BaseNode):
"""
if self.ubridge:
- bridge_name = 'bridge{}'.format(adapter_number)
+ bridge_name = f'bridge{adapter_number}'
if bridge_name in self._bridges:
await self._ubridge_apply_filters(bridge_name, nio.filters)
@@ -1029,8 +1035,8 @@ class DockerVM(BaseNode):
await self.stop_capture(adapter_number)
if self.ubridge:
nio = adapter.get_nio(0)
- bridge_name = 'bridge{}'.format(adapter_number)
- await self._ubridge_send("bridge stop {}".format(bridge_name))
+ bridge_name = f'bridge{adapter_number}'
+ await self._ubridge_send(f"bridge stop {bridge_name}")
await self._ubridge_send('bridge remove_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number,
lport=nio.lport,
rhost=nio.rhost,
@@ -1061,7 +1067,7 @@ class DockerVM(BaseNode):
nio = adapter.get_nio(0)
if not nio:
- raise DockerError("Adapter {} is not connected".format(adapter_number))
+ raise DockerError(f"Adapter {adapter_number} is not connected")
return nio
@@ -1112,10 +1118,10 @@ class DockerVM(BaseNode):
:param output_file: PCAP destination file for the capture
"""
- adapter = "bridge{}".format(adapter_number)
+ adapter = f"bridge{adapter_number}"
if not self.ubridge:
raise DockerError("Cannot start the packet capture: uBridge is not running")
- await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=adapter, output_file=output_file))
+ await self._ubridge_send(f'bridge start_capture {adapter} "{output_file}"')
async def _stop_ubridge_capture(self, adapter_number):
"""
@@ -1124,10 +1130,10 @@ class DockerVM(BaseNode):
:param adapter_number: adapter number
"""
- adapter = "bridge{}".format(adapter_number)
+ adapter = f"bridge{adapter_number}"
if not self.ubridge:
raise DockerError("Cannot stop the packet capture: uBridge is not running")
- await self._ubridge_send("bridge stop_capture {name}".format(name=adapter))
+ await self._ubridge_send(f"bridge stop_capture {adapter}")
async def start_capture(self, adapter_number, output_file):
"""
@@ -1139,7 +1145,7 @@ class DockerVM(BaseNode):
nio = self.get_nio(adapter_number)
if nio.capturing:
- raise DockerError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
+ raise DockerError(f"Packet capture is already activated on adapter {adapter_number}")
nio.start_packet_capture(output_file)
if self.status == "started" and self.ubridge:
@@ -1174,7 +1180,7 @@ class DockerVM(BaseNode):
:returns: string
"""
- result = await self.manager.query("GET", "containers/{}/logs".format(self._cid), params={"stderr": 1, "stdout": 1})
+ result = await self.manager.query("GET", f"containers/{self._cid}/logs", params={"stderr": 1, "stdout": 1})
return result
async def delete(self):
diff --git a/gns3server/compute/dynamips/__init__.py b/gns3server/compute/dynamips/__init__.py
index 89b85f14..b174fdb8 100644
--- a/gns3server/compute/dynamips/__init__.py
+++ b/gns3server/compute/dynamips/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -150,7 +149,7 @@ class Dynamips(BaseManager):
"""
self._dynamips_ids.setdefault(project_id, set())
if dynamips_id in self._dynamips_ids[project_id]:
- raise DynamipsError("Dynamips identifier {} is already used by another router".format(dynamips_id))
+ raise DynamipsError(f"Dynamips identifier {dynamips_id} is already used by another router")
self._dynamips_ids[project_id].add(dynamips_id)
def release_dynamips_id(self, project_id, dynamips_id):
@@ -178,7 +177,7 @@ class Dynamips(BaseManager):
try:
future.result()
except (Exception, GeneratorExit) as e:
- log.error("Could not stop device hypervisor {}".format(e), exc_info=1)
+ log.error(f"Could not stop device hypervisor {e}", exc_info=1)
continue
async def project_closing(self, project):
@@ -201,7 +200,7 @@ class Dynamips(BaseManager):
try:
future.result()
except (Exception, GeneratorExit) as e:
- log.error("Could not delete device {}".format(e), exc_info=1)
+ log.error(f"Could not delete device {e}", exc_info=1)
async def project_closed(self, project):
"""
@@ -222,12 +221,12 @@ class Dynamips(BaseManager):
files += glob.glob(os.path.join(glob.escape(project_dir), "*", "c[0-9][0-9][0-9][0-9]_i[0-9]*_log.txt"))
for file in files:
try:
- log.debug("Deleting file {}".format(file))
+ log.debug(f"Deleting file {file}")
if file in self._ghost_files:
self._ghost_files.remove(file)
await wait_run_in_executor(os.remove, file)
except OSError as e:
- log.warning("Could not delete file {}: {}".format(file, e))
+ log.warning(f"Could not delete file {file}: {e}")
continue
# Release the dynamips ids if we want to reload the same project
@@ -255,9 +254,9 @@ class Dynamips(BaseManager):
if not dynamips_path:
raise DynamipsError("Could not find Dynamips")
if not os.path.isfile(dynamips_path):
- raise DynamipsError("Dynamips {} is not accessible".format(dynamips_path))
+ raise DynamipsError(f"Dynamips {dynamips_path} is not accessible")
if not os.access(dynamips_path, os.X_OK):
- raise DynamipsError("Dynamips {} is not executable".format(dynamips_path))
+ raise DynamipsError(f"Dynamips {dynamips_path} is not executable")
self._dynamips_path = dynamips_path
return dynamips_path
@@ -284,7 +283,7 @@ class Dynamips(BaseManager):
try:
info = socket.getaddrinfo(server_host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
if not info:
- raise DynamipsError("getaddrinfo returns an empty list on {}".format(server_host))
+ raise DynamipsError(f"getaddrinfo returns an empty list on {server_host}")
for res in info:
af, socktype, proto, _, sa = res
# let the OS find an unused port for the Dynamips hypervisor
@@ -293,17 +292,17 @@ class Dynamips(BaseManager):
port = sock.getsockname()[1]
break
except OSError as e:
- raise DynamipsError("Could not find free port for the Dynamips hypervisor: {}".format(e))
+ raise DynamipsError(f"Could not find free port for the Dynamips hypervisor: {e}")
port_manager = PortManager.instance()
hypervisor = Hypervisor(self._dynamips_path, working_dir, server_host, port, port_manager.console_host)
- log.info("Creating new hypervisor {}:{} with working directory {}".format(hypervisor.host, hypervisor.port, working_dir))
+ log.info(f"Creating new hypervisor {hypervisor.host}:{hypervisor.port} with working directory {working_dir}")
await hypervisor.start()
- log.info("Hypervisor {}:{} has successfully started".format(hypervisor.host, hypervisor.port))
+ log.info(f"Hypervisor {hypervisor.host}:{hypervisor.port} has successfully started")
await hypervisor.connect()
if parse_version(hypervisor.version) < parse_version('0.2.11'):
- raise DynamipsError("Dynamips version must be >= 0.2.11, detected version is {}".format(hypervisor.version))
+ raise DynamipsError(f"Dynamips version must be >= 0.2.11, detected version is {hypervisor.version}")
return hypervisor
@@ -315,7 +314,7 @@ class Dynamips(BaseManager):
try:
await self._set_ghost_ios(vm)
except GeneratorExit:
- log.warning("Could not create ghost IOS image {} (GeneratorExit)".format(vm.name))
+ log.warning(f"Could not create ghost IOS image {vm.name} (GeneratorExit)")
async def create_nio(self, node, nio_settings):
"""
@@ -335,13 +334,13 @@ class Dynamips(BaseManager):
try:
info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
if not info:
- raise DynamipsError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
+ raise DynamipsError(f"getaddrinfo returns an empty list on {rhost}:{rport}")
for res in info:
af, socktype, proto, _, sa = res
with socket.socket(af, socktype, proto) as sock:
sock.connect(sa)
except OSError as e:
- raise DynamipsError("Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
+ raise DynamipsError(f"Could not create an UDP connection to {rhost}:{rport}: {e}")
nio = NIOUDP(node, lport, rhost, rport)
nio.filters = nio_settings.get("filters", {})
nio.suspend = nio_settings.get("suspend", False)
@@ -355,11 +354,11 @@ class Dynamips(BaseManager):
if interface["name"] == ethernet_device:
npf_interface = interface["id"]
if not npf_interface:
- raise DynamipsError("Could not find interface {} on this host".format(ethernet_device))
+ raise DynamipsError(f"Could not find interface {ethernet_device} on this host")
else:
ethernet_device = npf_interface
if not is_interface_up(ethernet_device):
- raise DynamipsError("Ethernet interface {} is down".format(ethernet_device))
+ raise DynamipsError(f"Ethernet interface {ethernet_device} is down")
nio = NIOGenericEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_linux_ethernet":
if sys.platform.startswith("win"):
@@ -371,7 +370,7 @@ class Dynamips(BaseManager):
nio = NIOTAP(node.hypervisor, tap_device)
if not is_interface_up(tap_device):
# test after the TAP interface has been created (if it doesn't exist yet)
- raise DynamipsError("TAP interface {} is down".format(tap_device))
+ raise DynamipsError(f"TAP interface {tap_device} is down")
elif nio_settings["type"] == "nio_unix":
local_file = nio_settings["local_file"]
remote_file = nio_settings["remote_file"]
@@ -425,7 +424,7 @@ class Dynamips(BaseManager):
finally:
await ghost.clean_delete()
except DynamipsError as e:
- log.warning("Could not create ghost instance: {}".format(e))
+ log.warning(f"Could not create ghost instance: {e}")
if vm.ghost_file != ghost_file and os.path.isfile(ghost_file_path):
# set the ghost file to the router
@@ -442,8 +441,8 @@ class Dynamips(BaseManager):
for name, value in settings.items():
if hasattr(vm, name) and getattr(vm, name) != value:
- if hasattr(vm, "set_{}".format(name)):
- setter = getattr(vm, "set_{}".format(name))
+ if hasattr(vm, f"set_{name}"):
+ setter = getattr(vm, f"set_{name}")
await setter(value)
elif name.startswith("slot") and value in ADAPTER_MATRIX:
slot_id = int(name[-1])
@@ -455,14 +454,14 @@ class Dynamips(BaseManager):
if not isinstance(vm.slots[slot_id], type(adapter)):
await vm.slot_add_binding(slot_id, adapter)
except IndexError:
- raise DynamipsError("Slot {} doesn't exist on this router".format(slot_id))
+ raise DynamipsError(f"Slot {slot_id} doesn't exist on this router")
elif name.startswith("slot") and (value is None or value == ""):
slot_id = int(name[-1])
try:
if vm.slots[slot_id]:
await vm.slot_remove_binding(slot_id)
except IndexError:
- raise DynamipsError("Slot {} doesn't exist on this router".format(slot_id))
+ raise DynamipsError(f"Slot {slot_id} doesn't exist on this router")
elif name.startswith("wic") and value in WIC_MATRIX:
wic_slot_id = int(name[-1])
wic_name = value
@@ -473,14 +472,14 @@ class Dynamips(BaseManager):
if not isinstance(vm.slots[0].wics[wic_slot_id], type(wic)):
await vm.install_wic(wic_slot_id, wic)
except IndexError:
- raise DynamipsError("WIC slot {} doesn't exist on this router".format(wic_slot_id))
+ raise DynamipsError(f"WIC slot {wic_slot_id} doesn't exist on this router")
elif name.startswith("wic") and (value is None or value == ""):
wic_slot_id = int(name[-1])
try:
if vm.slots[0].wics and vm.slots[0].wics[wic_slot_id]:
await vm.uninstall_wic(wic_slot_id)
except IndexError:
- raise DynamipsError("WIC slot {} doesn't exist on this router".format(wic_slot_id))
+ raise DynamipsError(f"WIC slot {wic_slot_id} doesn't exist on this router")
mmap_support = self.config.settings.Dynamips.mmap_support
if mmap_support is False:
@@ -523,12 +522,12 @@ class Dynamips(BaseManager):
:returns: relative path to the created config file
"""
- log.info("Creating config file {}".format(path))
+ log.info(f"Creating config file {path}")
config_dir = os.path.dirname(path)
try:
os.makedirs(config_dir, exist_ok=True)
except OSError as e:
- raise DynamipsError("Could not create Dynamips configs directory: {}".format(e))
+ raise DynamipsError(f"Could not create Dynamips configs directory: {e}")
if content is None or len(content) == 0:
content = "!\n"
@@ -542,7 +541,7 @@ class Dynamips(BaseManager):
content = content.replace('%h', vm.name)
f.write(content.encode("utf-8"))
except OSError as e:
- raise DynamipsError("Could not create config file '{}': {}".format(path, e))
+ raise DynamipsError(f"Could not create config file '{path}': {e}")
return os.path.join("configs", os.path.basename(path))
@@ -574,10 +573,10 @@ class Dynamips(BaseManager):
if not match:
continue
await vm.set_idlepc(idlepc.split()[0])
- log.debug("Auto Idle-PC: trying idle-PC value {}".format(vm.idlepc))
+ log.debug(f"Auto Idle-PC: trying idle-PC value {vm.idlepc}")
start_time = time.time()
initial_cpu_usage = await vm.get_cpu_usage()
- log.debug("Auto Idle-PC: initial CPU usage is {}%".format(initial_cpu_usage))
+ log.debug(f"Auto Idle-PC: initial CPU usage is {initial_cpu_usage}%")
await asyncio.sleep(3) # wait 3 seconds to probe the cpu again
elapsed_time = time.time() - start_time
cpu_usage = await vm.get_cpu_usage()
@@ -585,10 +584,10 @@ class Dynamips(BaseManager):
cpu_usage = abs(cpu_elapsed_usage * 100.0 / elapsed_time)
if cpu_usage > 100:
cpu_usage = 100
- log.debug("Auto Idle-PC: CPU usage is {}% after {:.2} seconds".format(cpu_usage, elapsed_time))
+ log.debug(f"Auto Idle-PC: CPU usage is {cpu_usage}% after {elapsed_time:.2} seconds")
if cpu_usage < 70:
validated_idlepc = vm.idlepc
- log.debug("Auto Idle-PC: idle-PC value {} has been validated".format(validated_idlepc))
+ log.debug(f"Auto Idle-PC: idle-PC value {validated_idlepc} has been validated")
break
if validated_idlepc is None:
diff --git a/gns3server/compute/dynamips/adapters/adapter.py b/gns3server/compute/dynamips/adapters/adapter.py
index 9dd61619..5126f0bb 100644
--- a/gns3server/compute/dynamips/adapters/adapter.py
+++ b/gns3server/compute/dynamips/adapters/adapter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -16,7 +15,7 @@
# along with this program. If not, see .
-class Adapter(object):
+class Adapter:
"""
Base class for adapters.
diff --git a/gns3server/compute/dynamips/adapters/c1700_mb_1fe.py b/gns3server/compute/dynamips/adapters/c1700_mb_1fe.py
index 4a77efb5..ef7d88c4 100644
--- a/gns3server/compute/dynamips/adapters/c1700_mb_1fe.py
+++ b/gns3server/compute/dynamips/adapters/c1700_mb_1fe.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c1700_mb_wic1.py b/gns3server/compute/dynamips/adapters/c1700_mb_wic1.py
index 70a7149f..170a4489 100644
--- a/gns3server/compute/dynamips/adapters/c1700_mb_wic1.py
+++ b/gns3server/compute/dynamips/adapters/c1700_mb_wic1.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c2600_mb_1e.py b/gns3server/compute/dynamips/adapters/c2600_mb_1e.py
index addb1f9b..be92cf3f 100644
--- a/gns3server/compute/dynamips/adapters/c2600_mb_1e.py
+++ b/gns3server/compute/dynamips/adapters/c2600_mb_1e.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c2600_mb_1fe.py b/gns3server/compute/dynamips/adapters/c2600_mb_1fe.py
index 8f0f199d..2c06dbdd 100644
--- a/gns3server/compute/dynamips/adapters/c2600_mb_1fe.py
+++ b/gns3server/compute/dynamips/adapters/c2600_mb_1fe.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c2600_mb_2e.py b/gns3server/compute/dynamips/adapters/c2600_mb_2e.py
index 78921c83..7798b07a 100644
--- a/gns3server/compute/dynamips/adapters/c2600_mb_2e.py
+++ b/gns3server/compute/dynamips/adapters/c2600_mb_2e.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c2600_mb_2fe.py b/gns3server/compute/dynamips/adapters/c2600_mb_2fe.py
index 0ed67f5d..a3016f01 100644
--- a/gns3server/compute/dynamips/adapters/c2600_mb_2fe.py
+++ b/gns3server/compute/dynamips/adapters/c2600_mb_2fe.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c7200_io_2fe.py b/gns3server/compute/dynamips/adapters/c7200_io_2fe.py
index d250fe89..addde38c 100644
--- a/gns3server/compute/dynamips/adapters/c7200_io_2fe.py
+++ b/gns3server/compute/dynamips/adapters/c7200_io_2fe.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c7200_io_fe.py b/gns3server/compute/dynamips/adapters/c7200_io_fe.py
index 230b0f6f..4d2c1b1b 100644
--- a/gns3server/compute/dynamips/adapters/c7200_io_fe.py
+++ b/gns3server/compute/dynamips/adapters/c7200_io_fe.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/c7200_io_ge_e.py b/gns3server/compute/dynamips/adapters/c7200_io_ge_e.py
index 42f975c3..09f982ed 100644
--- a/gns3server/compute/dynamips/adapters/c7200_io_ge_e.py
+++ b/gns3server/compute/dynamips/adapters/c7200_io_ge_e.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/gt96100_fe.py b/gns3server/compute/dynamips/adapters/gt96100_fe.py
index 5551ebcd..6759319b 100644
--- a/gns3server/compute/dynamips/adapters/gt96100_fe.py
+++ b/gns3server/compute/dynamips/adapters/gt96100_fe.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/leopard_2fe.py b/gns3server/compute/dynamips/adapters/leopard_2fe.py
index a8e8ff5c..55d75c19 100644
--- a/gns3server/compute/dynamips/adapters/leopard_2fe.py
+++ b/gns3server/compute/dynamips/adapters/leopard_2fe.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/nm_16esw.py b/gns3server/compute/dynamips/adapters/nm_16esw.py
index 1cc01880..f346da8b 100644
--- a/gns3server/compute/dynamips/adapters/nm_16esw.py
+++ b/gns3server/compute/dynamips/adapters/nm_16esw.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/nm_1e.py b/gns3server/compute/dynamips/adapters/nm_1e.py
index 4c29097e..91932474 100644
--- a/gns3server/compute/dynamips/adapters/nm_1e.py
+++ b/gns3server/compute/dynamips/adapters/nm_1e.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/nm_1fe_tx.py b/gns3server/compute/dynamips/adapters/nm_1fe_tx.py
index 2e734236..bb03d3f3 100644
--- a/gns3server/compute/dynamips/adapters/nm_1fe_tx.py
+++ b/gns3server/compute/dynamips/adapters/nm_1fe_tx.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/nm_4e.py b/gns3server/compute/dynamips/adapters/nm_4e.py
index f13309ee..6a4db9f7 100644
--- a/gns3server/compute/dynamips/adapters/nm_4e.py
+++ b/gns3server/compute/dynamips/adapters/nm_4e.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/nm_4t.py b/gns3server/compute/dynamips/adapters/nm_4t.py
index 02773ab0..fa527c2f 100644
--- a/gns3server/compute/dynamips/adapters/nm_4t.py
+++ b/gns3server/compute/dynamips/adapters/nm_4t.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_2fe_tx.py b/gns3server/compute/dynamips/adapters/pa_2fe_tx.py
index 9b914d76..36119999 100644
--- a/gns3server/compute/dynamips/adapters/pa_2fe_tx.py
+++ b/gns3server/compute/dynamips/adapters/pa_2fe_tx.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_4e.py b/gns3server/compute/dynamips/adapters/pa_4e.py
index f379d53d..5f3288b7 100644
--- a/gns3server/compute/dynamips/adapters/pa_4e.py
+++ b/gns3server/compute/dynamips/adapters/pa_4e.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_4t.py b/gns3server/compute/dynamips/adapters/pa_4t.py
index ddc14fcd..beae7965 100644
--- a/gns3server/compute/dynamips/adapters/pa_4t.py
+++ b/gns3server/compute/dynamips/adapters/pa_4t.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_8e.py b/gns3server/compute/dynamips/adapters/pa_8e.py
index 38311742..b36173a2 100644
--- a/gns3server/compute/dynamips/adapters/pa_8e.py
+++ b/gns3server/compute/dynamips/adapters/pa_8e.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_8t.py b/gns3server/compute/dynamips/adapters/pa_8t.py
index 8a48c145..81d307c8 100644
--- a/gns3server/compute/dynamips/adapters/pa_8t.py
+++ b/gns3server/compute/dynamips/adapters/pa_8t.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_a1.py b/gns3server/compute/dynamips/adapters/pa_a1.py
index fe320de8..b20efb70 100644
--- a/gns3server/compute/dynamips/adapters/pa_a1.py
+++ b/gns3server/compute/dynamips/adapters/pa_a1.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_fe_tx.py b/gns3server/compute/dynamips/adapters/pa_fe_tx.py
index 4a90536e..3a5f3fdb 100644
--- a/gns3server/compute/dynamips/adapters/pa_fe_tx.py
+++ b/gns3server/compute/dynamips/adapters/pa_fe_tx.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_ge.py b/gns3server/compute/dynamips/adapters/pa_ge.py
index d1c330e4..dde8d7ed 100644
--- a/gns3server/compute/dynamips/adapters/pa_ge.py
+++ b/gns3server/compute/dynamips/adapters/pa_ge.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/pa_pos_oc3.py b/gns3server/compute/dynamips/adapters/pa_pos_oc3.py
index bfd35df3..d6b8487c 100644
--- a/gns3server/compute/dynamips/adapters/pa_pos_oc3.py
+++ b/gns3server/compute/dynamips/adapters/pa_pos_oc3.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/adapters/wic_1enet.py b/gns3server/compute/dynamips/adapters/wic_1enet.py
index 2d5e62b7..1c8c9805 100644
--- a/gns3server/compute/dynamips/adapters/wic_1enet.py
+++ b/gns3server/compute/dynamips/adapters/wic_1enet.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -16,7 +15,7 @@
# along with this program. If not, see .
-class WIC_1ENET(object):
+class WIC_1ENET:
"""
WIC-1ENET Ethernet
diff --git a/gns3server/compute/dynamips/adapters/wic_1t.py b/gns3server/compute/dynamips/adapters/wic_1t.py
index 2067246d..95bc57d4 100644
--- a/gns3server/compute/dynamips/adapters/wic_1t.py
+++ b/gns3server/compute/dynamips/adapters/wic_1t.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -16,7 +15,7 @@
# along with this program. If not, see .
-class WIC_1T(object):
+class WIC_1T:
"""
WIC-1T Serial
diff --git a/gns3server/compute/dynamips/adapters/wic_2t.py b/gns3server/compute/dynamips/adapters/wic_2t.py
index b5af954e..2f32db65 100644
--- a/gns3server/compute/dynamips/adapters/wic_2t.py
+++ b/gns3server/compute/dynamips/adapters/wic_2t.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -16,7 +15,7 @@
# along with this program. If not, see .
-class WIC_2T(object):
+class WIC_2T:
"""
WIC-2T Serial
diff --git a/gns3server/compute/dynamips/dynamips_error.py b/gns3server/compute/dynamips/dynamips_error.py
index ff2fac00..8f9140e0 100644
--- a/gns3server/compute/dynamips/dynamips_error.py
+++ b/gns3server/compute/dynamips/dynamips_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/dynamips_factory.py b/gns3server/compute/dynamips/dynamips_factory.py
index 220e0d23..7a47deca 100644
--- a/gns3server/compute/dynamips/dynamips_factory.py
+++ b/gns3server/compute/dynamips/dynamips_factory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -57,11 +56,11 @@ class DynamipsFactory:
if node_type == "dynamips":
if platform not in PLATFORMS:
- raise DynamipsError("Unknown router platform: {}".format(platform))
+ raise DynamipsError(f"Unknown router platform: {platform}")
return PLATFORMS[platform](name, node_id, project, manager, dynamips_id, **kwargs)
else:
if node_type not in DEVICES:
- raise DynamipsError("Unknown device type: {}".format(node_type))
+ raise DynamipsError(f"Unknown device type: {node_type}")
return DEVICES[node_type](name, node_id, project, manager, **kwargs)
diff --git a/gns3server/compute/dynamips/dynamips_hypervisor.py b/gns3server/compute/dynamips/dynamips_hypervisor.py
index b21e6494..714a7354 100644
--- a/gns3server/compute/dynamips/dynamips_hypervisor.py
+++ b/gns3server/compute/dynamips/dynamips_hypervisor.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -87,9 +86,9 @@ class DynamipsHypervisor:
break
if not connection_success:
- raise DynamipsError("Couldn't connect to hypervisor on {}:{} :{}".format(host, self._port, last_exception))
+ raise DynamipsError(f"Couldn't connect to hypervisor on {host}:{self._port} :{last_exception}")
else:
- log.info("Connected to Dynamips hypervisor on {}:{} after {:.4f} seconds".format(host, self._port, time.time() - begin))
+ log.info(f"Connected to Dynamips hypervisor on {host}:{self._port} after {time.time() - begin:.4f} seconds")
try:
version = await self.send("hypervisor version")
@@ -134,7 +133,7 @@ class DynamipsHypervisor:
await self._writer.drain()
self._writer.close()
except OSError as e:
- log.debug("Stopping hypervisor {}:{} {}".format(self._host, self._port, e))
+ log.debug(f"Stopping hypervisor {self._host}:{self._port} {e}")
self._reader = self._writer = None
async def reset(self):
@@ -152,9 +151,9 @@ class DynamipsHypervisor:
"""
# encase working_dir in quotes to protect spaces in the path
- await self.send('hypervisor working_dir "{}"'.format(working_dir))
+ await self.send(f'hypervisor working_dir "{working_dir}"')
self._working_dir = working_dir
- log.debug("Working directory set to {}".format(self._working_dir))
+ log.debug(f"Working directory set to {self._working_dir}")
@property
def working_dir(self):
@@ -244,7 +243,7 @@ class DynamipsHypervisor:
try:
command = command.strip() + '\n'
- log.debug("sending {}".format(command))
+ log.debug(f"sending {command}")
self._writer.write(command.encode())
await self._writer.drain()
except OSError as e:
@@ -269,7 +268,7 @@ class DynamipsHypervisor:
# Sometimes WinError 64 (ERROR_NETNAME_DELETED) is returned here on Windows.
# These happen if connection reset is received before IOCP could complete
# a previous operation. Ignore and try again....
- log.warning("Connection reset received while reading Dynamips response: {}".format(e))
+ log.warning(f"Connection reset received while reading Dynamips response: {e}")
continue
if not chunk:
if retries > max_retries:
@@ -300,7 +299,7 @@ class DynamipsHypervisor:
# Does it contain an error code?
if self.error_re.search(data[-1]):
- raise DynamipsError("Dynamips error when running command '{}': {}".format(command, data[-1][4:]))
+ raise DynamipsError(f"Dynamips error when running command '{command}': {data[-1][4:]}")
# Or does the last line begin with '100-'? Then we are done!
if data[-1][:4] == '100-':
@@ -314,5 +313,5 @@ class DynamipsHypervisor:
if self.success_re.search(data[index]):
data[index] = data[index][4:]
- log.debug("returned result {}".format(data))
+ log.debug(f"returned result {data}")
return data
diff --git a/gns3server/compute/dynamips/hypervisor.py b/gns3server/compute/dynamips/hypervisor.py
index d0ef0a2d..b08d5e88 100644
--- a/gns3server/compute/dynamips/hypervisor.py
+++ b/gns3server/compute/dynamips/hypervisor.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -124,20 +123,20 @@ class Hypervisor(DynamipsHypervisor):
if os.path.isdir(system_root):
env["PATH"] = system_root + ';' + env["PATH"]
try:
- log.info("Starting Dynamips: {}".format(self._command))
- self._stdout_file = os.path.join(self.working_dir, "dynamips_i{}_stdout.txt".format(self._id))
- log.info("Dynamips process logging to {}".format(self._stdout_file))
+ log.info(f"Starting Dynamips: {self._command}")
+ self._stdout_file = os.path.join(self.working_dir, f"dynamips_i{self._id}_stdout.txt")
+ log.info(f"Dynamips process logging to {self._stdout_file}")
with open(self._stdout_file, "w", encoding="utf-8") as fd:
self._process = await asyncio.create_subprocess_exec(*self._command,
stdout=fd,
stderr=subprocess.STDOUT,
cwd=self._working_dir,
env=env)
- log.info("Dynamips process started PID={}".format(self._process.pid))
+ log.info(f"Dynamips process started PID={self._process.pid}")
self._started = True
except (OSError, subprocess.SubprocessError) as e:
- log.error("Could not start Dynamips: {}".format(e))
- raise DynamipsError("Could not start Dynamips: {}".format(e))
+ log.error(f"Could not start Dynamips: {e}")
+ raise DynamipsError(f"Could not start Dynamips: {e}")
async def stop(self):
"""
@@ -145,7 +144,7 @@ class Hypervisor(DynamipsHypervisor):
"""
if self.is_running():
- log.info("Stopping Dynamips process PID={}".format(self._process.pid))
+ log.info(f"Stopping Dynamips process PID={self._process.pid}")
await DynamipsHypervisor.stop(self)
# give some time for the hypervisor to properly stop.
# time to delete UNIX NIOs for instance.
@@ -154,19 +153,19 @@ class Hypervisor(DynamipsHypervisor):
await wait_for_process_termination(self._process, timeout=3)
except asyncio.TimeoutError:
if self._process.returncode is None:
- log.warning("Dynamips process {} is still running... killing it".format(self._process.pid))
+ log.warning(f"Dynamips process {self._process.pid} is still running... killing it")
try:
self._process.kill()
except OSError as e:
- log.error("Cannot stop the Dynamips process: {}".format(e))
+ log.error(f"Cannot stop the Dynamips process: {e}")
if self._process.returncode is None:
- log.warning('Dynamips hypervisor with PID={} is still running'.format(self._process.pid))
+ log.warning(f'Dynamips hypervisor with PID={self._process.pid} is still running')
if self._stdout_file and os.access(self._stdout_file, os.W_OK):
try:
os.remove(self._stdout_file)
except OSError as e:
- log.warning("could not delete temporary Dynamips log file: {}".format(e))
+ log.warning(f"could not delete temporary Dynamips log file: {e}")
self._started = False
def read_stdout(self):
@@ -181,7 +180,7 @@ class Hypervisor(DynamipsHypervisor):
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
- log.warning("could not read {}: {}".format(self._stdout_file, e))
+ log.warning(f"could not read {self._stdout_file}: {e}")
return output
def is_running(self):
@@ -203,12 +202,12 @@ class Hypervisor(DynamipsHypervisor):
command = [self._path]
command.extend(["-N1"]) # use instance IDs for filenames
- command.extend(["-l", "dynamips_i{}_log.txt".format(self._id)]) # log file
+ command.extend(["-l", f"dynamips_i{self._id}_log.txt"]) # log file
# Dynamips cannot listen for hypervisor commands and for console connections on
# 2 different IP addresses.
# See https://github.com/GNS3/dynamips/issues/62
if self._console_host != "0.0.0.0" and self._console_host != "::":
- command.extend(["-H", "{}:{}".format(self._host, self._port)])
+ command.extend(["-H", f"{self._host}:{self._port}"])
else:
command.extend(["-H", str(self._port)])
return command
diff --git a/gns3server/compute/dynamips/nios/nio.py b/gns3server/compute/dynamips/nios/nio.py
index 17ed6a83..ae50a7ef 100644
--- a/gns3server/compute/dynamips/nios/nio.py
+++ b/gns3server/compute/dynamips/nios/nio.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -69,8 +68,8 @@ class NIO:
if self._input_filter or self._output_filter:
await self.unbind_filter("both")
self._capturing = False
- await self._hypervisor.send("nio delete {}".format(self._name))
- log.info("NIO {name} has been deleted".format(name=self._name))
+ await self._hypervisor.send(f"nio delete {self._name}")
+ log.info(f"NIO {self._name} has been deleted")
async def rename(self, new_name):
"""
@@ -79,9 +78,9 @@ class NIO:
:param new_name: new NIO name
"""
- await self._hypervisor.send("nio rename {name} {new_name}".format(name=self._name, new_name=new_name))
+ await self._hypervisor.send(f"nio rename {self._name} {new_name}")
- log.info("NIO {name} renamed to {new_name}".format(name=self._name, new_name=new_name))
+ log.info(f"NIO {self._name} renamed to {new_name}")
self._name = new_name
async def debug(self, debug):
@@ -91,7 +90,7 @@ class NIO:
:param debug: debug value (0 = disable, enable = 1)
"""
- await self._hypervisor.send("nio set_debug {name} {debug}".format(name=self._name, debug=debug))
+ await self._hypervisor.send(f"nio set_debug {self._name} {debug}")
async def start_packet_capture(self, pcap_output_file, pcap_data_link_type="DLT_EN10MB"):
"""
@@ -102,7 +101,7 @@ class NIO:
"""
await self.bind_filter("both", "capture")
- await self.setup_filter("both", '{} "{}"'.format(pcap_data_link_type, pcap_output_file))
+ await self.setup_filter("both", f'{pcap_data_link_type} "{pcap_output_file}"')
self._capturing = True
self._pcap_output_file = pcap_output_file
self._pcap_data_link_type = pcap_data_link_type
@@ -128,7 +127,7 @@ class NIO:
"""
if direction not in self._dynamips_direction:
- raise DynamipsError("Unknown direction {} to bind filter {}:".format(direction, filter_name))
+ raise DynamipsError(f"Unknown direction {direction} to bind filter {filter_name}:")
dynamips_direction = self._dynamips_direction[direction]
await self._hypervisor.send("nio bind_filter {name} {direction} {filter}".format(name=self._name,
@@ -151,7 +150,7 @@ class NIO:
"""
if direction not in self._dynamips_direction:
- raise DynamipsError("Unknown direction {} to unbind filter:".format(direction))
+ raise DynamipsError(f"Unknown direction {direction} to unbind filter:")
dynamips_direction = self._dynamips_direction[direction]
await self._hypervisor.send("nio unbind_filter {name} {direction}".format(name=self._name,
@@ -185,7 +184,7 @@ class NIO:
"""
if direction not in self._dynamips_direction:
- raise DynamipsError("Unknown direction {} to setup filter:".format(direction))
+ raise DynamipsError(f"Unknown direction {direction} to setup filter:")
dynamips_direction = self._dynamips_direction[direction]
await self._hypervisor.send("nio setup_filter {name} {direction} {options}".format(name=self._name,
@@ -227,7 +226,7 @@ class NIO:
:returns: NIO statistics (string with packets in, packets out, bytes in, bytes out)
"""
- stats = await self._hypervisor.send("nio get_stats {}".format(self._name))
+ stats = await self._hypervisor.send(f"nio get_stats {self._name}")
return stats[0]
async def reset_stats(self):
@@ -235,7 +234,7 @@ class NIO:
Resets statistics for this NIO.
"""
- await self._hypervisor.send("nio reset_stats {}".format(self._name))
+ await self._hypervisor.send(f"nio reset_stats {self._name}")
@property
def bandwidth(self):
@@ -254,7 +253,7 @@ class NIO:
:param bandwidth: bandwidth integer value (in Kb/s)
"""
- await self._hypervisor.send("nio set_bandwidth {name} {bandwidth}".format(name=self._name, bandwidth=bandwidth))
+ await self._hypervisor.send(f"nio set_bandwidth {self._name} {bandwidth}")
self._bandwidth = bandwidth
@property
diff --git a/gns3server/compute/dynamips/nios/nio_generic_ethernet.py b/gns3server/compute/dynamips/nios/nio_generic_ethernet.py
index 533de664..14cc9a2a 100644
--- a/gns3server/compute/dynamips/nios/nio_generic_ethernet.py
+++ b/gns3server/compute/dynamips/nios/nio_generic_ethernet.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -39,7 +38,7 @@ class NIOGenericEthernet(NIO):
def __init__(self, hypervisor, ethernet_device):
# create an unique name
- name = 'generic_ethernet-{}'.format(uuid.uuid4())
+ name = f'generic_ethernet-{uuid.uuid4()}'
self._ethernet_device = ethernet_device
super().__init__(name, hypervisor)
diff --git a/gns3server/compute/dynamips/nios/nio_linux_ethernet.py b/gns3server/compute/dynamips/nios/nio_linux_ethernet.py
index d032202b..a9a9b9ae 100644
--- a/gns3server/compute/dynamips/nios/nio_linux_ethernet.py
+++ b/gns3server/compute/dynamips/nios/nio_linux_ethernet.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -38,7 +37,7 @@ class NIOLinuxEthernet(NIO):
def __init__(self, hypervisor, ethernet_device):
# create an unique name
- name = 'linux_ethernet-{}'.format(uuid.uuid4())
+ name = f'linux_ethernet-{uuid.uuid4()}'
self._ethernet_device = ethernet_device
super().__init__(name, hypervisor)
diff --git a/gns3server/compute/dynamips/nios/nio_null.py b/gns3server/compute/dynamips/nios/nio_null.py
index 6524de40..792665e9 100644
--- a/gns3server/compute/dynamips/nios/nio_null.py
+++ b/gns3server/compute/dynamips/nios/nio_null.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -38,13 +37,13 @@ class NIONull(NIO):
def __init__(self, hypervisor):
# create an unique name
- name = 'null-{}'.format(uuid.uuid4())
+ name = f'null-{uuid.uuid4()}'
super().__init__(name, hypervisor)
async def create(self):
- await self._hypervisor.send("nio create_null {}".format(self._name))
- log.info("NIO NULL {name} created.".format(name=self._name))
+ await self._hypervisor.send(f"nio create_null {self._name}")
+ log.info(f"NIO NULL {self._name} created.")
def __json__(self):
diff --git a/gns3server/compute/dynamips/nios/nio_tap.py b/gns3server/compute/dynamips/nios/nio_tap.py
index ea5c8926..ba851856 100644
--- a/gns3server/compute/dynamips/nios/nio_tap.py
+++ b/gns3server/compute/dynamips/nios/nio_tap.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -39,14 +38,14 @@ class NIOTAP(NIO):
def __init__(self, hypervisor, tap_device):
# create an unique name
- name = 'tap-{}'.format(uuid.uuid4())
+ name = f'tap-{uuid.uuid4()}'
self._tap_device = tap_device
super().__init__(name, hypervisor)
async def create(self):
- await self._hypervisor.send("nio create_tap {name} {tap}".format(name=self._name, tap=self._tap_device))
- log.info("NIO TAP {name} created with device {device}".format(name=self._name, device=self._tap_device))
+ await self._hypervisor.send(f"nio create_tap {self._name} {self._tap_device}")
+ log.info(f"NIO TAP {self._name} created with device {self._tap_device}")
@property
def tap_device(self):
diff --git a/gns3server/compute/dynamips/nios/nio_udp.py b/gns3server/compute/dynamips/nios/nio_udp.py
index 987840b3..321f1a3f 100644
--- a/gns3server/compute/dynamips/nios/nio_udp.py
+++ b/gns3server/compute/dynamips/nios/nio_udp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -44,7 +43,7 @@ class NIOUDP(NIO):
def __init__(self, node, lport, rhost, rport):
# create an unique name
- name = 'udp-{}'.format(uuid.uuid4())
+ name = f'udp-{uuid.uuid4()}'
self._lport = lport
self._rhost = rhost
self._rport = rport
@@ -65,7 +64,7 @@ class NIOUDP(NIO):
return
self._local_tunnel_lport = self._node.manager.port_manager.get_free_udp_port(self._node.project)
self._local_tunnel_rport = self._node.manager.port_manager.get_free_udp_port(self._node.project)
- self._bridge_name = 'DYNAMIPS-{}-{}'.format(self._local_tunnel_lport, self._local_tunnel_rport)
+ self._bridge_name = f'DYNAMIPS-{self._local_tunnel_lport}-{self._local_tunnel_rport}'
await self._hypervisor.send("nio create_udp {name} {lport} {rhost} {rport}".format(name=self._name,
lport=self._local_tunnel_lport,
rhost='127.0.0.1',
diff --git a/gns3server/compute/dynamips/nios/nio_unix.py b/gns3server/compute/dynamips/nios/nio_unix.py
index 64eeca7b..b52e60b0 100644
--- a/gns3server/compute/dynamips/nios/nio_unix.py
+++ b/gns3server/compute/dynamips/nios/nio_unix.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -40,7 +39,7 @@ class NIOUNIX(NIO):
def __init__(self, hypervisor, local_file, remote_file):
# create an unique name
- name = 'unix-{}'.format(uuid.uuid4())
+ name = f'unix-{uuid.uuid4()}'
self._local_file = local_file
self._remote_file = remote_file
super().__init__(name, hypervisor)
diff --git a/gns3server/compute/dynamips/nios/nio_vde.py b/gns3server/compute/dynamips/nios/nio_vde.py
index 00701f3a..0da87846 100644
--- a/gns3server/compute/dynamips/nios/nio_vde.py
+++ b/gns3server/compute/dynamips/nios/nio_vde.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -40,7 +39,7 @@ class NIOVDE(NIO):
def __init__(self, hypervisor, control_file, local_file):
# create an unique name
- name = 'vde-{}'.format(uuid.uuid4())
+ name = f'vde-{uuid.uuid4()}'
self._control_file = control_file
self._local_file = local_file
super().__init__(name, hypervisor)
diff --git a/gns3server/compute/dynamips/nodes/atm_switch.py b/gns3server/compute/dynamips/nodes/atm_switch.py
index 46d975a9..8c902efc 100644
--- a/gns3server/compute/dynamips/nodes/atm_switch.py
+++ b/gns3server/compute/dynamips/nodes/atm_switch.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -71,8 +70,8 @@ class ATMSwitch(Device):
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir)
- await self._hypervisor.send('atmsw create "{}"'.format(self._name))
- log.info('ATM switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ await self._hypervisor.send(f'atmsw create "{self._name}"')
+ log.info(f'ATM switch "{self._name}" [{self._id}] has been created')
self._hypervisor.devices.append(self)
async def set_name(self, new_name):
@@ -82,7 +81,7 @@ class ATMSwitch(Device):
:param new_name: New name for this switch
"""
- await self._hypervisor.send('atmsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
+ await self._hypervisor.send(f'atmsw rename "{self._name}" "{new_name}"')
log.info('ATM switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name,
id=self._id,
new_name=new_name))
@@ -125,10 +124,10 @@ class ATMSwitch(Device):
if self._hypervisor:
try:
- await self._hypervisor.send('atmsw delete "{}"'.format(self._name))
- log.info('ATM switch "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id))
+ await self._hypervisor.send(f'atmsw delete "{self._name}"')
+ log.info(f'ATM switch "{self._name}" [{self._id}] has been deleted')
except DynamipsError:
- log.debug("Could not properly delete ATM switch {}".format(self._name))
+ log.debug(f"Could not properly delete ATM switch {self._name}")
if self._hypervisor and self in self._hypervisor.devices:
self._hypervisor.devices.remove(self)
if self._hypervisor and not self._hypervisor.devices:
@@ -162,7 +161,7 @@ class ATMSwitch(Device):
"""
if port_number in self._nios:
- raise DynamipsError("Port {} isn't free".format(port_number))
+ raise DynamipsError(f"Port {port_number} isn't free")
log.info('ATM switch "{name}" [id={id}]: NIO {nio} bound to port {port}'.format(name=self._name,
id=self._id,
@@ -180,7 +179,7 @@ class ATMSwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
await self.stop_capture(port_number)
# remove VCs mapped with the port
@@ -235,12 +234,12 @@ class ATMSwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
nio = self._nios[port_number]
if not nio:
- raise DynamipsError("Port {} is not connected".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not connected")
return nio
@@ -451,7 +450,7 @@ class ATMSwitch(Device):
data_link_type = data_link_type[4:]
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
- raise DynamipsError("Port {} has already a filter applied".format(port_number))
+ raise DynamipsError(f"Port {port_number} has already a filter applied")
await nio.start_packet_capture(output_file, data_link_type)
log.info('ATM switch "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name,
diff --git a/gns3server/compute/dynamips/nodes/bridge.py b/gns3server/compute/dynamips/nodes/bridge.py
index 7146865c..2b87117a 100644
--- a/gns3server/compute/dynamips/nodes/bridge.py
+++ b/gns3server/compute/dynamips/nodes/bridge.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -47,7 +46,7 @@ class Bridge(Device):
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir)
- await self._hypervisor.send('nio_bridge create "{}"'.format(self._name))
+ await self._hypervisor.send(f'nio_bridge create "{self._name}"')
self._hypervisor.devices.append(self)
async def set_name(self, new_name):
@@ -80,7 +79,7 @@ class Bridge(Device):
if self._hypervisor and self in self._hypervisor.devices:
self._hypervisor.devices.remove(self)
if self._hypervisor and not self._hypervisor.devices:
- await self._hypervisor.send('nio_bridge delete "{}"'.format(self._name))
+ await self._hypervisor.send(f'nio_bridge delete "{self._name}"')
async def add_nio(self, nio):
"""
@@ -89,7 +88,7 @@ class Bridge(Device):
:param nio: NIO instance to add
"""
- await self._hypervisor.send('nio_bridge add_nio "{name}" {nio}'.format(name=self._name, nio=nio))
+ await self._hypervisor.send(f'nio_bridge add_nio "{self._name}" {nio}')
self._nios.append(nio)
async def remove_nio(self, nio):
@@ -99,7 +98,7 @@ class Bridge(Device):
:param nio: NIO instance to remove
"""
if self._hypervisor:
- await self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
+ await self._hypervisor.send(f'nio_bridge remove_nio "{self._name}" {nio}')
self._nios.remove(nio)
@property
diff --git a/gns3server/compute/dynamips/nodes/c1700.py b/gns3server/compute/dynamips/nodes/c1700.py
index cdc0f343..200bc07f 100644
--- a/gns3server/compute/dynamips/nodes/c1700.py
+++ b/gns3server/compute/dynamips/nodes/c1700.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -112,7 +111,7 @@ class C1700(Router):
1720, 1721, 1750, 1751 or 1760
"""
- await self._hypervisor.send('c1700 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))
+ await self._hypervisor.send(f'c1700 set_chassis "{self._name}" {chassis}')
log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name,
id=self._id,
@@ -138,7 +137,7 @@ class C1700(Router):
:param iomem: I/O memory size
"""
- await self._hypervisor.send('c1700 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
+ await self._hypervisor.send(f'c1700 set_iomem "{self._name}" {iomem}')
log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,
diff --git a/gns3server/compute/dynamips/nodes/c2600.py b/gns3server/compute/dynamips/nodes/c2600.py
index e2c3ea13..fee6b264 100644
--- a/gns3server/compute/dynamips/nodes/c2600.py
+++ b/gns3server/compute/dynamips/nodes/c2600.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -122,7 +121,7 @@ class C2600(Router):
2620XM, 2621XM, 2650XM or 2651XM
"""
- await self._hypervisor.send('c2600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))
+ await self._hypervisor.send(f'c2600 set_chassis "{self._name}" {chassis}')
log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name,
id=self._id,
@@ -147,7 +146,7 @@ class C2600(Router):
:param iomem: I/O memory size
"""
- await self._hypervisor.send('c2600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
+ await self._hypervisor.send(f'c2600 set_iomem "{self._name}" {iomem}')
log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,
diff --git a/gns3server/compute/dynamips/nodes/c2691.py b/gns3server/compute/dynamips/nodes/c2691.py
index c946b391..fdeb8d87 100644
--- a/gns3server/compute/dynamips/nodes/c2691.py
+++ b/gns3server/compute/dynamips/nodes/c2691.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -88,7 +87,7 @@ class C2691(Router):
:param iomem: I/O memory size
"""
- await self._hypervisor.send('c2691 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
+ await self._hypervisor.send(f'c2691 set_iomem "{self._name}" {iomem}')
log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,
diff --git a/gns3server/compute/dynamips/nodes/c3600.py b/gns3server/compute/dynamips/nodes/c3600.py
index a5341f6e..7fed94f7 100644
--- a/gns3server/compute/dynamips/nodes/c3600.py
+++ b/gns3server/compute/dynamips/nodes/c3600.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -106,7 +105,7 @@ class C3600(Router):
:param: chassis string: 3620, 3640 or 3660
"""
- await self._hypervisor.send('c3600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))
+ await self._hypervisor.send(f'c3600 set_chassis "{self._name}" {chassis}')
log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name,
id=self._id,
@@ -132,7 +131,7 @@ class C3600(Router):
:param iomem: I/O memory size
"""
- await self._hypervisor.send('c3600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
+ await self._hypervisor.send(f'c3600 set_iomem "{self._name}" {iomem}')
log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,
diff --git a/gns3server/compute/dynamips/nodes/c3725.py b/gns3server/compute/dynamips/nodes/c3725.py
index 5ba52e47..1cf0a399 100644
--- a/gns3server/compute/dynamips/nodes/c3725.py
+++ b/gns3server/compute/dynamips/nodes/c3725.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -88,7 +87,7 @@ class C3725(Router):
:param iomem: I/O memory size
"""
- await self._hypervisor.send('c3725 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
+ await self._hypervisor.send(f'c3725 set_iomem "{self._name}" {iomem}')
log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,
diff --git a/gns3server/compute/dynamips/nodes/c3745.py b/gns3server/compute/dynamips/nodes/c3745.py
index cdbc6b49..e680fee4 100644
--- a/gns3server/compute/dynamips/nodes/c3745.py
+++ b/gns3server/compute/dynamips/nodes/c3745.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -88,7 +87,7 @@ class C3745(Router):
:param iomem: I/O memory size
"""
- await self._hypervisor.send('c3745 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
+ await self._hypervisor.send(f'c3745 set_iomem "{self._name}" {iomem}')
log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,
diff --git a/gns3server/compute/dynamips/nodes/c7200.py b/gns3server/compute/dynamips/nodes/c7200.py
index 6ebf9abb..764915c3 100644
--- a/gns3server/compute/dynamips/nodes/c7200.py
+++ b/gns3server/compute/dynamips/nodes/c7200.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -123,7 +122,7 @@ class C7200(Router):
if (await self.is_running()):
raise DynamipsError("Cannot change NPE on running router")
- await self._hypervisor.send('c7200 set_npe "{name}" {npe}'.format(name=self._name, npe=npe))
+ await self._hypervisor.send(f'c7200 set_npe "{self._name}" {npe}')
log.info('Router "{name}" [{id}]: NPE updated from {old_npe} to {new_npe}'.format(name=self._name,
id=self._id,
@@ -148,7 +147,7 @@ class C7200(Router):
:returns: midplane model string (e.g. "vxr" or "std")
"""
- await self._hypervisor.send('c7200 set_midplane "{name}" {midplane}'.format(name=self._name, midplane=midplane))
+ await self._hypervisor.send(f'c7200 set_midplane "{self._name}" {midplane}')
log.info('Router "{name}" [{id}]: midplane updated from {old_midplane} to {new_midplane}'.format(name=self._name,
id=self._id,
diff --git a/gns3server/compute/dynamips/nodes/device.py b/gns3server/compute/dynamips/nodes/device.py
index 8ac8e56d..898f0ef9 100644
--- a/gns3server/compute/dynamips/nodes/device.py
+++ b/gns3server/compute/dynamips/nodes/device.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/dynamips/nodes/ethernet_hub.py b/gns3server/compute/dynamips/nodes/ethernet_hub.py
index 264dc119..0253f28e 100644
--- a/gns3server/compute/dynamips/nodes/ethernet_hub.py
+++ b/gns3server/compute/dynamips/nodes/ethernet_hub.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -50,7 +49,7 @@ class EthernetHub(Bridge):
self._ports = []
for port_number in range(0, 8):
self._ports.append({"port_number": port_number,
- "name": "Ethernet{}".format(port_number)})
+ "name": f"Ethernet{port_number}"})
else:
self._ports = ports
@@ -86,7 +85,7 @@ class EthernetHub(Bridge):
port_number = 0
for port in ports:
- port["name"] = "Ethernet{}".format(port_number)
+ port["name"] = f"Ethernet{port_number}"
port["port_number"] = port_number
port_number += 1
@@ -95,7 +94,7 @@ class EthernetHub(Bridge):
async def create(self):
await Bridge.create(self)
- log.info('Ethernet hub "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ log.info(f'Ethernet hub "{self._name}" [{self._id}] has been created')
@property
def mappings(self):
@@ -121,9 +120,9 @@ class EthernetHub(Bridge):
try:
await Bridge.delete(self)
- log.info('Ethernet hub "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id))
+ log.info(f'Ethernet hub "{self._name}" [{self._id}] has been deleted')
except DynamipsError:
- log.debug("Could not properly delete Ethernet hub {}".format(self._name))
+ log.debug(f"Could not properly delete Ethernet hub {self._name}")
if self._hypervisor and not self._hypervisor.devices:
await self.hypervisor.stop()
self._hypervisor = None
@@ -138,10 +137,10 @@ class EthernetHub(Bridge):
"""
if port_number not in [port["port_number"] for port in self._ports]:
- raise DynamipsError("Port {} doesn't exist".format(port_number))
+ raise DynamipsError(f"Port {port_number} doesn't exist")
if port_number in self._mappings:
- raise DynamipsError("Port {} isn't free".format(port_number))
+ raise DynamipsError(f"Port {port_number} isn't free")
await Bridge.add_nio(self, nio)
@@ -161,7 +160,7 @@ class EthernetHub(Bridge):
"""
if port_number not in self._mappings:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
await self.stop_capture(port_number)
nio = self._mappings[port_number]
@@ -187,12 +186,12 @@ class EthernetHub(Bridge):
"""
if port_number not in self._mappings:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
nio = self._mappings[port_number]
if not nio:
- raise DynamipsError("Port {} is not connected".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not connected")
return nio
@@ -211,7 +210,7 @@ class EthernetHub(Bridge):
data_link_type = data_link_type[4:]
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
- raise DynamipsError("Port {} has already a filter applied".format(port_number))
+ raise DynamipsError(f"Port {port_number} has already a filter applied")
await nio.start_packet_capture(output_file, data_link_type)
log.info('Ethernet hub "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name,
diff --git a/gns3server/compute/dynamips/nodes/ethernet_switch.py b/gns3server/compute/dynamips/nodes/ethernet_switch.py
index 627f5447..0ccf7094 100644
--- a/gns3server/compute/dynamips/nodes/ethernet_switch.py
+++ b/gns3server/compute/dynamips/nodes/ethernet_switch.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -103,7 +102,7 @@ class EthernetSwitch(Device):
self._ports = []
for port_number in range(0, 8):
self._ports.append({"port_number": port_number,
- "name": "Ethernet{}".format(port_number),
+ "name": f"Ethernet{port_number}",
"type": "access",
"vlan": 1})
else:
@@ -140,7 +139,7 @@ class EthernetSwitch(Device):
if self._console_type != console_type:
if console_type == "telnet":
self.project.emit("log.warning", {
- "message": '"{name}": Telnet access for switches is not available in this version of GNS3'.format(name=self._name)})
+ "message": f'"{self._name}": Telnet access for switches is not available in this version of GNS3'})
self._console_type = console_type
@property
@@ -166,7 +165,7 @@ class EthernetSwitch(Device):
port_number = 0
for port in ports:
- port["name"] = "Ethernet{}".format(port_number)
+ port["name"] = f"Ethernet{port_number}"
port["port_number"] = port_number
port_number += 1
@@ -184,8 +183,8 @@ class EthernetSwitch(Device):
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir)
- await self._hypervisor.send('ethsw create "{}"'.format(self._name))
- log.info('Ethernet switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ await self._hypervisor.send(f'ethsw create "{self._name}"')
+ log.info(f'Ethernet switch "{self._name}" [{self._id}] has been created')
#self._telnet_shell = EthernetSwitchConsole(self)
#self._telnet_shell.prompt = self._name + '> '
@@ -195,7 +194,7 @@ class EthernetSwitch(Device):
#except OSError as e:
# self.project.emit("log.warning", {"message": "Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)})
if self._console_type == "telnet":
- self.project.emit("log.warning", {"message": '"{name}": Telnet access for switches is not available in this version of GNS3'.format(name=self._name)})
+ self.project.emit("log.warning", {"message": f'"{self._name}": Telnet access for switches is not available in this version of GNS3'})
self._hypervisor.devices.append(self)
async def set_name(self, new_name):
@@ -205,7 +204,7 @@ class EthernetSwitch(Device):
:param new_name: New name for this switch
"""
- await self._hypervisor.send('ethsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
+ await self._hypervisor.send(f'ethsw rename "{self._name}" "{new_name}"')
log.info('Ethernet switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name,
id=self._id,
new_name=new_name))
@@ -249,10 +248,10 @@ class EthernetSwitch(Device):
self.manager.port_manager.release_tcp_port(self._console, self._project)
if self._hypervisor:
try:
- await self._hypervisor.send('ethsw delete "{}"'.format(self._name))
- log.info('Ethernet switch "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id))
+ await self._hypervisor.send(f'ethsw delete "{self._name}"')
+ log.info(f'Ethernet switch "{self._name}" [{self._id}] has been deleted')
except DynamipsError:
- log.debug("Could not properly delete Ethernet switch {}".format(self._name))
+ log.debug(f"Could not properly delete Ethernet switch {self._name}")
if self._hypervisor and self in self._hypervisor.devices:
self._hypervisor.devices.remove(self)
if self._hypervisor and not self._hypervisor.devices:
@@ -269,9 +268,9 @@ class EthernetSwitch(Device):
"""
if port_number in self._nios:
- raise DynamipsError("Port {} isn't free".format(port_number))
+ raise DynamipsError(f"Port {port_number} isn't free")
- await self._hypervisor.send('ethsw add_nio "{name}" {nio}'.format(name=self._name, nio=nio))
+ await self._hypervisor.send(f'ethsw add_nio "{self._name}" {nio}')
log.info('Ethernet switch "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name,
id=self._id,
@@ -293,14 +292,14 @@ class EthernetSwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
await self.stop_capture(port_number)
nio = self._nios[port_number]
if isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
if self._hypervisor:
- await self._hypervisor.send('ethsw remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
+ await self._hypervisor.send(f'ethsw remove_nio "{self._name}" {nio}')
log.info('Ethernet switch "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name,
id=self._id,
@@ -323,12 +322,12 @@ class EthernetSwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
nio = self._nios[port_number]
if not nio:
- raise DynamipsError("Port {} is not connected".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not connected")
return nio
@@ -356,7 +355,7 @@ class EthernetSwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
nio = self._nios[port_number]
await self._hypervisor.send('ethsw set_access_port "{name}" {nio} {vlan_id}'.format(name=self._name,
@@ -378,7 +377,7 @@ class EthernetSwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
nio = self._nios[port_number]
await self._hypervisor.send('ethsw set_dot1q_port "{name}" {nio} {native_vlan}'.format(name=self._name,
@@ -401,11 +400,11 @@ class EthernetSwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
nio = self._nios[port_number]
if ethertype != "0x8100" and parse_version(self.hypervisor.version) < parse_version('0.2.16'):
- raise DynamipsError("Dynamips version required is >= 0.2.16 to change the default QinQ Ethernet type, detected version is {}".format(self.hypervisor.version))
+ raise DynamipsError(f"Dynamips version required is >= 0.2.16 to change the default QinQ Ethernet type, detected version is {self.hypervisor.version}")
await self._hypervisor.send('ethsw set_qinq_port "{name}" {nio} {outer_vlan} {ethertype}'.format(name=self._name,
nio=nio,
@@ -426,7 +425,7 @@ class EthernetSwitch(Device):
:returns: list of entries (Ethernet address, VLAN, NIO)
"""
- mac_addr_table = await self._hypervisor.send('ethsw show_mac_addr_table "{}"'.format(self._name))
+ mac_addr_table = await self._hypervisor.send(f'ethsw show_mac_addr_table "{self._name}"')
return mac_addr_table
async def clear_mac_addr_table(self):
@@ -434,7 +433,7 @@ class EthernetSwitch(Device):
Clears the MAC address table for this Ethernet switch.
"""
- await self._hypervisor.send('ethsw clear_mac_addr_table "{}"'.format(self._name))
+ await self._hypervisor.send(f'ethsw clear_mac_addr_table "{self._name}"')
async def start_capture(self, port_number, output_file, data_link_type="DLT_EN10MB"):
"""
@@ -451,7 +450,7 @@ class EthernetSwitch(Device):
data_link_type = data_link_type[4:]
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
- raise DynamipsError("Port {} has already a filter applied".format(port_number))
+ raise DynamipsError(f"Port {port_number} has already a filter applied")
await nio.start_packet_capture(output_file, data_link_type)
log.info('Ethernet switch "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name,
diff --git a/gns3server/compute/dynamips/nodes/frame_relay_switch.py b/gns3server/compute/dynamips/nodes/frame_relay_switch.py
index 0e7040a0..679e005a 100644
--- a/gns3server/compute/dynamips/nodes/frame_relay_switch.py
+++ b/gns3server/compute/dynamips/nodes/frame_relay_switch.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -70,8 +69,8 @@ class FrameRelaySwitch(Device):
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir)
- await self._hypervisor.send('frsw create "{}"'.format(self._name))
- log.info('Frame Relay switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ await self._hypervisor.send(f'frsw create "{self._name}"')
+ log.info(f'Frame Relay switch "{self._name}" [{self._id}] has been created')
self._hypervisor.devices.append(self)
async def set_name(self, new_name):
@@ -81,7 +80,7 @@ class FrameRelaySwitch(Device):
:param new_name: New name for this switch
"""
- await self._hypervisor.send('frsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
+ await self._hypervisor.send(f'frsw rename "{self._name}" "{new_name}"')
log.info('Frame Relay switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name,
id=self._id,
new_name=new_name))
@@ -124,10 +123,10 @@ class FrameRelaySwitch(Device):
if self._hypervisor:
try:
- await self._hypervisor.send('frsw delete "{}"'.format(self._name))
- log.info('Frame Relay switch "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id))
+ await self._hypervisor.send(f'frsw delete "{self._name}"')
+ log.info(f'Frame Relay switch "{self._name}" [{self._id}] has been deleted')
except DynamipsError:
- log.debug("Could not properly delete Frame relay switch {}".format(self._name))
+ log.debug(f"Could not properly delete Frame relay switch {self._name}")
if self._hypervisor and self in self._hypervisor.devices:
self._hypervisor.devices.remove(self)
@@ -162,7 +161,7 @@ class FrameRelaySwitch(Device):
"""
if port_number in self._nios:
- raise DynamipsError("Port {} isn't free".format(port_number))
+ raise DynamipsError(f"Port {port_number} isn't free")
log.info('Frame Relay switch "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name,
id=self._id,
@@ -182,7 +181,7 @@ class FrameRelaySwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
await self.stop_capture(port_number)
# remove VCs mapped with the port
@@ -221,12 +220,12 @@ class FrameRelaySwitch(Device):
"""
if port_number not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not allocated")
nio = self._nios[port_number]
if not nio:
- raise DynamipsError("Port {} is not connected".format(port_number))
+ raise DynamipsError(f"Port {port_number} is not connected")
return nio
@@ -299,10 +298,10 @@ class FrameRelaySwitch(Device):
"""
if port1 not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port1))
+ raise DynamipsError(f"Port {port1} is not allocated")
if port2 not in self._nios:
- raise DynamipsError("Port {} is not allocated".format(port2))
+ raise DynamipsError(f"Port {port2} is not allocated")
nio1 = self._nios[port1]
nio2 = self._nios[port2]
@@ -337,7 +336,7 @@ class FrameRelaySwitch(Device):
data_link_type = data_link_type[4:]
if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
- raise DynamipsError("Port {} has already a filter applied".format(port_number))
+ raise DynamipsError(f"Port {port_number} has already a filter applied")
await nio.start_packet_capture(output_file, data_link_type)
log.info('Frame relay switch "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name,
diff --git a/gns3server/compute/dynamips/nodes/router.py b/gns3server/compute/dynamips/nodes/router.py
index 82a89f06..49ac207b 100644
--- a/gns3server/compute/dynamips/nodes/router.py
+++ b/gns3server/compute/dynamips/nodes/router.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -71,7 +70,7 @@ class Router(BaseNode):
try:
os.makedirs(os.path.join(self._working_directory, "configs"), exist_ok=True)
except OSError as e:
- raise DynamipsError("Can't create the dynamips config directory: {}".format(str(e)))
+ raise DynamipsError(f"Can't create the dynamips config directory: {str(e)}")
if dynamips_id:
self._convert_before_2_0_0_b3(dynamips_id)
@@ -124,21 +123,21 @@ class Router(BaseNode):
conversion due to case of remote servers
"""
dynamips_dir = self.project.module_working_directory(self.manager.module_name.lower())
- for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", "i{}_*".format(dynamips_id))):
+ for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", f"i{dynamips_id}_*")):
dst = os.path.join(self._working_directory, "configs", os.path.basename(path))
if not os.path.exists(dst):
try:
shutil.move(path, dst)
except OSError as e:
- log.error("Can't move {}: {}".format(path, str(e)))
+ log.error(f"Can't move {path}: {str(e)}")
continue
- for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "*_i{}_*".format(dynamips_id))):
+ for path in glob.glob(os.path.join(glob.escape(dynamips_dir), f"*_i{dynamips_id}_*")):
dst = os.path.join(self._working_directory, os.path.basename(path))
if not os.path.exists(dst):
try:
shutil.move(path, dst)
except OSError as e:
- log.error("Can't move {}: {}".format(path, str(e)))
+ log.error(f"Can't move {path}: {str(e)}")
continue
def __json__(self):
@@ -227,13 +226,13 @@ class Router(BaseNode):
id=self._id))
if self._console is not None:
- await self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console))
+ await self._hypervisor.send(f'vm set_con_tcp_port "{self._name}" {self._console}')
if self.aux is not None:
- await self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=self.aux))
+ await self._hypervisor.send(f'vm set_aux_tcp_port "{self._name}" {self.aux}')
# get the default base MAC address
- mac_addr = await self._hypervisor.send('{platform} get_mac_addr "{name}"'.format(platform=self._platform, name=self._name))
+ mac_addr = await self._hypervisor.send(f'{self._platform} get_mac_addr "{self._name}"')
self._mac_addr = mac_addr[0]
self._hypervisor.devices.append(self)
@@ -245,9 +244,9 @@ class Router(BaseNode):
:returns: inactive, shutting down, running or suspended.
"""
- status = await self._hypervisor.send('vm get_status "{name}"'.format(name=self._name))
+ status = await self._hypervisor.send(f'vm get_status "{self._name}"')
if len(status) == 0:
- raise DynamipsError("Can't get vm {name} status".format(name=self._name))
+ raise DynamipsError(f"Can't get vm {self._name} status")
return self._status[int(status[0])]
async def start(self):
@@ -263,28 +262,28 @@ class Router(BaseNode):
if not os.path.isfile(self._image) or not os.path.exists(self._image):
if os.path.islink(self._image):
- raise DynamipsError('IOS image "{}" linked to "{}" is not accessible'.format(self._image, os.path.realpath(self._image)))
+ raise DynamipsError(f'IOS image "{self._image}" linked to "{os.path.realpath(self._image)}" is not accessible')
else:
- raise DynamipsError('IOS image "{}" is not accessible'.format(self._image))
+ raise DynamipsError(f'IOS image "{self._image}" is not accessible')
try:
with open(self._image, "rb") as f:
# read the first 7 bytes of the file.
elf_header_start = f.read(7)
except OSError as e:
- raise DynamipsError('Cannot read ELF header for IOS image "{}": {}'.format(self._image, e))
+ raise DynamipsError(f'Cannot read ELF header for IOS image "{self._image}": {e}')
# IOS images must start with the ELF magic number, be 32-bit, big endian and have an ELF version of 1
if elf_header_start != b'\x7fELF\x01\x02\x01':
- raise DynamipsError('"{}" is not a valid IOS image'.format(self._image))
+ raise DynamipsError(f'"{self._image}" is not a valid IOS image')
# check if there is enough RAM to run
if not self._ghost_flag:
self.check_available_ram(self.ram)
# config paths are relative to the working directory configured on Dynamips hypervisor
- startup_config_path = os.path.join("configs", "i{}_startup-config.cfg".format(self._dynamips_id))
- private_config_path = os.path.join("configs", "i{}_private-config.cfg".format(self._dynamips_id))
+ startup_config_path = os.path.join("configs", f"i{self._dynamips_id}_startup-config.cfg")
+ private_config_path = os.path.join("configs", f"i{self._dynamips_id}_private-config.cfg")
if not os.path.exists(os.path.join(self._working_directory, private_config_path)) or \
not os.path.getsize(os.path.join(self._working_directory, private_config_path)):
@@ -295,9 +294,9 @@ class Router(BaseNode):
name=self._name,
startup=startup_config_path,
private=private_config_path))
- await self._hypervisor.send('vm start "{name}"'.format(name=self._name))
+ await self._hypervisor.send(f'vm start "{self._name}"')
self.status = "started"
- log.info('router "{name}" [{id}] has been started'.format(name=self._name, id=self._id))
+ log.info(f'router "{self._name}" [{self._id}] has been started')
self._memory_watcher = FileWatcher(self._memory_files(), self._memory_changed, strategy='hash', delay=30)
monitor_process(self._hypervisor.process, self._termination_callback)
@@ -313,7 +312,7 @@ class Router(BaseNode):
self.status = "stopped"
log.info("Dynamips hypervisor process has stopped, return code: %d", returncode)
if returncode != 0:
- self.project.emit("log.error", {"message": "Dynamips hypervisor process has stopped, return code: {}\n{}".format(returncode, self._hypervisor.read_stdout())})
+ self.project.emit("log.error", {"message": f"Dynamips hypervisor process has stopped, return code: {returncode}\n{self._hypervisor.read_stdout()}"})
async def stop(self):
"""
@@ -323,11 +322,11 @@ class Router(BaseNode):
status = await self.get_status()
if status != "inactive":
try:
- await self._hypervisor.send('vm stop "{name}"'.format(name=self._name))
+ await self._hypervisor.send(f'vm stop "{self._name}"')
except DynamipsError as e:
- log.warning("Could not stop {}: {}".format(self._name, e))
+ log.warning(f"Could not stop {self._name}: {e}")
self.status = "stopped"
- log.info('Router "{name}" [{id}] has been stopped'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}] has been stopped')
if self._memory_watcher:
self._memory_watcher.close()
self._memory_watcher = None
@@ -348,9 +347,9 @@ class Router(BaseNode):
status = await self.get_status()
if status == "running":
- await self._hypervisor.send('vm suspend "{name}"'.format(name=self._name))
+ await self._hypervisor.send(f'vm suspend "{self._name}"')
self.status = "suspended"
- log.info('Router "{name}" [{id}] has been suspended'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}] has been suspended')
async def resume(self):
"""
@@ -359,9 +358,9 @@ class Router(BaseNode):
status = await self.get_status()
if status == "suspended":
- await self._hypervisor.send('vm resume "{name}"'.format(name=self._name))
+ await self._hypervisor.send(f'vm resume "{self._name}"')
self.status = "started"
- log.info('Router "{name}" [{id}] has been resumed'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}] has been resumed')
async def is_running(self):
"""
@@ -393,26 +392,26 @@ class Router(BaseNode):
if self._hypervisor and not self._hypervisor.devices:
try:
await self.stop()
- await self._hypervisor.send('vm delete "{}"'.format(self._name))
+ await self._hypervisor.send(f'vm delete "{self._name}"')
except DynamipsError as e:
- log.warning("Could not stop and delete {}: {}".format(self._name, e))
+ log.warning(f"Could not stop and delete {self._name}: {e}")
await self.hypervisor.stop()
if self._auto_delete_disks:
# delete nvram and disk files
- files = glob.glob(os.path.join(glob.escape(self._working_directory), "{}_i{}_disk[0-1]".format(self.platform, self.dynamips_id)))
- files += glob.glob(os.path.join(glob.escape(self._working_directory), "{}_i{}_slot[0-1]".format(self.platform, self.dynamips_id)))
- files += glob.glob(os.path.join(glob.escape(self._working_directory), "{}_i{}_nvram".format(self.platform, self.dynamips_id)))
- files += glob.glob(os.path.join(glob.escape(self._working_directory), "{}_i{}_flash[0-1]".format(self.platform, self.dynamips_id)))
- files += glob.glob(os.path.join(glob.escape(self._working_directory), "{}_i{}_rom".format(self.platform, self.dynamips_id)))
- files += glob.glob(os.path.join(glob.escape(self._working_directory), "{}_i{}_bootflash".format(self.platform, self.dynamips_id)))
- files += glob.glob(os.path.join(glob.escape(self._working_directory), "{}_i{}_ssa".format(self.platform, self.dynamips_id)))
+ files = glob.glob(os.path.join(glob.escape(self._working_directory), f"{self.platform}_i{self.dynamips_id}_disk[0-1]"))
+ files += glob.glob(os.path.join(glob.escape(self._working_directory), f"{self.platform}_i{self.dynamips_id}_slot[0-1]"))
+ files += glob.glob(os.path.join(glob.escape(self._working_directory), f"{self.platform}_i{self.dynamips_id}_nvram"))
+ files += glob.glob(os.path.join(glob.escape(self._working_directory), f"{self.platform}_i{self.dynamips_id}_flash[0-1]"))
+ files += glob.glob(os.path.join(glob.escape(self._working_directory), f"{self.platform}_i{self.dynamips_id}_rom"))
+ files += glob.glob(os.path.join(glob.escape(self._working_directory), f"{self.platform}_i{self.dynamips_id}_bootflash"))
+ files += glob.glob(os.path.join(glob.escape(self._working_directory), f"{self.platform}_i{self.dynamips_id}_ssa"))
for file in files:
try:
- log.debug("Deleting file {}".format(file))
+ log.debug(f"Deleting file {file}")
await wait_run_in_executor(os.remove, file)
except OSError as e:
- log.warning("Could not delete file {}: {}".format(file, e))
+ log.warning(f"Could not delete file {file}: {e}")
continue
self.manager.release_dynamips_id(self.project.id, self.dynamips_id)
@@ -464,7 +463,7 @@ class Router(BaseNode):
:param level: level number
"""
- await self._hypervisor.send('vm set_debug_level "{name}" {level}'.format(name=self._name, level=level))
+ await self._hypervisor.send(f'vm set_debug_level "{self._name}" {level}')
@property
def image(self):
@@ -486,7 +485,7 @@ class Router(BaseNode):
image = self.manager.get_abs_image_path(image, self.project.path)
- await self._hypervisor.send('vm set_ios "{name}" "{image}"'.format(name=self._name, image=image))
+ await self._hypervisor.send(f'vm set_ios "{self._name}" "{image}"')
log.info('Router "{name}" [{id}]: has a new IOS image set: "{image}"'.format(name=self._name,
id=self._id,
@@ -514,7 +513,7 @@ class Router(BaseNode):
if self._ram == ram:
return
- await self._hypervisor.send('vm set_ram "{name}" {ram}'.format(name=self._name, ram=ram))
+ await self._hypervisor.send(f'vm set_ram "{self._name}" {ram}')
log.info('Router "{name}" [{id}]: RAM updated from {old_ram}MB to {new_ram}MB'.format(name=self._name,
id=self._id,
old_ram=self._ram,
@@ -541,7 +540,7 @@ class Router(BaseNode):
if self._nvram == nvram:
return
- await self._hypervisor.send('vm set_nvram "{name}" {nvram}'.format(name=self._name, nvram=nvram))
+ await self._hypervisor.send(f'vm set_nvram "{self._name}" {nvram}')
log.info('Router "{name}" [{id}]: NVRAM updated from {old_nvram}KB to {new_nvram}KB'.format(name=self._name,
id=self._id,
old_nvram=self._nvram,
@@ -571,12 +570,12 @@ class Router(BaseNode):
else:
flag = 0
- await self._hypervisor.send('vm set_ram_mmap "{name}" {mmap}'.format(name=self._name, mmap=flag))
+ await self._hypervisor.send(f'vm set_ram_mmap "{self._name}" {flag}')
if mmap:
- log.info('Router "{name}" [{id}]: mmap enabled'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}]: mmap enabled')
else:
- log.info('Router "{name}" [{id}]: mmap disabled'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}]: mmap disabled')
self._mmap = mmap
@property
@@ -600,12 +599,12 @@ class Router(BaseNode):
flag = 1
else:
flag = 0
- await self._hypervisor.send('vm set_sparse_mem "{name}" {sparsemem}'.format(name=self._name, sparsemem=flag))
+ await self._hypervisor.send(f'vm set_sparse_mem "{self._name}" {flag}')
if sparsemem:
- log.info('Router "{name}" [{id}]: sparse memory enabled'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}]: sparse memory enabled')
else:
- log.info('Router "{name}" [{id}]: sparse memory disabled'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}]: sparse memory disabled')
self._sparsemem = sparsemem
@property
@@ -626,7 +625,7 @@ class Router(BaseNode):
:param clock_divisor: clock divisor value (integer)
"""
- await self._hypervisor.send('vm set_clock_divisor "{name}" {clock}'.format(name=self._name, clock=clock_divisor))
+ await self._hypervisor.send(f'vm set_clock_divisor "{self._name}" {clock_divisor}')
log.info('Router "{name}" [{id}]: clock divisor updated from {old_clock} to {new_clock}'.format(name=self._name,
id=self._id,
old_clock=self._clock_divisor,
@@ -656,11 +655,11 @@ class Router(BaseNode):
is_running = await self.is_running()
if not is_running:
# router is not running
- await self._hypervisor.send('vm set_idle_pc "{name}" {idlepc}'.format(name=self._name, idlepc=idlepc))
+ await self._hypervisor.send(f'vm set_idle_pc "{self._name}" {idlepc}')
else:
- await self._hypervisor.send('vm set_idle_pc_online "{name}" 0 {idlepc}'.format(name=self._name, idlepc=idlepc))
+ await self._hypervisor.send(f'vm set_idle_pc_online "{self._name}" 0 {idlepc}')
- log.info('Router "{name}" [{id}]: idle-PC set to {idlepc}'.format(name=self._name, id=self._id, idlepc=idlepc))
+ log.info(f'Router "{self._name}" [{self._id}]: idle-PC set to {idlepc}')
self._idlepc = idlepc
def set_process_priority_windows(self, pid, priority=None):
@@ -683,7 +682,7 @@ class Router(BaseNode):
priority = win32process.BELOW_NORMAL_PRIORITY_CLASS
win32process.SetPriorityClass(handle, priority)
except pywintypes.error as e:
-            log.error("Cannot set priority for Dynamips process (PID={}) ".format(pid, e.strerror))
+            log.error(f"Cannot set priority for Dynamips process (PID={pid}): {e.strerror}")
return old_priority
async def get_idle_pc_prop(self):
@@ -702,12 +701,12 @@ class Router(BaseNode):
was_auto_started = True
await asyncio.sleep(20) # leave time to the router to boot
- log.info('Router "{name}" [{id}] has started calculating Idle-PC values'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}] has started calculating Idle-PC values')
old_priority = None
if sys.platform.startswith("win"):
old_priority = self.set_process_priority_windows(self._hypervisor.process.pid)
begin = time.time()
- idlepcs = await self._hypervisor.send('vm get_idle_pc_prop "{}" 0'.format(self._name))
+ idlepcs = await self._hypervisor.send(f'vm get_idle_pc_prop "{self._name}" 0')
if old_priority is not None:
self.set_process_priority_windows(self._hypervisor.process.pid, old_priority)
log.info('Router "{name}" [{id}] has finished calculating Idle-PC values after {time:.4f} seconds'.format(name=self._name,
@@ -727,9 +726,9 @@ class Router(BaseNode):
is_running = await self.is_running()
if not is_running:
# router is not running
- raise DynamipsError('Router "{name}" is not running'.format(name=self._name))
+ raise DynamipsError(f'Router "{self._name}" is not running')
- proposals = await self._hypervisor.send('vm show_idle_pc_prop "{}" 0'.format(self._name))
+ proposals = await self._hypervisor.send(f'vm show_idle_pc_prop "{self._name}" 0')
return proposals
@property
@@ -751,7 +750,7 @@ class Router(BaseNode):
is_running = await self.is_running()
if is_running: # router is running
- await self._hypervisor.send('vm set_idle_max "{name}" 0 {idlemax}'.format(name=self._name, idlemax=idlemax))
+ await self._hypervisor.send(f'vm set_idle_max "{self._name}" 0 {idlemax}')
log.info('Router "{name}" [{id}]: idlemax updated from {old_idlemax} to {new_idlemax}'.format(name=self._name,
id=self._id,
@@ -823,7 +822,7 @@ class Router(BaseNode):
"""
# replace specials characters in 'drive:\filename' in Linux and Dynamips in MS Windows or viceversa.
- ghost_file = "{}-{}.ghost".format(os.path.basename(self._image), self._ram)
+ ghost_file = f"{os.path.basename(self._image)}-{self._ram}.ghost"
ghost_file = ghost_file.replace('\\', '-').replace('/', '-').replace(':', '-')
return ghost_file
@@ -900,7 +899,7 @@ class Router(BaseNode):
:param disk0: disk0 size (integer)
"""
- await self._hypervisor.send('vm set_disk0 "{name}" {disk0}'.format(name=self._name, disk0=disk0))
+ await self._hypervisor.send(f'vm set_disk0 "{self._name}" {disk0}')
log.info('Router "{name}" [{id}]: disk0 updated from {old_disk0}MB to {new_disk0}MB'.format(name=self._name,
id=self._id,
@@ -925,7 +924,7 @@ class Router(BaseNode):
:param disk1: disk1 size (integer)
"""
- await self._hypervisor.send('vm set_disk1 "{name}" {disk1}'.format(name=self._name, disk1=disk1))
+ await self._hypervisor.send(f'vm set_disk1 "{self._name}" {disk1}')
log.info('Router "{name}" [{id}]: disk1 updated from {old_disk1}MB to {new_disk1}MB'.format(name=self._name,
id=self._id,
@@ -951,9 +950,9 @@ class Router(BaseNode):
"""
if auto_delete_disks:
- log.info('Router "{name}" [{id}]: auto delete disks enabled'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}]: auto delete disks enabled')
else:
- log.info('Router "{name}" [{id}]: auto delete disks disabled'.format(name=self._name, id=self._id))
+ log.info(f'Router "{self._name}" [{self._id}]: auto delete disks disabled')
self._auto_delete_disks = auto_delete_disks
async def set_console(self, console):
@@ -964,7 +963,7 @@ class Router(BaseNode):
"""
self.console = console
- await self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self.console))
+ await self._hypervisor.send(f'vm set_con_tcp_port "{self._name}" {self.console}')
async def set_console_type(self, console_type):
"""
@@ -983,7 +982,7 @@ class Router(BaseNode):
self.console_type = console_type
if self._console and console_type == "telnet":
- await self._hypervisor.send('vm set_con_tcp_port "{name}" {console}'.format(name=self._name, console=self._console))
+ await self._hypervisor.send(f'vm set_con_tcp_port "{self._name}" {self._console}')
async def set_aux(self, aux):
"""
@@ -993,7 +992,7 @@ class Router(BaseNode):
"""
self.aux = aux
- await self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux))
+ await self._hypervisor.send(f'vm set_aux_tcp_port "{self._name}" {aux}')
async def get_cpu_usage(self, cpu_id=0):
"""
@@ -1002,7 +1001,7 @@ class Router(BaseNode):
:returns: cpu usage in seconds
"""
- cpu_usage = await self._hypervisor.send('vm cpu_usage "{name}" {cpu_id}'.format(name=self._name, cpu_id=cpu_id))
+ cpu_usage = await self._hypervisor.send(f'vm cpu_usage "{self._name}" {cpu_id}')
return int(cpu_usage[0])
@property
@@ -1066,7 +1065,7 @@ class Router(BaseNode):
:returns: slot bindings (adapter names) list
"""
- slot_bindings = await self._hypervisor.send('vm slot_bindings "{}"'.format(self._name))
+ slot_bindings = await self._hypervisor.send(f'vm slot_bindings "{self._name}"')
return slot_bindings
async def slot_add_binding(self, slot_number, adapter):
@@ -1080,7 +1079,7 @@ class Router(BaseNode):
try:
slot = self._slots[slot_number]
except IndexError:
- raise DynamipsError('Slot {slot_number} does not exist on router "{name}"'.format(name=self._name, slot_number=slot_number))
+ raise DynamipsError(f'Slot {slot_number} does not exist on router "{self._name}"')
if slot is not None:
current_adapter = slot
@@ -1179,10 +1178,10 @@ class Router(BaseNode):
adapter = self._slots[slot_number]
if wic_slot_number > len(adapter.wics) - 1:
- raise DynamipsError("WIC slot {wic_slot_number} doesn't exist".format(wic_slot_number=wic_slot_number))
+ raise DynamipsError(f"WIC slot {wic_slot_number} doesn't exist")
if not adapter.wic_slot_available(wic_slot_number):
- raise DynamipsError("WIC slot {wic_slot_number} is already occupied by another WIC".format(wic_slot_number=wic_slot_number))
+ raise DynamipsError(f"WIC slot {wic_slot_number} is already occupied by another WIC")
if await self.is_running():
raise DynamipsError('WIC "{wic}" cannot be added while router "{name}" is running'.format(wic=wic,
@@ -1218,10 +1217,10 @@ class Router(BaseNode):
adapter = self._slots[slot_number]
if wic_slot_number > len(adapter.wics) - 1:
- raise DynamipsError("WIC slot {wic_slot_number} doesn't exist".format(wic_slot_number=wic_slot_number))
+ raise DynamipsError(f"WIC slot {wic_slot_number} doesn't exist")
if adapter.wic_slot_available(wic_slot_number):
- raise DynamipsError("No WIC is installed in WIC slot {wic_slot_number}".format(wic_slot_number=wic_slot_number))
+ raise DynamipsError(f"No WIC is installed in WIC slot {wic_slot_number}")
if await self.is_running():
raise DynamipsError('WIC cannot be removed from slot {wic_slot_number} while router "{name}" is running'.format(wic_slot_number=wic_slot_number,
@@ -1269,7 +1268,7 @@ class Router(BaseNode):
slot_number=slot_number))
if adapter is None:
- raise DynamipsError("Adapter is missing in slot {slot_number}".format(slot_number=slot_number))
+ raise DynamipsError(f"Adapter is missing in slot {slot_number}")
if not adapter.port_exists(port_number):
raise DynamipsError("Port {port_number} does not exist on adapter {adapter}".format(adapter=adapter,
@@ -1327,7 +1326,7 @@ class Router(BaseNode):
slot_number=slot_number))
if adapter is None:
- raise DynamipsError("Adapter is missing in slot {slot_number}".format(slot_number=slot_number))
+ raise DynamipsError(f"Adapter is missing in slot {slot_number}")
if not adapter.port_exists(port_number):
raise DynamipsError("Port {port_number} does not exist on adapter {adapter}".format(adapter=adapter,
@@ -1430,7 +1429,7 @@ class Router(BaseNode):
try:
open(output_file, 'w+').close()
except OSError as e:
- raise DynamipsError('Can not write capture to "{}": {}'.format(output_file, str(e)))
+ raise DynamipsError(f'Can not write capture to "{output_file}": {str(e)}')
try:
adapter = self._slots[slot_number]
@@ -1518,14 +1517,14 @@ class Router(BaseNode):
"""
:returns: Path of the startup config
"""
- return os.path.join(self._working_directory, "configs", "i{}_startup-config.cfg".format(self._dynamips_id))
+ return os.path.join(self._working_directory, "configs", f"i{self._dynamips_id}_startup-config.cfg")
@property
def private_config_path(self):
"""
:returns: Path of the private config
"""
- return os.path.join(self._working_directory, "configs", "i{}_private-config.cfg".format(self._dynamips_id))
+ return os.path.join(self._working_directory, "configs", f"i{self._dynamips_id}_private-config.cfg")
async def set_name(self, new_name):
"""
@@ -1534,7 +1533,7 @@ class Router(BaseNode):
:param new_name: new name string
"""
- await self._hypervisor.send('vm rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
+ await self._hypervisor.send(f'vm rename "{self._name}" "{new_name}"')
# change the hostname in the startup-config
if os.path.isfile(self.startup_config_path):
@@ -1545,7 +1544,7 @@ class Router(BaseNode):
f.seek(0)
f.write(new_config)
except OSError as e:
- raise DynamipsError("Could not amend the configuration {}: {}".format(self.startup_config_path, e))
+ raise DynamipsError(f"Could not amend the configuration {self.startup_config_path}: {e}")
# change the hostname in the private-config
if os.path.isfile(self.private_config_path):
@@ -1556,9 +1555,9 @@ class Router(BaseNode):
f.seek(0)
f.write(new_config)
except OSError as e:
- raise DynamipsError("Could not amend the configuration {}: {}".format(self.private_config_path, e))
+ raise DynamipsError(f"Could not amend the configuration {self.private_config_path}: {e}")
- log.info('Router "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name, id=self._id, new_name=new_name))
+ log.info(f'Router "{self._name}" [{self._id}]: renamed to "{new_name}"')
self._name = new_name
async def extract_config(self):
@@ -1570,7 +1569,7 @@ class Router(BaseNode):
"""
try:
- reply = await self._hypervisor.send('vm extract_config "{}"'.format(self._name))
+ reply = await self._hypervisor.send(f'vm extract_config "{self._name}"')
except DynamipsError:
# for some reason Dynamips gets frozen when it does not find the magic number in the NVRAM file.
return None, None
@@ -1588,7 +1587,7 @@ class Router(BaseNode):
config_path = os.path.join(self._working_directory, "configs")
os.makedirs(config_path, exist_ok=True)
except OSError as e:
-            raise DynamipsError("Could could not create configuration directory {}: {}".format(config_path, e))
+            raise DynamipsError(f"Could not create configuration directory {config_path}: {e}")
startup_config_base64, private_config_base64 = await self.extract_config()
if startup_config_base64:
@@ -1598,10 +1597,10 @@ class Router(BaseNode):
config = "!\n" + config.replace("\r", "")
config_path = os.path.join(self._working_directory, startup_config)
with open(config_path, "wb") as f:
- log.info("saving startup-config to {}".format(startup_config))
+ log.info(f"saving startup-config to {startup_config}")
f.write(config.encode("utf-8"))
except (binascii.Error, OSError) as e:
- raise DynamipsError("Could not save the startup configuration {}: {}".format(config_path, e))
+ raise DynamipsError(f"Could not save the startup configuration {config_path}: {e}")
if private_config_base64 and base64.b64decode(private_config_base64) != b'\nkerberos password \nend\n':
private_config = self.private_config_path
@@ -1609,10 +1608,10 @@ class Router(BaseNode):
config = base64.b64decode(private_config_base64).decode("utf-8", errors="replace")
config_path = os.path.join(self._working_directory, private_config)
with open(config_path, "wb") as f:
- log.info("saving private-config to {}".format(private_config))
+ log.info(f"saving private-config to {private_config}")
f.write(config.encode("utf-8"))
except (binascii.Error, OSError) as e:
- raise DynamipsError("Could not save the private configuration {}: {}".format(config_path, e))
+ raise DynamipsError(f"Could not save the private configuration {config_path}: {e}")
async def delete(self):
"""
@@ -1622,7 +1621,7 @@ class Router(BaseNode):
try:
await wait_run_in_executor(shutil.rmtree, self._working_directory)
         except OSError as e:
-            log.warning("Could not delete file {}".format(e))
+            log.warning(f"Could not delete the working directory: {e}")
self.manager.release_dynamips_id(self._project.id, self._dynamips_id)
@@ -1631,17 +1630,17 @@ class Router(BaseNode):
Deletes this router & associated files (nvram, disks etc.)
"""
- await self._hypervisor.send('vm clean_delete "{}"'.format(self._name))
+ await self._hypervisor.send(f'vm clean_delete "{self._name}"')
self._hypervisor.devices.remove(self)
try:
await wait_run_in_executor(shutil.rmtree, self._working_directory)
         except OSError as e:
-            log.warning("Could not delete file {}".format(e))
-        log.info('Router "{name}" [{id}] has been deleted (including associated files)'.format(name=self._name, id=self._id))
+            log.warning(f"Could not delete the working directory: {e}")
+        log.info(f'Router "{self._name}" [{self._id}] has been deleted (including associated files)')
+ log.info(f'Router "{self._name}" [{self._id}] has been deleted (including associated files)')
def _memory_files(self):
return [
- os.path.join(self._working_directory, "{}_i{}_rom".format(self.platform, self.dynamips_id)),
- os.path.join(self._working_directory, "{}_i{}_nvram".format(self.platform, self.dynamips_id))
+ os.path.join(self._working_directory, f"{self.platform}_i{self.dynamips_id}_rom"),
+ os.path.join(self._working_directory, f"{self.platform}_i{self.dynamips_id}_nvram")
]
diff --git a/gns3server/compute/error.py b/gns3server/compute/error.py
index f7d8b52e..589ec68d 100644
--- a/gns3server/compute/error.py
+++ b/gns3server/compute/error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -38,5 +37,5 @@ class ImageMissingError(Exception):
"""
def __init__(self, image):
- super().__init__("The image {} is missing".format(image))
+ super().__init__(f"The image '{image}' is missing")
self.image = image
diff --git a/gns3server/compute/iou/__init__.py b/gns3server/compute/iou/__init__.py
index 89d47e3e..29da6f72 100644
--- a/gns3server/compute/iou/__init__.py
+++ b/gns3server/compute/iou/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -61,4 +60,4 @@ class IOU(BaseManager):
:returns: working directory name
"""
- return os.path.join("iou", "device-{}".format(legacy_vm_id))
+ return os.path.join("iou", f"device-{legacy_vm_id}")
diff --git a/gns3server/compute/iou/iou_error.py b/gns3server/compute/iou/iou_error.py
index 33cf157a..c499608e 100644
--- a/gns3server/compute/iou/iou_error.py
+++ b/gns3server/compute/iou/iou_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/iou/iou_vm.py b/gns3server/compute/iou/iou_vm.py
index 0ad8f5d2..415b5b64 100644
--- a/gns3server/compute/iou/iou_vm.py
+++ b/gns3server/compute/iou/iou_vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -99,7 +98,7 @@ class IOUVM(BaseNode):
"""
Called when the NVRAM file has changed
"""
- log.debug("NVRAM changed: {}".format(path))
+ log.debug(f"NVRAM changed: {path}")
self.save_configs()
self.updated()
@@ -139,7 +138,7 @@ class IOUVM(BaseNode):
"""
self._path = self.manager.get_abs_image_path(path, self.project.path)
- log.info('IOU "{name}" [{id}]: IOU image updated to "{path}"'.format(name=self._name, id=self._id, path=self._path))
+ log.info(f'IOU "{self._name}" [{self._id}]: IOU image updated to "{self._path}"')
@property
def use_default_iou_values(self):
@@ -161,9 +160,9 @@ class IOUVM(BaseNode):
self._use_default_iou_values = state
if state:
- log.info('IOU "{name}" [{id}]: uses the default IOU image values'.format(name=self._name, id=self._id))
+ log.info(f'IOU "{self._name}" [{self._id}]: uses the default IOU image values')
else:
- log.info('IOU "{name}" [{id}]: does not use the default IOU image values'.format(name=self._name, id=self._id))
+ log.info(f'IOU "{self._name}" [{self._id}]: does not use the default IOU image values')
async def update_default_iou_values(self):
"""
@@ -179,7 +178,7 @@ class IOUVM(BaseNode):
if match:
self.ram = int(match.group(1))
except (ValueError, OSError, subprocess.SubprocessError) as e:
- log.warning("could not find default RAM and NVRAM values for {}: {}".format(os.path.basename(self._path), e))
+ log.warning(f"could not find default RAM and NVRAM values for {os.path.basename(self._path)}: {e}")
async def create(self):
@@ -194,24 +193,24 @@ class IOUVM(BaseNode):
raise IOUError("IOU image is not configured")
if not os.path.isfile(self._path) or not os.path.exists(self._path):
if os.path.islink(self._path):
- raise IOUError("IOU image '{}' linked to '{}' is not accessible".format(self._path, os.path.realpath(self._path)))
+ raise IOUError(f"IOU image '{self._path}' linked to '{os.path.realpath(self._path)}' is not accessible")
else:
- raise IOUError("IOU image '{}' is not accessible".format(self._path))
+ raise IOUError(f"IOU image '{self._path}' is not accessible")
try:
with open(self._path, "rb") as f:
# read the first 7 bytes of the file.
elf_header_start = f.read(7)
except OSError as e:
- raise IOUError("Cannot read ELF header for IOU image '{}': {}".format(self._path, e))
+ raise IOUError(f"Cannot read ELF header for IOU image '{self._path}': {e}")
# IOU images must start with the ELF magic number, be 32-bit or 64-bit, little endian
# and have an ELF version of 1 normal IOS image are big endian!
if elf_header_start != b'\x7fELF\x01\x01\x01' and elf_header_start != b'\x7fELF\x02\x01\x01':
- raise IOUError("'{}' is not a valid IOU image".format(self._path))
+ raise IOUError(f"'{self._path}' is not a valid IOU image")
if not os.access(self._path, os.X_OK):
- raise IOUError("IOU image '{}' is not executable".format(self._path))
+ raise IOUError(f"IOU image '{self._path}' is not executable")
def __json__(self):
@@ -351,14 +350,14 @@ class IOUVM(BaseNode):
with open(path, "wb") as f:
f.write(value.encode("utf-8"))
except OSError as e:
- raise IOUError("Could not write the iourc file {}: {}".format(path, e))
+ raise IOUError(f"Could not write the iourc file {path}: {e}")
path = os.path.join(self.temporary_directory, "iourc")
try:
with open(path, "wb") as f:
f.write(value.encode("utf-8"))
except OSError as e:
- raise IOUError("Could not write the iourc file {}: {}".format(path, e))
+ raise IOUError(f"Could not write the iourc file {path}: {e}")
@property
def license_check(self):
@@ -378,7 +377,7 @@ class IOUVM(BaseNode):
try:
output = await gns3server.utils.asyncio.subprocess_check_output("ldd", self._path)
except (OSError, subprocess.SubprocessError) as e:
- log.warning("Could not determine the shared library dependencies for {}: {}".format(self._path, e))
+ log.warning(f"Could not determine the shared library dependencies for {self._path}: {e}")
return
p = re.compile(r"([\.\w]+)\s=>\s+not found")
@@ -408,27 +407,27 @@ class IOUVM(BaseNode):
config = configparser.ConfigParser()
try:
- log.info("Checking IOU license in '{}'".format(self.iourc_path))
+ log.info(f"Checking IOU license in '{self.iourc_path}'")
with open(self.iourc_path, encoding="utf-8") as f:
config.read_file(f)
except OSError as e:
- raise IOUError("Could not open iourc file {}: {}".format(self.iourc_path, e))
+ raise IOUError(f"Could not open iourc file {self.iourc_path}: {e}")
except configparser.Error as e:
- raise IOUError("Could not parse iourc file {}: {}".format(self.iourc_path, e))
+ raise IOUError(f"Could not parse iourc file {self.iourc_path}: {e}")
except UnicodeDecodeError as e:
- raise IOUError("Non ascii characters in iourc file {}, please remove them: {}".format(self.iourc_path, e))
+ raise IOUError(f"Non ascii characters in iourc file {self.iourc_path}, please remove them: {e}")
if "license" not in config:
- raise IOUError("License section not found in iourc file {}".format(self.iourc_path))
+ raise IOUError(f"License section not found in iourc file {self.iourc_path}")
hostname = socket.gethostname()
if len(hostname) > 15:
- log.warning("Older IOU images may not boot because hostname '{}' length is above 15 characters".format(hostname))
+ log.warning(f"Older IOU images may not boot because hostname '{hostname}' length is above 15 characters")
if hostname not in config["license"]:
- raise IOUError("Hostname \"{}\" not found in iourc file {}".format(hostname, self.iourc_path))
+ raise IOUError(f"Hostname \"{hostname}\" not found in iourc file {self.iourc_path}")
user_ioukey = config["license"][hostname]
if user_ioukey[-1:] != ';':
- raise IOUError("IOU key not ending with ; in iourc file {}".format(self.iourc_path))
+ raise IOUError(f"IOU key not ending with ; in iourc file {self.iourc_path}")
if len(user_ioukey) != 17:
- raise IOUError("IOU key length is not 16 characters in iourc file {}".format(self.iourc_path))
+ raise IOUError(f"IOU key length is not 16 characters in iourc file {self.iourc_path}")
user_ioukey = user_ioukey[:16]
# We can't test this because it's mean distributing a valid licence key
@@ -437,14 +436,14 @@ class IOUVM(BaseNode):
try:
hostid = (await gns3server.utils.asyncio.subprocess_check_output("hostid")).strip()
except FileNotFoundError as e:
- raise IOUError("Could not find hostid: {}".format(e))
+ raise IOUError(f"Could not find hostid: {e}")
except (OSError, subprocess.SubprocessError) as e:
- raise IOUError("Could not execute hostid: {}".format(e))
+ raise IOUError(f"Could not execute hostid: {e}")
try:
ioukey = int(hostid, 16)
except ValueError:
- raise IOUError("Invalid hostid detected: {}".format(hostid))
+ raise IOUError(f"Invalid hostid detected: {hostid}")
for x in hostname:
ioukey += ord(x)
pad1 = b'\x4B\x58\x21\x81\x56\x7B\x0D\xF3\x21\x43\x9B\x7E\xAC\x1D\xE6\x8A'
@@ -459,7 +458,7 @@ class IOUVM(BaseNode):
"""
Path to the nvram file
"""
- return os.path.join(self.working_dir, "nvram_{:05d}".format(self.application_id))
+ return os.path.join(self.working_dir, f"nvram_{self.application_id:05d}")
def _push_configs_to_nvram(self):
"""
@@ -477,7 +476,7 @@ class IOUVM(BaseNode):
with open(nvram_file, "rb") as file:
nvram_content = file.read()
except OSError as e:
- raise IOUError("Cannot read nvram file {}: {}".format(nvram_file, e))
+ raise IOUError(f"Cannot read nvram file {nvram_file}: {e}")
startup_config_content = startup_config_content.encode("utf-8")
private_config_content = self.private_config_content
@@ -486,12 +485,12 @@ class IOUVM(BaseNode):
try:
nvram_content = nvram_import(nvram_content, startup_config_content, private_config_content, self.nvram)
except ValueError as e:
- raise IOUError("Cannot push configs to nvram {}: {}".format(nvram_file, e))
+ raise IOUError(f"Cannot push configs to nvram {nvram_file}: {e}")
try:
with open(nvram_file, "wb") as file:
file.write(nvram_content)
except OSError as e:
- raise IOUError("Cannot write nvram file {}: {}".format(nvram_file, e))
+ raise IOUError(f"Cannot write nvram file {nvram_file}: {e}")
async def start(self):
"""
@@ -506,13 +505,13 @@ class IOUVM(BaseNode):
try:
self._rename_nvram_file()
except OSError as e:
- raise IOUError("Could not rename nvram files: {}".format(e))
+ raise IOUError(f"Could not rename nvram files: {e}")
iourc_path = self.iourc_path
if not iourc_path:
raise IOUError("Could not find an iourc file (IOU license), please configure an IOU license")
if not os.path.isfile(iourc_path):
- raise IOUError("The iourc path '{}' is not a regular file".format(iourc_path))
+ raise IOUError(f"The iourc path '{iourc_path}' is not a regular file")
await self._check_iou_licence()
await self._start_ubridge()
@@ -541,11 +540,11 @@ class IOUVM(BaseNode):
os.unlink(symlink)
os.symlink(self.path, symlink)
except OSError as e:
- raise IOUError("Could not create symbolic link: {}".format(e))
+ raise IOUError(f"Could not create symbolic link: {e}")
command = await self._build_command()
try:
- log.info("Starting IOU: {}".format(command))
+ log.info(f"Starting IOU: {command}")
self.command_line = ' '.join(command)
self._iou_process = await asyncio.create_subprocess_exec(
*command,
@@ -554,17 +553,17 @@ class IOUVM(BaseNode):
stderr=subprocess.STDOUT,
cwd=self.working_dir,
env=env)
- log.info("IOU instance {} started PID={}".format(self._id, self._iou_process.pid))
+ log.info(f"IOU instance {self._id} started PID={self._iou_process.pid}")
self._started = True
self.status = "started"
callback = functools.partial(self._termination_callback, "IOU")
gns3server.utils.asyncio.monitor_process(self._iou_process, callback)
except FileNotFoundError as e:
- raise IOUError("Could not start IOU: {}: 32-bit binary support is probably not installed".format(e))
+ raise IOUError(f"Could not start IOU: {e}: 32-bit binary support is probably not installed")
except (OSError, subprocess.SubprocessError) as e:
iou_stdout = self.read_iou_stdout()
- log.error("Could not start IOU {}: {}\n{}".format(self._path, e, iou_stdout))
- raise IOUError("Could not start IOU {}: {}\n{}".format(self._path, e, iou_stdout))
+ log.error(f"Could not start IOU {self._path}: {e}\n{iou_stdout}")
+ raise IOUError(f"Could not start IOU {self._path}: {e}\n{iou_stdout}")
await self.start_console()
@@ -605,13 +604,13 @@ class IOUVM(BaseNode):
Configures the IOL bridge in uBridge.
"""
- bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
+ bridge_name = f"IOL-BRIDGE-{self.application_id + 512}"
try:
# delete any previous bridge if it exists
- await self._ubridge_send("iol_bridge delete {name}".format(name=bridge_name))
+ await self._ubridge_send(f"iol_bridge delete {bridge_name}")
except UbridgeError:
pass
- await self._ubridge_send("iol_bridge create {name} {bridge_id}".format(name=bridge_name, bridge_id=self.application_id + 512))
+ await self._ubridge_send(f"iol_bridge create {bridge_name} {self.application_id + 512}")
bay_id = 0
for adapter in self._adapters:
@@ -635,7 +634,7 @@ class IOUVM(BaseNode):
unit_id += 1
bay_id += 1
- await self._ubridge_send("iol_bridge start {name}".format(name=bridge_name))
+ await self._ubridge_send(f"iol_bridge start {bridge_name}")
def _termination_callback(self, process_name, returncode):
"""
@@ -651,7 +650,7 @@ class IOUVM(BaseNode):
returncode,
self.read_iou_stdout())
else:
- message = 'IOU VM "{}" process has stopped with return code: {}\n{}'.format(self.name, returncode, self.read_iou_stdout())
+ message = f'IOU VM "{self.name}" process has stopped with return code: {returncode}\n{self.read_iou_stdout()}'
log.warning(message)
self.project.emit("log.error", {"message": message})
if self._telnet_server:
@@ -666,7 +665,7 @@ class IOUVM(BaseNode):
destination = self._nvram_file()
for file_path in glob.glob(os.path.join(glob.escape(self.working_dir), "nvram_*")):
shutil.move(file_path, destination)
- destination = os.path.join(self.working_dir, "vlan.dat-{:05d}".format(self.application_id))
+ destination = os.path.join(self.working_dir, f"vlan.dat-{self.application_id:05d}")
for file_path in glob.glob(os.path.join(glob.escape(self.working_dir), "vlan.dat-*")):
shutil.move(file_path, destination)
@@ -691,7 +690,7 @@ class IOUVM(BaseNode):
await gns3server.utils.asyncio.wait_for_process_termination(self._iou_process, timeout=3)
except asyncio.TimeoutError:
if self._iou_process.returncode is None:
- log.warning("IOU process {} is still running... killing it".format(self._iou_process.pid))
+ log.warning(f"IOU process {self._iou_process.pid} is still running... killing it")
try:
self._iou_process.kill()
except ProcessLookupError:
@@ -703,7 +702,7 @@ class IOUVM(BaseNode):
if os.path.islink(symlink):
os.unlink(symlink)
except OSError as e:
- log.warning("Could not delete symbolic link: {}".format(e))
+ log.warning(f"Could not delete symbolic link: {e}")
self._started = False
self.save_configs()
@@ -714,7 +713,7 @@ class IOUVM(BaseNode):
"""
if self._iou_process:
- log.info('Stopping IOU process for IOU VM "{}" PID={}'.format(self.name, self._iou_process.pid))
+ log.info(f'Stopping IOU process for IOU VM "{self.name}" PID={self._iou_process.pid}')
try:
self._iou_process.terminate()
# Sometime the process can already be dead when we garbage collect
@@ -751,7 +750,7 @@ class IOUVM(BaseNode):
"""
if self.is_running() and self.console_type != new_console_type:
- raise IOUError('"{name}" must be stopped to change the console type to {new_console_type}'.format(name=self._name, new_console_type=new_console_type))
+ raise IOUError(f'"{self._name}" must be stopped to change the console type to {new_console_type}')
super(IOUVM, IOUVM).console_type.__set__(self, new_console_type)
@@ -772,7 +771,7 @@ class IOUVM(BaseNode):
log.info("IOU {name} [id={id}]: NETMAP file created".format(name=self._name,
id=self._id))
except OSError as e:
- raise IOUError("Could not create {}: {}".format(netmap_path, e))
+ raise IOUError(f"Could not create {netmap_path}: {e}")
async def _build_command(self):
"""
@@ -835,7 +834,7 @@ class IOUVM(BaseNode):
with open(self._iou_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
- log.warning("could not read {}: {}".format(self._iou_stdout_file, e))
+ log.warning(f"could not read {self._iou_stdout_file}: {e}")
return output
@property
@@ -925,7 +924,7 @@ class IOUVM(BaseNode):
port_number=port_number))
if self.ubridge:
- bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
+ bridge_name = f"IOL-BRIDGE-{self.application_id + 512}"
await self._ubridge_send("iol_bridge add_nio_udp {name} {iol_id} {bay} {unit} {lport} {rhost} {rport}".format(name=bridge_name,
iol_id=self.application_id,
bay=adapter_number,
@@ -955,7 +954,7 @@ class IOUVM(BaseNode):
:param port_number: port number
:param filters: Array of filter dictionnary
"""
- bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
+ bridge_name = f"IOL-BRIDGE-{self.application_id + 512}"
location = '{bridge_name} {bay} {unit}'.format(
bridge_name=bridge_name,
bay=adapter_number,
@@ -998,7 +997,7 @@ class IOUVM(BaseNode):
port_number=port_number))
if self.ubridge:
- bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
+ bridge_name = f"IOL-BRIDGE-{self.application_id + 512}"
await self._ubridge_send("iol_bridge delete_nio_udp {name} {bay} {unit}".format(name=bridge_name,
bay=adapter_number,
unit=port_number))
@@ -1052,9 +1051,9 @@ class IOUVM(BaseNode):
self._l1_keepalives = state
if state:
- log.info('IOU "{name}" [{id}]: has activated layer 1 keepalive messages'.format(name=self._name, id=self._id))
+ log.info(f'IOU "{self._name}" [{self._id}]: has activated layer 1 keepalive messages')
else:
- log.info('IOU "{name}" [{id}]: has deactivated layer 1 keepalive messages'.format(name=self._name, id=self._id))
+ log.info(f'IOU "{self._name}" [{self._id}]: has deactivated layer 1 keepalive messages')
async def _enable_l1_keepalives(self, command):
"""
@@ -1071,9 +1070,9 @@ class IOUVM(BaseNode):
if re.search(r"-l\s+Enable Layer 1 keepalive messages", output):
command.extend(["-l"])
else:
- raise IOUError("layer 1 keepalive messages are not supported by {}".format(os.path.basename(self._path)))
+ raise IOUError(f"layer 1 keepalive messages are not supported by {os.path.basename(self._path)}")
except (OSError, subprocess.SubprocessError) as e:
- log.warning("could not determine if layer 1 keepalive messages are supported by {}: {}".format(os.path.basename(self._path), e))
+ log.warning(f"could not determine if layer 1 keepalive messages are supported by {os.path.basename(self._path)}: {e}")
@property
def startup_config_content(self):
@@ -1089,7 +1088,7 @@ class IOUVM(BaseNode):
with open(config_file, "rb") as f:
return f.read().decode("utf-8", errors="replace")
except OSError as e:
- raise IOUError("Can't read startup-config file '{}': {}".format(config_file, e))
+ raise IOUError(f"Can't read startup-config file '{config_file}': {e}")
@startup_config_content.setter
def startup_config_content(self, startup_config):
@@ -1116,15 +1115,15 @@ class IOUVM(BaseNode):
startup_config = startup_config.replace("%h", self._name)
f.write(startup_config)
- vlan_file = os.path.join(self.working_dir, "vlan.dat-{:05d}".format(self.application_id))
+ vlan_file = os.path.join(self.working_dir, f"vlan.dat-{self.application_id:05d}")
if os.path.exists(vlan_file):
try:
os.remove(vlan_file)
except OSError as e:
- log.error("Could not delete VLAN file '{}': {}".format(vlan_file, e))
+ log.error(f"Could not delete VLAN file '{vlan_file}': {e}")
except OSError as e:
- raise IOUError("Can't write startup-config file '{}': {}".format(startup_config_path, e))
+ raise IOUError(f"Can't write startup-config file '{startup_config_path}': {e}")
@property
def private_config_content(self):
@@ -1140,7 +1139,7 @@ class IOUVM(BaseNode):
with open(config_file, "rb") as f:
return f.read().decode("utf-8", errors="replace")
except OSError as e:
- raise IOUError("Can't read private-config file '{}': {}".format(config_file, e))
+ raise IOUError(f"Can't read private-config file '{config_file}': {e}")
@private_config_content.setter
def private_config_content(self, private_config):
@@ -1167,7 +1166,7 @@ class IOUVM(BaseNode):
private_config = private_config.replace("%h", self._name)
f.write(private_config)
except OSError as e:
- raise IOUError("Can't write private-config file '{}': {}".format(private_config_path, e))
+ raise IOUError(f"Can't write private-config file '{private_config_path}': {e}")
@property
def startup_config_file(self):
@@ -1254,20 +1253,20 @@ class IOUVM(BaseNode):
:returns: tuple (startup-config, private-config)
"""
- nvram_file = os.path.join(self.working_dir, "nvram_{:05d}".format(self.application_id))
+ nvram_file = os.path.join(self.working_dir, f"nvram_{self.application_id:05d}")
if not os.path.exists(nvram_file):
return None, None
try:
with open(nvram_file, "rb") as file:
nvram_content = file.read()
except OSError as e:
- log.warning("Cannot read nvram file {}: {}".format(nvram_file, e))
+ log.warning(f"Cannot read nvram file {nvram_file}: {e}")
return None, None
try:
startup_config_content, private_config_content = nvram_export(nvram_content)
except ValueError as e:
- log.warning("Could not export configs from nvram file {}: {}".format(nvram_file, e))
+ log.warning(f"Could not export configs from nvram file {nvram_file}: {e}")
return None, None
return startup_config_content, private_config_content
@@ -1284,20 +1283,20 @@ class IOUVM(BaseNode):
try:
config = startup_config_content.decode("utf-8", errors="replace")
with open(config_path, "wb") as f:
- log.info("saving startup-config to {}".format(config_path))
+ log.info(f"saving startup-config to {config_path}")
f.write(config.encode("utf-8"))
except (binascii.Error, OSError) as e:
- raise IOUError("Could not save the startup configuration {}: {}".format(config_path, e))
+ raise IOUError(f"Could not save the startup configuration {config_path}: {e}")
if private_config_content and private_config_content != b'\nend\n':
config_path = os.path.join(self.working_dir, "private-config.cfg")
try:
config = private_config_content.decode("utf-8", errors="replace")
with open(config_path, "wb") as f:
- log.info("saving private-config to {}".format(config_path))
+ log.info(f"saving private-config to {config_path}")
f.write(config.encode("utf-8"))
except (binascii.Error, OSError) as e:
- raise IOUError("Could not save the private configuration {}: {}".format(config_path, e))
+ raise IOUError(f"Could not save the private configuration {config_path}: {e}")
async def start_capture(self, adapter_number, port_number, output_file, data_link_type="DLT_EN10MB"):
"""
@@ -1322,7 +1321,7 @@ class IOUVM(BaseNode):
output_file=output_file))
if self.ubridge:
- bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
+ bridge_name = f"IOL-BRIDGE-{self.application_id + 512}"
await self._ubridge_send('iol_bridge start_capture {name} {bay} {unit} "{output_file}" {data_link_type}'.format(name=bridge_name,
bay=adapter_number,
unit=port_number,
@@ -1346,7 +1345,7 @@ class IOUVM(BaseNode):
adapter_number=adapter_number,
port_number=port_number))
if self.ubridge:
- bridge_name = "IOL-BRIDGE-{}".format(self.application_id + 512)
+ bridge_name = f"IOL-BRIDGE-{self.application_id + 512}"
await self._ubridge_send('iol_bridge stop_capture {name} {bay} {unit}'.format(name=bridge_name,
bay=adapter_number,
unit=port_number))
diff --git a/gns3server/compute/iou/utils/iou_export.py b/gns3server/compute/iou/utils/iou_export.py
index 317b6e94..242b3c9d 100644
--- a/gns3server/compute/iou/utils/iou_export.py
+++ b/gns3server/compute/iou/utils/iou_export.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
# To use python v2.7 change the first line to:
#!/usr/bin/env python
@@ -211,14 +210,14 @@ if __name__ == '__main__':
fd = open(args.nvram, 'rb')
nvram = fd.read()
fd.close()
- except (IOError, OSError) as err:
- sys.stderr.write("Error reading file: {}\n".format(err))
+ except OSError as err:
+ sys.stderr.write(f"Error reading file: {err}\n")
sys.exit(1)
try:
startup, private = nvram_export(nvram)
except ValueError as err:
- sys.stderr.write("nvram_export: {}\n".format(err))
+ sys.stderr.write(f"nvram_export: {err}\n")
sys.exit(3)
try:
@@ -232,6 +231,6 @@ if __name__ == '__main__':
fd = open(args.private, 'wb')
fd.write(private)
fd.close()
- except (IOError, OSError) as err:
- sys.stderr.write("Error writing file: {}\n".format(err))
+ except OSError as err:
+ sys.stderr.write(f"Error writing file: {err}\n")
sys.exit(1)
diff --git a/gns3server/compute/iou/utils/iou_import.py b/gns3server/compute/iou/utils/iou_import.py
index 3edad939..c0a5c069 100644
--- a/gns3server/compute/iou/utils/iou_import.py
+++ b/gns3server/compute/iou/utils/iou_import.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-# -*- coding: utf-8 -*-
# To use python v2.7 change the first line to:
#!/usr/bin/env python
@@ -55,7 +54,7 @@ def checksum(data, start, end):
chk += word
# add remaining words, ignoring old checksum at offset 4
- struct_format = '>{:d}H'.format((end - start - 6) // 2)
+ struct_format = f'>{(end - start - 6) // 2:d}H'
for word in struct.unpack_from(struct_format, data, start+6):
chk += word
@@ -209,20 +208,20 @@ if __name__ == '__main__':
fd = open(args.private, 'rb')
private = fd.read()
fd.close()
- except (IOError, OSError) as err:
- sys.stderr.write("Error reading file: {}\n".format(err))
+ except OSError as err:
+ sys.stderr.write(f"Error reading file: {err}\n")
sys.exit(1)
try:
nvram = nvram_import(nvram, startup, private, args.create)
except ValueError as err:
- sys.stderr.write("nvram_import: {}\n".format(err))
+ sys.stderr.write(f"nvram_import: {err}\n")
sys.exit(3)
try:
fd = open(args.nvram, 'wb')
fd.write(nvram)
fd.close()
- except (IOError, OSError) as err:
- sys.stderr.write("Error writing file: {}\n".format(err))
+ except OSError as err:
+ sys.stderr.write(f"Error writing file: {err}\n")
sys.exit(1)
diff --git a/gns3server/compute/nios/nio.py b/gns3server/compute/nios/nio.py
index 324c2639..8ad5bd87 100644
--- a/gns3server/compute/nios/nio.py
+++ b/gns3server/compute/nios/nio.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@@ -20,7 +19,7 @@ Base interface for NIOs.
"""
-class NIO(object):
+class NIO:
"""
IOU NIO.
diff --git a/gns3server/compute/nios/nio_ethernet.py b/gns3server/compute/nios/nio_ethernet.py
index a9604424..a4b95682 100644
--- a/gns3server/compute/nios/nio_ethernet.py
+++ b/gns3server/compute/nios/nio_ethernet.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/nios/nio_tap.py b/gns3server/compute/nios/nio_tap.py
index 9f51ce13..5fb0a6c8 100644
--- a/gns3server/compute/nios/nio_tap.py
+++ b/gns3server/compute/nios/nio_tap.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/nios/nio_udp.py b/gns3server/compute/nios/nio_udp.py
index a87875fe..f82e8665 100644
--- a/gns3server/compute/nios/nio_udp.py
+++ b/gns3server/compute/nios/nio_udp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/port_manager.py b/gns3server/compute/port_manager.py
index cdb295cc..4100a673 100644
--- a/gns3server/compute/port_manager.py
+++ b/gns3server/compute/port_manager.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -24,10 +23,10 @@ log = logging.getLogger(__name__)
# These ports are disallowed by Chrome and Firefox to avoid issues, we skip them as well
-BANNED_PORTS = set((1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87, 95, 101, 102, 103,
+BANNED_PORTS = {1, 7, 9, 11, 13, 15, 17, 19, 20, 21, 22, 23, 25, 37, 42, 43, 53, 77, 79, 87, 95, 101, 102, 103,
104, 109, 110, 111, 113, 115, 117, 119, 123, 135, 139, 143, 179, 389, 465, 512, 513, 514, 515, 526,
530, 531, 532, 540, 556, 563, 587, 601, 636, 993, 995, 2049, 3659, 4045, 6000, 6665, 6666, 6667,
- 6668, 6669))
+ 6668, 6669}
class PortManager:
@@ -303,7 +302,7 @@ class PortManager:
f"{self._udp_port_range[0]}-{self._udp_port_range[1]}")
self._used_udp_ports.add(port)
project.record_udp_port(port)
- log.debug("UDP port {} has been reserved".format(port))
+ log.debug(f"UDP port {port} has been reserved")
def release_udp_port(self, port, project):
"""
diff --git a/gns3server/compute/project.py b/gns3server/compute/project.py
index 425dc881..33149fe7 100644
--- a/gns3server/compute/project.py
+++ b/gns3server/compute/project.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -50,7 +49,7 @@ class Project:
try:
UUID(project_id, version=4)
except ValueError:
- raise ComputeError("{} is not a valid UUID".format(project_id))
+ raise ComputeError(f"{project_id} is not a valid UUID")
else:
project_id = str(uuid4())
self._id = project_id
@@ -66,16 +65,16 @@ class Project:
try:
os.makedirs(path, exist_ok=True)
except OSError as e:
- raise ComputeError("Could not create project directory: {}".format(e))
+ raise ComputeError(f"Could not create project directory: {e}")
self.path = path
try:
if os.path.exists(self.tmp_working_directory()):
shutil.rmtree(self.tmp_working_directory())
except OSError as e:
- raise ComputeError("Could not clean project directory: {}".format(e))
+ raise ComputeError(f"Could not clean project directory: {e}")
- log.info("Project {id} with path '{path}' created".format(path=self._path, id=self._id))
+ log.info(f"Project {self._id} with path '{self._path}' created")
def __json__(self):
@@ -188,7 +187,7 @@ class Project:
try:
os.makedirs(workdir, exist_ok=True)
except OSError as e:
- raise ComputeError("Could not create module working directory: {}".format(e))
+ raise ComputeError(f"Could not create module working directory: {e}")
return workdir
def module_working_path(self, module_name):
@@ -215,7 +214,7 @@ class Project:
try:
os.makedirs(workdir, exist_ok=True)
except OSError as e:
- raise ComputeError("Could not create the node working directory: {}".format(e))
+ raise ComputeError(f"Could not create the node working directory: {e}")
return workdir
def node_working_path(self, node):
@@ -245,7 +244,7 @@ class Project:
try:
os.makedirs(workdir, exist_ok=True)
except OSError as e:
- raise ComputeError("Could not create the capture working directory: {}".format(e))
+ raise ComputeError(f"Could not create the capture working directory: {e}")
return workdir
def add_node(self, node):
@@ -270,13 +269,13 @@ class Project:
try:
UUID(node_id, version=4)
except ValueError:
- raise ComputeError("Node ID {} is not a valid UUID".format(node_id))
+ raise ComputeError(f"Node ID {node_id} is not a valid UUID")
for node in self._nodes:
if node.id == node_id:
return node
- raise ComputeNotFoundError("Node ID {} doesn't exist".format(node_id))
+ raise ComputeNotFoundError(f"Node ID {node_id} doesn't exist")
async def remove_node(self, node):
"""
@@ -305,10 +304,10 @@ class Project:
Closes the project, but keep project data on disk
"""
- project_nodes_id = set([n.id for n in self.nodes])
+ project_nodes_id = {n.id for n in self.nodes}
for module in self.compute():
- module_nodes_id = set([n.id for n in module.instance().nodes])
+ module_nodes_id = {n.id for n in module.instance().nodes}
# We close the project only for the modules using it
if len(module_nodes_id & project_nodes_id):
await module.instance().project_closing(self)
@@ -316,7 +315,7 @@ class Project:
await self._close_and_clean(False)
for module in self.compute():
- module_nodes_id = set([n.id for n in module.instance().nodes])
+ module_nodes_id = {n.id for n in module.instance().nodes}
# We close the project only for the modules using it
if len(module_nodes_id & project_nodes_id):
await module.instance().project_closed(self)
@@ -344,22 +343,22 @@ class Project:
try:
future.result()
except (Exception, GeneratorExit) as e:
- log.error("Could not close node {}".format(e), exc_info=1)
+ log.error(f"Could not close node: {e}", exc_info=1)
if cleanup and os.path.exists(self.path):
self._deleted = True
try:
await wait_run_in_executor(shutil.rmtree, self.path)
- log.info("Project {id} with path '{path}' deleted".format(path=self._path, id=self._id))
+ log.info(f"Project {self._id} with path '{self._path}' deleted")
except OSError as e:
- raise ComputeError("Could not delete the project directory: {}".format(e))
+ raise ComputeError(f"Could not delete the project directory: {e}")
else:
- log.info("Project {id} with path '{path}' closed".format(path=self._path, id=self._id))
+ log.info(f"Project {self._id} with path '{self._path}' closed")
if self._used_tcp_ports:
- log.warning("Project {} has TCP ports still in use: {}".format(self.id, self._used_tcp_ports))
+ log.warning(f"Project {self.id} has TCP ports still in use: {self._used_tcp_ports}")
if self._used_udp_ports:
- log.warning("Project {} has UDP ports still in use: {}".format(self.id, self._used_udp_ports))
+ log.warning(f"Project {self.id} has UDP ports still in use: {self._used_udp_ports}")
# clean the remaining ports that have not been cleaned by their respective node.
port_manager = PortManager.instance()
diff --git a/gns3server/compute/project_manager.py b/gns3server/compute/project_manager.py
index c5f455d2..23291e28 100644
--- a/gns3server/compute/project_manager.py
+++ b/gns3server/compute/project_manager.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -71,10 +70,10 @@ class ProjectManager:
try:
UUID(project_id, version=4)
except ValueError:
- raise ComputeError("Project ID {} is not a valid UUID".format(project_id))
+ raise ComputeError(f"Project ID {project_id} is not a valid UUID")
if project_id not in self._projects:
- raise ComputeNotFoundError("Project ID {} doesn't exist".format(project_id))
+ raise ComputeNotFoundError(f"Project ID {project_id} doesn't exist")
return self._projects[project_id]
def _check_available_disk_space(self, project):
@@ -87,13 +86,15 @@ class ProjectManager:
try:
used_disk_space = psutil.disk_usage(project.path).percent
except FileNotFoundError:
- log.warning('Could not find "{}" when checking for used disk space'.format(project.path))
+ log.warning(f"Could not find '{project.path}' when checking for used disk space")
return
# send a warning if used disk space is >= 90%
if used_disk_space >= 90:
- message = 'Only {:.2f}% or less of free disk space detected in "{}" on "{}"'.format(100 - used_disk_space,
- project.path,
- platform.node())
+ message = 'Only {:.2f}% or less of free disk space detected in "{}" on "{}"'.format(
+ 100 - used_disk_space,
+ project.path,
+ platform.node()
+ )
log.warning(message)
project.emit("log.warning", {"message": message})
@@ -119,7 +120,7 @@ class ProjectManager:
"""
if project_id not in self._projects:
- raise ComputeNotFoundError("Project ID {} doesn't exist".format(project_id))
+ raise ComputeNotFoundError(f"Project ID {project_id} doesn't exist")
del self._projects[project_id]
def check_hardware_virtualization(self, source_node):
diff --git a/gns3server/compute/qemu/__init__.py b/gns3server/compute/qemu/__init__.py
index bca490cc..fea96961 100644
--- a/gns3server/compute/qemu/__init__.py
+++ b/gns3server/compute/qemu/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -149,7 +148,7 @@ class Qemu(BaseManager):
qemus = []
for path in Qemu.paths_list():
- log.debug("Searching for Qemu binaries in '{}'".format(path))
+ log.debug(f"Searching for Qemu binaries in '{path}'")
try:
for f in os.listdir(path):
if f.endswith("-spice"):
@@ -159,7 +158,7 @@ class Qemu(BaseManager):
os.path.isfile(os.path.join(path, f)):
if archs is not None:
for arch in archs:
- if f.endswith(arch) or f.endswith("{}.exe".format(arch)) or f.endswith("{}w.exe".format(arch)):
+ if f.endswith(arch) or f.endswith(f"{arch}.exe") or f.endswith(f"{arch}w.exe"):
qemu_path = os.path.join(path, f)
version = await Qemu.get_qemu_version(qemu_path)
qemus.append({"path": qemu_path, "version": version})
@@ -215,19 +214,19 @@ class Qemu(BaseManager):
if match:
return version
except (UnicodeDecodeError, OSError) as e:
- log.warning("could not read {}: {}".format(version_file, e))
+ log.warning(f"could not read {version_file}: {e}")
return ""
else:
try:
output = await subprocess_check_output(qemu_path, "-version", "-nographic")
- match = re.search("version\s+([0-9a-z\-\.]+)", output)
+ match = re.search(r"version\s+([0-9a-z\-\.]+)", output)
if match:
version = match.group(1)
return version
else:
- raise QemuError("Could not determine the Qemu version for {}".format(qemu_path))
+ raise QemuError(f"Could not determine the Qemu version for {qemu_path}")
except (OSError, subprocess.SubprocessError) as e:
- raise QemuError("Error while looking for the Qemu version: {}".format(e))
+ raise QemuError(f"Error while looking for the Qemu version: {e}")
@staticmethod
async def _get_qemu_img_version(qemu_img_path):
@@ -244,9 +243,9 @@ class Qemu(BaseManager):
version = match.group(1)
return version
else:
- raise QemuError("Could not determine the Qemu-img version for {}".format(qemu_img_path))
+ raise QemuError(f"Could not determine the Qemu-img version for {qemu_img_path}")
except (OSError, subprocess.SubprocessError) as e:
- raise QemuError("Error while looking for the Qemu-img version: {}".format(e))
+ raise QemuError(f"Error while looking for the Qemu-img version: {e}")
@staticmethod
def get_haxm_windows_version():
@@ -264,7 +263,7 @@ class Qemu(BaseManager):
for index in range(winreg.QueryInfoKey(hkey)[0]):
product_id = winreg.EnumKey(hkey, index)
try:
- product_key = winreg.OpenKey(hkey, r"{}\InstallProperties".format(product_id))
+ product_key = winreg.OpenKey(hkey, fr"{product_id}\InstallProperties")
try:
if winreg.QueryValueEx(product_key, "DisplayName")[0].endswith("Hardware Accelerated Execution Manager"):
version = winreg.QueryValueEx(product_key, "DisplayVersion")[0]
@@ -287,7 +286,7 @@ class Qemu(BaseManager):
:returns: working directory name
"""
- return os.path.join("qemu", "vm-{}".format(legacy_vm_id))
+ return os.path.join("qemu", f"vm-{legacy_vm_id}")
async def create_disk(self, qemu_img, path, options):
"""
@@ -309,21 +308,21 @@ class Qemu(BaseManager):
try:
if os.path.exists(path):
- raise QemuError("Could not create disk image '{}', file already exists".format(path))
+ raise QemuError(f"Could not create disk image '{path}', file already exists")
except UnicodeEncodeError:
raise QemuError("Could not create disk image '{}', "
"path contains characters not supported by filesystem".format(path))
command = [qemu_img, "create", "-f", img_format]
for option in sorted(options.keys()):
- command.extend(["-o", "{}={}".format(option, options[option])])
+ command.extend(["-o", f"{option}={options[option]}"])
command.append(path)
- command.append("{}M".format(img_size))
+ command.append(f"{img_size}M")
process = await asyncio.create_subprocess_exec(*command)
await process.wait()
except (OSError, subprocess.SubprocessError) as e:
- raise QemuError("Could not create disk image {}:{}".format(path, e))
+ raise QemuError(f"Could not create disk image {path}:{e}")
async def resize_disk(self, qemu_img, path, extend):
"""
@@ -341,13 +340,13 @@ class Qemu(BaseManager):
try:
if not os.path.exists(path):
- raise QemuError("Qemu disk '{}' does not exist".format(path))
- command = [qemu_img, "resize", path, "+{}M".format(extend)]
+ raise QemuError(f"Qemu disk '{path}' does not exist")
+ command = [qemu_img, "resize", path, f"+{extend}M"]
process = await asyncio.create_subprocess_exec(*command)
await process.wait()
- log.info("Qemu disk '{}' extended by {} MB".format(path, extend))
+ log.info(f"Qemu disk '{path}' extended by {extend} MB")
except (OSError, subprocess.SubprocessError) as e:
- raise QemuError("Could not update disk image {}:{}".format(path, e))
+ raise QemuError(f"Could not update disk image {path}:{e}")
def _init_config_disk(self):
"""
@@ -357,12 +356,12 @@ class Qemu(BaseManager):
try:
self.get_abs_image_path(self.config_disk)
except (NodeError, ImageMissingError):
- config_disk_zip = get_resource("compute/qemu/resources/{}.zip".format(self.config_disk))
+ config_disk_zip = get_resource(f"compute/qemu/resources/{self.config_disk}.zip")
if config_disk_zip and os.path.exists(config_disk_zip):
directory = self.get_images_directory()
try:
unpack_zip(config_disk_zip, directory)
except OSError as e:
- log.warning("Config disk creation: {}".format(e))
+ log.warning(f"Config disk creation: {e}")
else:
- log.warning("Config disk: image '{}' missing".format(self.config_disk))
+ log.warning(f"Config disk: image '{self.config_disk}' missing")
diff --git a/gns3server/compute/qemu/qemu_error.py b/gns3server/compute/qemu/qemu_error.py
index afabc921..90659215 100644
--- a/gns3server/compute/qemu/qemu_error.py
+++ b/gns3server/compute/qemu/qemu_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/qemu/qemu_vm.py b/gns3server/compute/qemu/qemu_vm.py
index 567221c2..ee81cb07 100644
--- a/gns3server/compute/qemu/qemu_vm.py
+++ b/gns3server/compute/qemu/qemu_vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -142,10 +141,10 @@ class QemuVM(BaseNode):
try:
self.config_disk_image = self.manager.get_abs_image_path(self.config_disk_name)
except (NodeError, ImageMissingError):
- log.warning("Config disk: image '{}' missing".format(self.config_disk_name))
+ log.warning(f"Config disk: image '{self.config_disk_name}' missing")
self.config_disk_name = ""
- log.info('QEMU VM "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has been created')
@property
def guest_cid(self):
@@ -200,7 +199,7 @@ class QemuVM(BaseNode):
qemu_path += "w.exe"
new_qemu_path = shutil.which(qemu_path, path=os.pathsep.join(self._manager.paths_list()))
if new_qemu_path is None:
- raise QemuError("QEMU binary path {} is not found in the path".format(qemu_path))
+ raise QemuError(f"QEMU binary path {qemu_path} is not found in the path")
qemu_path = new_qemu_path
self._check_qemu_path(qemu_path)
@@ -220,7 +219,7 @@ class QemuVM(BaseNode):
try:
QemuPlatform(self._platform.split(".")[0])
except ValueError:
- raise QemuError("Platform {} is unknown".format(self._platform))
+ raise QemuError(f"Platform {self._platform} is unknown")
log.info('QEMU VM "{name}" [{id}] has set the QEMU path to {qemu_path}'.format(name=self._name,
id=self._id,
qemu_path=qemu_path))
@@ -230,9 +229,9 @@ class QemuVM(BaseNode):
if qemu_path is None:
raise QemuError("QEMU binary path is not set")
if not os.path.exists(qemu_path):
- raise QemuError("QEMU binary '{}' is not accessible".format(qemu_path))
+ raise QemuError(f"QEMU binary '{qemu_path}' is not accessible")
if not os.access(qemu_path, os.X_OK):
- raise QemuError("QEMU binary '{}' is not executable".format(qemu_path))
+ raise QemuError(f"QEMU binary '{qemu_path}' is not executable")
@property
def platform(self):
@@ -246,9 +245,9 @@ class QemuVM(BaseNode):
self._platform = platform
if sys.platform.startswith("win"):
- self.qemu_path = "qemu-system-{}w.exe".format(platform)
+ self.qemu_path = f"qemu-system-{platform}w.exe"
else:
- self.qemu_path = "qemu-system-{}".format(platform)
+ self.qemu_path = f"qemu-system-{platform}"
def _disk_setter(self, variable, value):
"""
@@ -262,7 +261,7 @@ class QemuVM(BaseNode):
if not self.linked_clone:
for node in self.manager.nodes:
if node != self and getattr(node, variable) == value:
- raise QemuError("Sorry a node without the linked base setting enabled can only be used once on your server. {} is already used by {}".format(value, node.name))
+ raise QemuError(f"Sorry a node without the linked base setting enabled can only be used once on your server. {value} is already used by {node.name}")
setattr(self, "_" + variable, value)
log.info('QEMU VM "{name}" [{id}] has set the QEMU {variable} path to {disk_image}'.format(name=self._name,
variable=variable,
@@ -488,13 +487,13 @@ class QemuVM(BaseNode):
if self._cdrom_image:
self._cdrom_option() # this will check the cdrom image is accessible
await self._control_vm("eject -f ide1-cd0")
- await self._control_vm("change ide1-cd0 {}".format(self._cdrom_image))
+ await self._control_vm(f"change ide1-cd0 {self._cdrom_image}")
log.info('QEMU VM "{name}" [{id}] has changed the cdrom image path to {cdrom_image}'.format(name=self._name,
id=self._id,
cdrom_image=self._cdrom_image))
else:
await self._control_vm("eject -f ide1-cd0")
- log.info('QEMU VM "{name}" [{id}] has ejected the cdrom image'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has ejected the cdrom image')
@property
def bios_image(self):
@@ -619,7 +618,7 @@ class QemuVM(BaseNode):
if not mac_address:
# use the node UUID to generate a random MAC address
- self._mac_address = "0c:%s:%s:%s:%s:00" % (self.project.id[-4:-2], self.project.id[-2:], self.id[-4:-2], self.id[-2:])
+ self._mac_address = f"0c:{self.project.id[-4:-2]}:{self.project.id[-2:]}:{self.id[-4:-2]}:{self.id[-2:]}:00"
else:
self._mac_address = mac_address
@@ -646,9 +645,9 @@ class QemuVM(BaseNode):
"""
if legacy_networking:
- log.info('QEMU VM "{name}" [{id}] has enabled legacy networking'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has enabled legacy networking')
else:
- log.info('QEMU VM "{name}" [{id}] has disabled legacy networking'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has disabled legacy networking')
self._legacy_networking = legacy_networking
@property
@@ -670,9 +669,9 @@ class QemuVM(BaseNode):
"""
if replicate_network_connection_state:
- log.info('QEMU VM "{name}" [{id}] has enabled network connection state replication'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has enabled network connection state replication')
else:
- log.info('QEMU VM "{name}" [{id}] has disabled network connection state replication'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has disabled network connection state replication')
self._replicate_network_connection_state = replicate_network_connection_state
@property
@@ -694,9 +693,9 @@ class QemuVM(BaseNode):
"""
if create_config_disk:
- log.info('QEMU VM "{name}" [{id}] has enabled the config disk creation feature'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has enabled the config disk creation feature')
else:
- log.info('QEMU VM "{name}" [{id}] has disabled the config disk creation feature'.format(name=self._name, id=self._id))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has disabled the config disk creation feature')
self._create_config_disk = create_config_disk
@property
@@ -717,7 +716,7 @@ class QemuVM(BaseNode):
:param on_close: string
"""
- log.info('QEMU VM "{name}" [{id}] set the close action to "{action}"'.format(name=self._name, id=self._id, action=on_close))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] set the close action to "{on_close}"')
self._on_close = on_close
@property
@@ -787,7 +786,7 @@ class QemuVM(BaseNode):
:param ram: RAM amount in MB
"""
- log.info('QEMU VM "{name}" [{id}] has set the RAM to {ram}'.format(name=self._name, id=self._id, ram=ram))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has set the RAM to {ram}')
self._ram = ram
@property
@@ -808,7 +807,7 @@ class QemuVM(BaseNode):
:param cpus: number of vCPUs.
"""
- log.info('QEMU VM "{name}" [{id}] has set the number of vCPUs to {cpus}'.format(name=self._name, id=self._id, cpus=cpus))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has set the number of vCPUs to {cpus}')
self._cpus = cpus
@property
@@ -829,7 +828,7 @@ class QemuVM(BaseNode):
:param maxcpus: maximum number of hotpluggable vCPUs
"""
- log.info('QEMU VM "{name}" [{id}] has set maximum number of hotpluggable vCPUs to {maxcpus}'.format(name=self._name, id=self._id, maxcpus=maxcpus))
+ log.info(f'QEMU VM "{self._name}" [{self._id}] has set maximum number of hotpluggable vCPUs to {maxcpus}')
self._maxcpus = maxcpus
@property
@@ -958,9 +957,9 @@ class QemuVM(BaseNode):
import win32con
import win32process
except ImportError:
- log.error("pywin32 must be installed to change the priority class for QEMU VM {}".format(self._name))
+ log.error(f"pywin32 must be installed to change the priority class for QEMU VM {self._name}")
else:
- log.info("Setting QEMU VM {} priority class to {}".format(self._name, self._process_priority))
+ log.info(f"Setting QEMU VM {self._name} priority class to {self._process_priority}")
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, 0, self._process.pid)
if self._process_priority == "realtime":
priority = win32process.REALTIME_PRIORITY_CLASS
@@ -977,7 +976,7 @@ class QemuVM(BaseNode):
try:
win32process.SetPriorityClass(handle, priority)
except win32process.error as e:
- log.error('Could not change process priority for QEMU VM "{}": {}'.format(self._name, e))
+ log.error(f'Could not change process priority for QEMU VM "{self._name}": {e}')
else:
if self._process_priority == "realtime":
priority = -20
@@ -995,7 +994,7 @@ class QemuVM(BaseNode):
process = await asyncio.create_subprocess_exec('renice', '-n', str(priority), '-p', str(self._process.pid))
await process.wait()
except (OSError, subprocess.SubprocessError) as e:
- log.error('Could not change process priority for QEMU VM "{}": {}'.format(self._name, e))
+ log.error(f'Could not change process priority for QEMU VM "{self._name}": {e}')
def _stop_cpulimit(self):
"""
@@ -1007,7 +1006,7 @@ class QemuVM(BaseNode):
try:
self._process.wait(3)
except subprocess.TimeoutExpired:
- log.error("Could not kill cpulimit process {}".format(self._cpulimit_process.pid))
+ log.error(f"Could not kill cpulimit process {self._cpulimit_process.pid}")
def _set_cpu_throttling(self):
"""
@@ -1022,12 +1021,12 @@ class QemuVM(BaseNode):
cpulimit_exec = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "cpulimit", "cpulimit.exe")
else:
cpulimit_exec = "cpulimit"
- subprocess.Popen([cpulimit_exec, "--lazy", "--pid={}".format(self._process.pid), "--limit={}".format(self._cpu_throttling)], cwd=self.working_dir)
- log.info("CPU throttled to {}%".format(self._cpu_throttling))
+ subprocess.Popen([cpulimit_exec, "--lazy", f"--pid={self._process.pid}", f"--limit={self._cpu_throttling}"], cwd=self.working_dir)
+ log.info(f"CPU throttled to {self._cpu_throttling}%")
except FileNotFoundError:
raise QemuError("cpulimit could not be found, please install it or deactivate CPU throttling")
except (OSError, subprocess.SubprocessError) as e:
- raise QemuError("Could not throttle CPU: {}".format(e))
+ raise QemuError(f"Could not throttle CPU: {e}")
async def create(self):
"""
@@ -1042,7 +1041,7 @@ class QemuVM(BaseNode):
await cancellable_wait_run_in_executor(md5sum, self._hdc_disk_image)
await cancellable_wait_run_in_executor(md5sum, self._hdd_disk_image)
- super(QemuVM, self).create()
+ super().create()
async def start(self):
"""
@@ -1059,7 +1058,7 @@ class QemuVM(BaseNode):
try:
info = socket.getaddrinfo(self._monitor_host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
if not info:
- raise QemuError("getaddrinfo returns an empty list on {}".format(self._monitor_host))
+ raise QemuError(f"getaddrinfo returns an empty list on {self._monitor_host}")
for res in info:
af, socktype, proto, _, sa = res
# let the OS find an unused port for the Qemu monitor
@@ -1068,7 +1067,7 @@ class QemuVM(BaseNode):
sock.bind(sa)
self._monitor = sock.getsockname()[1]
except OSError as e:
- raise QemuError("Could not find free port for the Qemu monitor: {}".format(e))
+ raise QemuError(f"Could not find free port for the Qemu monitor: {e}")
# check if there is enough RAM to run
self.check_available_ram(self.ram)
@@ -1076,24 +1075,24 @@ class QemuVM(BaseNode):
command = await self._build_command()
command_string = " ".join(shlex_quote(s) for s in command)
try:
- log.info("Starting QEMU with: {}".format(command_string))
+ log.info(f"Starting QEMU with: {command_string}")
self._stdout_file = os.path.join(self.working_dir, "qemu.log")
- log.info("logging to {}".format(self._stdout_file))
+ log.info(f"logging to {self._stdout_file}")
with open(self._stdout_file, "w", encoding="utf-8") as fd:
- fd.write("Start QEMU with {}\n\nExecution log:\n".format(command_string))
+ fd.write(f"Start QEMU with {command_string}\n\nExecution log:\n")
self.command_line = ' '.join(command)
self._process = await asyncio.create_subprocess_exec(*command,
stdout=fd,
stderr=subprocess.STDOUT,
cwd=self.working_dir)
- log.info('QEMU VM "{}" started PID={}'.format(self._name, self._process.pid))
+ log.info(f'QEMU VM "{self._name}" started PID={self._process.pid}')
self._command_line_changed = False
self.status = "started"
monitor_process(self._process, self._termination_callback)
except (OSError, subprocess.SubprocessError, UnicodeEncodeError) as e:
stdout = self.read_stdout()
- log.error("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout))
- raise QemuError("Could not start QEMU {}: {}\n{}".format(self.qemu_path, e, stdout))
+ log.error(f"Could not start QEMU {self.qemu_path}: {e}\n{stdout}")
+ raise QemuError(f"Could not start QEMU {self.qemu_path}: {e}\n{stdout}")
await self._set_process_priority()
if self._cpu_throttling:
@@ -1107,13 +1106,13 @@ class QemuVM(BaseNode):
for adapter_number, adapter in enumerate(self._ethernet_adapters):
nio = adapter.get_nio(0)
if nio:
- await self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
+ await self.add_ubridge_udp_connection(f"QEMU-{self._id}-{adapter_number}",
self._local_udp_tunnels[adapter_number][1],
nio)
if nio.suspend and self._replicate_network_connection_state:
- set_link_commands.append("set_link gns3-{} off".format(adapter_number))
+ set_link_commands.append(f"set_link gns3-{adapter_number} off")
elif self._replicate_network_connection_state:
- set_link_commands.append("set_link gns3-{} off".format(adapter_number))
+ set_link_commands.append(f"set_link gns3-{adapter_number} off")
if "-loadvm" not in command_string and self._replicate_network_connection_state:
# only set the link statuses if not restoring a previous VM state
@@ -1123,7 +1122,7 @@ class QemuVM(BaseNode):
if self.is_running():
await self.start_wrap_console()
except OSError as e:
- raise QemuError("Could not start Telnet QEMU console {}\n".format(e))
+ raise QemuError(f"Could not start Telnet QEMU console {e}\n")
async def _termination_callback(self, returncode):
"""
@@ -1137,7 +1136,7 @@ class QemuVM(BaseNode):
await self.stop()
# A return code of 1 seem fine on Windows
if returncode != 0 and (not sys.platform.startswith("win") or returncode != 1):
- self.project.emit("log.error", {"message": "QEMU process has stopped, return code: {}\n{}".format(returncode, self.read_stdout())})
+ self.project.emit("log.error", {"message": f"QEMU process has stopped, return code: {returncode}\n{self.read_stdout()}"})
async def stop(self):
"""
@@ -1149,7 +1148,7 @@ class QemuVM(BaseNode):
# stop the QEMU process
self._hw_virtualization = False
if self.is_running():
- log.info('Stopping QEMU VM "{}" PID={}'.format(self._name, self._process.pid))
+ log.info(f'Stopping QEMU VM "{self._name}" PID={self._process.pid}')
try:
if self.on_close == "save_vm_state":
@@ -1178,7 +1177,7 @@ class QemuVM(BaseNode):
except ProcessLookupError:
pass
if self._process.returncode is None:
- log.warning('QEMU VM "{}" PID={} is still running'.format(self._name, self._process.pid))
+ log.warning(f'QEMU VM "{self._name}" PID={self._process.pid} is still running')
self._process = None
self._stop_cpulimit()
if self.on_close != "save_vm_state":
@@ -1201,7 +1200,7 @@ class QemuVM(BaseNode):
while time.time() - begin < timeout:
await asyncio.sleep(0.01)
try:
- log.debug("Connecting to Qemu monitor on {}:{}".format(self._monitor_host, self._monitor))
+ log.debug(f"Connecting to Qemu monitor on {self._monitor_host}:{self._monitor}")
reader, writer = await asyncio.open_connection(self._monitor_host, self._monitor)
except (asyncio.TimeoutError, OSError) as e:
last_exception = e
@@ -1213,7 +1212,7 @@ class QemuVM(BaseNode):
log.warning("Could not connect to QEMU monitor on {}:{}: {}".format(self._monitor_host, self._monitor,
last_exception))
else:
- log.info("Connected to QEMU monitor on {}:{} after {:.4f} seconds".format(self._monitor_host, self._monitor, time.time() - begin))
+ log.info(f"Connected to QEMU monitor on {self._monitor_host}:{self._monitor} after {time.time() - begin:.4f} seconds")
return reader, writer
async def _control_vm(self, command, expected=None):
@@ -1228,7 +1227,7 @@ class QemuVM(BaseNode):
result = None
if self.is_running() and self._monitor:
- log.info("Execute QEMU monitor command: {}".format(command))
+ log.info(f"Execute QEMU monitor command: {command}")
reader, writer = await self._open_qemu_monitor_connection_vm()
if reader is None and writer is None:
return result
@@ -1242,9 +1241,9 @@ class QemuVM(BaseNode):
if not line or cmd_byte in line:
break
except asyncio.TimeoutError:
- log.warning("Missing echo of command '{}'".format(command))
+ log.warning(f"Missing echo of command '{command}'")
except OSError as e:
- log.warning("Could not write to QEMU monitor: {}".format(e))
+ log.warning(f"Could not write to QEMU monitor: {e}")
writer.close()
return result
if expected:
@@ -1258,9 +1257,9 @@ class QemuVM(BaseNode):
result = line.decode("utf-8").strip()
break
except asyncio.TimeoutError:
- log.warning("Timeout while waiting for result of command '{}'".format(command))
+ log.warning(f"Timeout while waiting for result of command '{command}'")
except (ConnectionError, EOFError) as e:
- log.warning("Could not read from QEMU monitor: {}".format(e))
+ log.warning(f"Could not read from QEMU monitor: {e}")
writer.close()
return result
@@ -1278,7 +1277,7 @@ class QemuVM(BaseNode):
return
for command in commands:
- log.info("Execute QEMU monitor command: {}".format(command))
+ log.info(f"Execute QEMU monitor command: {command}")
try:
cmd_byte = command.encode('ascii')
writer.write(cmd_byte + b"\n")
@@ -1287,9 +1286,9 @@ class QemuVM(BaseNode):
if not line or cmd_byte in line:
break
except asyncio.TimeoutError:
- log.warning("Missing echo of command '{}'".format(command))
+ log.warning(f"Missing echo of command '{command}'")
except OSError as e:
- log.warning("Could not write to QEMU monitor: {}".format(e))
+ log.warning(f"Could not write to QEMU monitor: {e}")
writer.close()
async def close(self):
@@ -1357,7 +1356,7 @@ class QemuVM(BaseNode):
self.status = "suspended"
log.debug("QEMU VM has been suspended")
else:
- log.info("QEMU VM is not running to be suspended, current status is {}".format(vm_status))
+ log.info(f"QEMU VM is not running to be suspended, current status is {vm_status}")
async def reload(self):
"""
@@ -1384,7 +1383,7 @@ class QemuVM(BaseNode):
self.status = "started"
log.debug("QEMU VM has been resumed")
else:
- log.info("QEMU VM is not paused to be resumed, current status is {}".format(vm_status))
+ log.info(f"QEMU VM is not paused to be resumed, current status is {vm_status}")
async def adapter_add_nio_binding(self, adapter_number, nio):
"""
@@ -1402,11 +1401,11 @@ class QemuVM(BaseNode):
if self.is_running():
try:
- await self.add_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
+ await self.add_ubridge_udp_connection(f"QEMU-{self._id}-{adapter_number}",
self._local_udp_tunnels[adapter_number][1],
nio)
if self._replicate_network_connection_state:
- await self._control_vm("set_link gns3-{} on".format(adapter_number))
+ await self._control_vm(f"set_link gns3-{adapter_number} on")
except (IndexError, KeyError):
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
@@ -1427,14 +1426,14 @@ class QemuVM(BaseNode):
if self.is_running():
try:
- await self.update_ubridge_udp_connection("QEMU-{}-{}".format(self._id, adapter_number),
+ await self.update_ubridge_udp_connection(f"QEMU-{self._id}-{adapter_number}",
self._local_udp_tunnels[adapter_number][1],
nio)
if self._replicate_network_connection_state:
if nio.suspend:
- await self._control_vm("set_link gns3-{} off".format(adapter_number))
+ await self._control_vm(f"set_link gns3-{adapter_number} off")
else:
- await self._control_vm("set_link gns3-{} on".format(adapter_number))
+ await self._control_vm(f"set_link gns3-{adapter_number} on")
except IndexError:
raise QemuError('Adapter {adapter_number} does not exist on QEMU VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
@@ -1457,8 +1456,8 @@ class QemuVM(BaseNode):
await self.stop_capture(adapter_number)
if self.is_running():
if self._replicate_network_connection_state:
- await self._control_vm("set_link gns3-{} off".format(adapter_number))
- await self._ubridge_send("bridge delete {name}".format(name="QEMU-{}-{}".format(self._id, adapter_number)))
+ await self._control_vm(f"set_link gns3-{adapter_number} off")
+ await self._ubridge_send("bridge delete {name}".format(name=f"QEMU-{self._id}-{adapter_number}"))
nio = adapter.get_nio(0)
if isinstance(nio, NIOUDP):
@@ -1489,7 +1488,7 @@ class QemuVM(BaseNode):
nio = adapter.get_nio(0)
if not nio:
- raise QemuError("Adapter {} is not connected".format(adapter_number))
+ raise QemuError(f"Adapter {adapter_number} is not connected")
return nio
@@ -1503,11 +1502,11 @@ class QemuVM(BaseNode):
nio = self.get_nio(adapter_number)
if nio.capturing:
- raise QemuError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
+ raise QemuError(f"Packet capture is already activated on adapter {adapter_number}")
nio.start_packet_capture(output_file)
if self.ubridge:
- await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="QEMU-{}-{}".format(self._id, adapter_number),
+ await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=f"QEMU-{self._id}-{adapter_number}",
output_file=output_file))
log.info("QEMU VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name,
@@ -1527,7 +1526,7 @@ class QemuVM(BaseNode):
nio.stop_packet_capture()
if self.ubridge:
- await self._ubridge_send('bridge stop_capture {name}'.format(name="QEMU-{}-{}".format(self._id, adapter_number)))
+ await self._ubridge_send('bridge stop_capture {name}'.format(name=f"QEMU-{self._id}-{adapter_number}"))
log.info("QEMU VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name,
id=self.id,
@@ -1555,7 +1554,7 @@ class QemuVM(BaseNode):
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
- log.warning("Could not read {}: {}".format(self._stdout_file, e))
+ log.warning(f"Could not read {self._stdout_file}: {e}")
return output
def read_qemu_img_stdout(self):
@@ -1569,7 +1568,7 @@ class QemuVM(BaseNode):
with open(self._qemu_img_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
- log.warning("Could not read {}: {}".format(self._qemu_img_stdout_file, e))
+ log.warning(f"Could not read {self._qemu_img_stdout_file}: {e}")
return output
def is_running(self):
@@ -1604,14 +1603,14 @@ class QemuVM(BaseNode):
"""
if self.is_running() and self.console_type != new_console_type:
- raise QemuError('"{name}" must be stopped to change the console type to {new_console_type}'.format(name=self._name, new_console_type=new_console_type))
+ raise QemuError(f'"{self._name}" must be stopped to change the console type to {new_console_type}')
super(QemuVM, QemuVM).console_type.__set__(self, new_console_type)
def _serial_options(self, internal_console_port, external_console_port):
if external_console_port:
- return ["-serial", "telnet:127.0.0.1:{},server,nowait".format(internal_console_port)]
+ return ["-serial", f"telnet:127.0.0.1:{internal_console_port},server,nowait"]
else:
return []
@@ -1619,7 +1618,7 @@ class QemuVM(BaseNode):
if port:
vnc_port = port - 5900 # subtract by 5900 to get the display number
- return ["-vnc", "{}:{}".format(self._manager.port_manager.console_host, vnc_port)]
+ return ["-vnc", f"{self._manager.port_manager.console_host}:{vnc_port}"]
else:
return []
@@ -1636,7 +1635,7 @@ class QemuVM(BaseNode):
else:
raise QemuError("IPv6 must be enabled in order to use the SPICE console")
return ["-spice",
- "addr={},port={},disable-ticketing".format(console_host, port),
+ f"addr={console_host},port={port},disable-ticketing",
"-vga", "qxl"]
else:
return []
@@ -1667,12 +1666,12 @@ class QemuVM(BaseNode):
elif self._console_type == "spice+agent":
return self._spice_with_agent_options(self.console)
elif self._console_type != "none":
- raise QemuError("Console type {} is unknown".format(self._console_type))
+ raise QemuError(f"Console type {self._console_type} is unknown")
def _aux_options(self):
if self._aux_type != "none" and self._aux_type == self._console_type:
- raise QemuError("Auxiliary console type {} cannot be the same as console type".format(self._aux_type))
+ raise QemuError(f"Auxiliary console type {self._aux_type} cannot be the same as console type")
if self._aux_type == "telnet" and self._wrap_aux:
return self._serial_options(self._internal_aux_port, self.aux)
@@ -1683,13 +1682,13 @@ class QemuVM(BaseNode):
elif self._aux_type == "spice+agent":
return self._spice_with_agent_options(self.aux)
elif self._aux_type != "none":
- raise QemuError("Auxiliary console type {} is unknown".format(self._aux_type))
+ raise QemuError(f"Auxiliary console type {self._aux_type} is unknown")
return []
def _monitor_options(self):
if self._monitor:
- return ["-monitor", "tcp:{}:{},server,nowait".format(self._monitor_host, self._monitor)]
+ return ["-monitor", f"tcp:{self._monitor_host}:{self._monitor},server,nowait"]
else:
return []
@@ -1707,29 +1706,29 @@ class QemuVM(BaseNode):
if f.startswith("qemu-img"):
qemu_img_path = os.path.join(qemu_path_dir, f)
except OSError as e:
- raise QemuError("Error while looking for qemu-img in {}: {}".format(qemu_path_dir, e))
+ raise QemuError(f"Error while looking for qemu-img in {qemu_path_dir}: {e}")
if not qemu_img_path:
- raise QemuError("Could not find qemu-img in {}".format(qemu_path_dir))
+ raise QemuError(f"Could not find qemu-img in {qemu_path_dir}")
return qemu_img_path
async def _qemu_img_exec(self, command):
self._qemu_img_stdout_file = os.path.join(self.working_dir, "qemu-img.log")
- log.info("logging to {}".format(self._qemu_img_stdout_file))
+ log.info(f"logging to {self._qemu_img_stdout_file}")
command_string = " ".join(shlex_quote(s) for s in command)
- log.info("Executing qemu-img with: {}".format(command_string))
+ log.info(f"Executing qemu-img with: {command_string}")
with open(self._qemu_img_stdout_file, "w", encoding="utf-8") as fd:
process = await asyncio.create_subprocess_exec(*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir)
retcode = await process.wait()
- log.info("{} returned with {}".format(self._get_qemu_img(), retcode))
+ log.info(f"{self._get_qemu_img()} returned with {retcode}")
return retcode
async def _create_linked_clone(self, disk_name, disk_image, disk):
try:
qemu_img_path = self._get_qemu_img()
- command = [qemu_img_path, "create", "-o", "backing_file={}".format(disk_image), "-f", "qcow2", disk]
+ command = [qemu_img_path, "create", "-o", f"backing_file={disk_image}", "-f", "qcow2", disk]
retcode = await self._qemu_img_exec(command)
if retcode:
stdout = self.read_qemu_img_stdout()
@@ -1738,7 +1737,7 @@ class QemuVM(BaseNode):
stdout))
except (OSError, subprocess.SubprocessError) as e:
stdout = self.read_qemu_img_stdout()
- raise QemuError("Could not create '{}' disk image: {}\n{}".format(disk_name, e, stdout))
+ raise QemuError(f"Could not create '{disk_name}' disk image: {e}\n{stdout}")
async def _mcopy(self, image, *args):
try:
@@ -1747,11 +1746,11 @@ class QemuVM(BaseNode):
mbr = img_file.read(512)
part_type, offset, signature = struct.unpack("<450xB3xL52xH", mbr)
if signature != 0xAA55:
- raise OSError("mcopy failure: {}: invalid MBR".format(image))
+ raise OSError(f"mcopy failure: {image}: invalid MBR")
if part_type not in (1, 4, 6, 11, 12, 14):
raise OSError("mcopy failure: {}: invalid partition type {:02X}"
.format(image, part_type))
- part_image = image + "@@{}S".format(offset)
+ part_image = image + f"@@{offset}S"
process = await asyncio.create_subprocess_exec(
"mcopy", "-i", part_image, *args,
@@ -1761,13 +1760,13 @@ class QemuVM(BaseNode):
(stdout, _) = await process.communicate()
retcode = process.returncode
except (OSError, subprocess.SubprocessError) as e:
- raise OSError("mcopy failure: {}".format(e))
+ raise OSError(f"mcopy failure: {e}")
if retcode != 0:
stdout = stdout.decode("utf-8").rstrip()
if stdout:
- raise OSError("mcopy failure: {}".format(stdout))
+ raise OSError(f"mcopy failure: {stdout}")
else:
- raise OSError("mcopy failure: return code {}".format(retcode))
+ raise OSError(f"mcopy failure: return code {retcode}")
async def _export_config(self):
disk_name = getattr(self, "config_disk_name")
@@ -1785,8 +1784,8 @@ class QemuVM(BaseNode):
os.remove(zip_file)
pack_zip(zip_file, config_dir)
except OSError as e:
- log.warning("Can't export config: {}".format(e))
- self.project.emit("log.warning", {"message": "{}: Can't export config: {}".format(self._name, e)})
+ log.warning(f"Can't export config: {e}")
+ self.project.emit("log.warning", {"message": f"{self._name}: Can't export config: {e}"})
shutil.rmtree(config_dir, ignore_errors=True)
async def _import_config(self):
@@ -1807,8 +1806,8 @@ class QemuVM(BaseNode):
await self._mcopy(disk_tmp, "-s", "-m", "-o", "--", *config_files, "::/")
os.replace(disk_tmp, disk)
except OSError as e:
- log.warning("Can't import config: {}".format(e))
- self.project.emit("log.warning", {"message": "{}: Can't import config: {}".format(self._name, e)})
+ log.warning(f"Can't import config: {e}")
+ self.project.emit("log.warning", {"message": f"{self._name}: Can't import config: {e}"})
if os.path.exists(disk_tmp):
os.remove(disk_tmp)
os.remove(zip_file)
@@ -1818,7 +1817,7 @@ class QemuVM(BaseNode):
options = []
extra_drive_options = ""
if format:
- extra_drive_options += ",format={}".format(format)
+ extra_drive_options += f",format={format}"
# From Qemu man page: if the filename contains comma, you must double it
# (for instance, "file=my,,file" to use file "my,file").
@@ -1826,27 +1825,27 @@ class QemuVM(BaseNode):
if interface == "sata":
# special case, sata controller doesn't exist in Qemu
- options.extend(["-device", 'ahci,id=ahci{}'.format(disk_index)])
- options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk{}'.format(disk, disk_index, disk_index, extra_drive_options)])
+ options.extend(["-device", f'ahci,id=ahci{disk_index}'])
+ options.extend(["-drive", f'file={disk},if=none,id=drive{disk_index},index={disk_index},media=disk{extra_drive_options}'])
qemu_version = await self.manager.get_qemu_version(self.qemu_path)
if qemu_version and parse_version(qemu_version) >= parse_version("4.2.0"):
# The ‘ide-drive’ device is deprecated since version 4.2.0
# https://qemu.readthedocs.io/en/latest/system/deprecated.html#ide-drive-since-4-2
- options.extend(["-device", 'ide-hd,drive=drive{},bus=ahci{}.0,id=drive{}'.format(disk_index, disk_index, disk_index)])
+ options.extend(["-device", f'ide-hd,drive=drive{disk_index},bus=ahci{disk_index}.0,id=drive{disk_index}'])
else:
- options.extend(["-device", 'ide-drive,drive=drive{},bus=ahci{}.0,id=drive{}'.format(disk_index, disk_index, disk_index)])
+ options.extend(["-device", f'ide-drive,drive=drive{disk_index},bus=ahci{disk_index}.0,id=drive{disk_index}'])
elif interface == "nvme":
- options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk{}'.format(disk, disk_index, disk_index, extra_drive_options)])
- options.extend(["-device", 'nvme,drive=drive{},serial={}'.format(disk_index, disk_index)])
+ options.extend(["-drive", f'file={disk},if=none,id=drive{disk_index},index={disk_index},media=disk{extra_drive_options}'])
+ options.extend(["-device", f'nvme,drive=drive{disk_index},serial={disk_index}'])
elif interface == "scsi":
- options.extend(["-device", 'virtio-scsi-pci,id=scsi{}'.format(disk_index)])
- options.extend(["-drive", 'file={},if=none,id=drive{},index={},media=disk{}'.format(disk, disk_index, disk_index, extra_drive_options)])
- options.extend(["-device", 'scsi-hd,drive=drive{}'.format(disk_index)])
+ options.extend(["-device", f'virtio-scsi-pci,id=scsi{disk_index}'])
+ options.extend(["-drive", f'file={disk},if=none,id=drive{disk_index},index={disk_index},media=disk{extra_drive_options}'])
+ options.extend(["-device", f'scsi-hd,drive=drive{disk_index}'])
#elif interface == "sd":
# options.extend(["-drive", 'file={},id=drive{},index={}{}'.format(disk, disk_index, disk_index, extra_drive_options)])
# options.extend(["-device", 'sd-card,drive=drive{},id=drive{}'.format(disk_index, disk_index, disk_index)])
else:
- options.extend(["-drive", 'file={},if={},index={},media=disk,id=drive{}{}'.format(disk, interface, disk_index, disk_index, extra_drive_options)])
+ options.extend(["-drive", f'file={disk},if={interface},index={disk_index},media=disk,id=drive{disk_index}{extra_drive_options}'])
return options
async def _disk_options(self):
@@ -1860,42 +1859,42 @@ class QemuVM(BaseNode):
if drive == 'd' and self._create_config_disk:
continue
- disk_image = getattr(self, "_hd{}_disk_image".format(drive))
+ disk_image = getattr(self, f"_hd{drive}_disk_image")
if not disk_image:
continue
- interface = getattr(self, "hd{}_disk_interface".format(drive))
+ interface = getattr(self, f"hd{drive}_disk_interface")
# fail-safe: use "ide" if there is a disk image and no interface type has been explicitly configured
if interface == "none":
interface = "ide"
- setattr(self, "hd{}_disk_interface".format(drive), interface)
+ setattr(self, f"hd{drive}_disk_interface", interface)
disk_name = "hd" + drive
if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
if os.path.islink(disk_image):
- raise QemuError("{} disk image '{}' linked to '{}' is not accessible".format(disk_name, disk_image, os.path.realpath(disk_image)))
+ raise QemuError(f"{disk_name} disk image '{disk_image}' linked to '{os.path.realpath(disk_image)}' is not accessible")
else:
- raise QemuError("{} disk image '{}' is not accessible".format(disk_name, disk_image))
+ raise QemuError(f"{disk_name} disk image '{disk_image}' is not accessible")
else:
try:
# check for corrupt disk image
retcode = await self._qemu_img_exec([qemu_img_path, "check", disk_image])
if retcode == 3:
# image has leaked clusters, but is not corrupted, let's try to fix it
- log.warning("Qemu image {} has leaked clusters".format(disk_image))
- if (await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", "{}".format(disk_image)])) == 3:
- self.project.emit("log.warning", {"message": "Qemu image '{}' has leaked clusters and could not be fixed".format(disk_image)})
+ log.warning(f"Qemu image {disk_image} has leaked clusters")
+ if (await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", f"{disk_image}"])) == 3:
+ self.project.emit("log.warning", {"message": f"Qemu image '{disk_image}' has leaked clusters and could not be fixed"})
elif retcode == 2:
# image is corrupted, let's try to fix it
- log.warning("Qemu image {} is corrupted".format(disk_image))
- if (await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", "{}".format(disk_image)])) == 2:
- self.project.emit("log.warning", {"message": "Qemu image '{}' is corrupted and could not be fixed".format(disk_image)})
+ log.warning(f"Qemu image {disk_image} is corrupted")
+ if (await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", f"{disk_image}"])) == 2:
+ self.project.emit("log.warning", {"message": f"Qemu image '{disk_image}' is corrupted and could not be fixed"})
except (OSError, subprocess.SubprocessError) as e:
stdout = self.read_qemu_img_stdout()
- raise QemuError("Could not check '{}' disk image: {}\n{}".format(disk_name, e, stdout))
+ raise QemuError(f"Could not check '{disk_name}' disk image: {e}\n{stdout}")
if self.linked_clone:
- disk = os.path.join(self.working_dir, "{}_disk.qcow2".format(disk_name))
+ disk = os.path.join(self.working_dir, f"{disk_name}_disk.qcow2")
if not os.path.exists(disk):
# create the disk
await self._create_linked_clone(disk_name, disk_image, disk)
@@ -1905,7 +1904,7 @@ class QemuVM(BaseNode):
qcow2 = Qcow2(disk)
await qcow2.rebase(qemu_img_path, disk_image)
except (Qcow2Error, OSError) as e:
- raise QemuError("Could not use qcow2 disk image '{}' for {} {}".format(disk_image, disk_name, e))
+ raise QemuError(f"Could not use qcow2 disk image '{disk_image}' for {disk_name} {e}")
else:
disk = disk_image
@@ -1927,7 +1926,7 @@ class QemuVM(BaseNode):
shutil.copyfile(disk_image, disk)
disk_exists = True
except OSError as e:
- log.warning("Could not create '{}' disk image: {}".format(disk_name, e))
+ log.warning(f"Could not create '{disk_name}' disk image: {e}")
if disk_exists:
options.extend(await self._disk_interface_options(disk, 3, self.hdd_disk_interface, "raw"))
@@ -1936,18 +1935,18 @@ class QemuVM(BaseNode):
async def resize_disk(self, drive_name, extend):
if self.is_running():
- raise QemuError("Cannot resize {} while the VM is running".format(drive_name))
+ raise QemuError(f"Cannot resize {drive_name} while the VM is running")
if self.linked_clone:
- disk_image_path = os.path.join(self.working_dir, "{}_disk.qcow2".format(drive_name))
+ disk_image_path = os.path.join(self.working_dir, f"{drive_name}_disk.qcow2")
if not os.path.exists(disk_image_path):
- disk_image = getattr(self, "_{}_disk_image".format(drive_name))
+ disk_image = getattr(self, f"_{drive_name}_disk_image")
await self._create_linked_clone(drive_name, disk_image, disk_image_path)
else:
- disk_image_path = getattr(self, "{}_disk_image".format(drive_name))
+ disk_image_path = getattr(self, f"{drive_name}_disk_image")
if not os.path.exists(disk_image_path):
- raise QemuError("Disk path '{}' does not exist".format(disk_image_path))
+ raise QemuError(f"Disk path '{disk_image_path}' does not exist")
qemu_img_path = self._get_qemu_img()
await self.manager.resize_disk(qemu_img_path, disk_image_path, extend)
@@ -1957,9 +1956,9 @@ class QemuVM(BaseNode):
if self._cdrom_image:
if not os.path.isfile(self._cdrom_image) or not os.path.exists(self._cdrom_image):
if os.path.islink(self._cdrom_image):
- raise QemuError("cdrom image '{}' linked to '{}' is not accessible".format(self._cdrom_image, os.path.realpath(self._cdrom_image)))
+ raise QemuError(f"cdrom image '{self._cdrom_image}' linked to '{os.path.realpath(self._cdrom_image)}' is not accessible")
else:
- raise QemuError("cdrom image '{}' is not accessible".format(self._cdrom_image))
+ raise QemuError(f"cdrom image '{self._cdrom_image}' is not accessible")
if self._hdc_disk_image:
raise QemuError("You cannot use a disk image on hdc disk and a CDROM image at the same time")
options.extend(["-cdrom", self._cdrom_image.replace(",", ",,")])
@@ -1971,9 +1970,9 @@ class QemuVM(BaseNode):
if self._bios_image:
if not os.path.isfile(self._bios_image) or not os.path.exists(self._bios_image):
if os.path.islink(self._bios_image):
- raise QemuError("bios image '{}' linked to '{}' is not accessible".format(self._bios_image, os.path.realpath(self._bios_image)))
+ raise QemuError(f"bios image '{self._bios_image}' linked to '{os.path.realpath(self._bios_image)}' is not accessible")
else:
- raise QemuError("bios image '{}' is not accessible".format(self._bios_image))
+ raise QemuError(f"bios image '{self._bios_image}' is not accessible")
options.extend(["-bios", self._bios_image.replace(",", ",,")])
return options
@@ -1983,16 +1982,16 @@ class QemuVM(BaseNode):
if self._initrd:
if not os.path.isfile(self._initrd) or not os.path.exists(self._initrd):
if os.path.islink(self._initrd):
- raise QemuError("initrd file '{}' linked to '{}' is not accessible".format(self._initrd, os.path.realpath(self._initrd)))
+ raise QemuError(f"initrd file '{self._initrd}' linked to '{os.path.realpath(self._initrd)}' is not accessible")
else:
- raise QemuError("initrd file '{}' is not accessible".format(self._initrd))
+ raise QemuError(f"initrd file '{self._initrd}' is not accessible")
options.extend(["-initrd", self._initrd.replace(",", ",,")])
if self._kernel_image:
if not os.path.isfile(self._kernel_image) or not os.path.exists(self._kernel_image):
if os.path.islink(self._kernel_image):
- raise QemuError("kernel image '{}' linked to '{}' is not accessible".format(self._kernel_image, os.path.realpath(self._kernel_image)))
+ raise QemuError(f"kernel image '{self._kernel_image}' linked to '{os.path.realpath(self._kernel_image)}' is not accessible")
else:
- raise QemuError("kernel image '{}' is not accessible".format(self._kernel_image))
+ raise QemuError(f"kernel image '{self._kernel_image}' is not accessible")
options.extend(["-kernel", self._kernel_image.replace(",", ",,")])
if self._kernel_command_line:
options.extend(["-append", self._kernel_command_line])
@@ -2041,7 +2040,7 @@ class QemuVM(BaseNode):
if self._legacy_networking:
# legacy QEMU networking syntax (-net)
if nio:
- network_options.extend(["-net", "nic,vlan={},macaddr={},model={}".format(adapter_number, mac, adapter_type)])
+ network_options.extend(["-net", f"nic,vlan={adapter_number},macaddr={mac},model={adapter_type}"])
if isinstance(nio, NIOUDP):
if patched_qemu:
# use patched Qemu syntax
@@ -2059,24 +2058,24 @@ class QemuVM(BaseNode):
"127.0.0.1",
nio.lport)])
elif isinstance(nio, NIOTAP):
- network_options.extend(["-net", "tap,name=gns3-{},ifname={}".format(adapter_number, nio.tap_device)])
+ network_options.extend(["-net", f"tap,name=gns3-{adapter_number},ifname={nio.tap_device}"])
else:
- network_options.extend(["-net", "nic,vlan={},macaddr={},model={}".format(adapter_number, mac, adapter_type)])
+ network_options.extend(["-net", f"nic,vlan={adapter_number},macaddr={mac},model={adapter_type}"])
else:
# newer QEMU networking syntax
- device_string = "{},mac={}".format(adapter_type, mac)
+ device_string = f"{adapter_type},mac={mac}"
bridge_id = math.floor(pci_device_id / 32)
if bridge_id > 0:
if pci_bridges_created < bridge_id:
- network_options.extend(["-device", "i82801b11-bridge,id=dmi_pci_bridge{bridge_id}".format(bridge_id=bridge_id)])
+ network_options.extend(["-device", f"i82801b11-bridge,id=dmi_pci_bridge{bridge_id}"])
network_options.extend(["-device", "pci-bridge,id=pci-bridge{bridge_id},bus=dmi_pci_bridge{bridge_id},chassis_nr=0x1,addr=0x{bridge_id},shpc=off".format(bridge_id=bridge_id)])
pci_bridges_created += 1
addr = pci_device_id % 32
- device_string = "{},bus=pci-bridge{bridge_id},addr=0x{addr:02x}".format(device_string, bridge_id=bridge_id, addr=addr)
+ device_string = f"{device_string},bus=pci-bridge{bridge_id},addr=0x{addr:02x}"
pci_device_id += 1
if nio:
- network_options.extend(["-device", "{},netdev=gns3-{}".format(device_string, adapter_number)])
+ network_options.extend(["-device", f"{device_string},netdev=gns3-{adapter_number}"])
if isinstance(nio, NIOUDP):
network_options.extend(["-netdev", "socket,id=gns3-{},udp={}:{},localaddr={}:{}".format(adapter_number,
nio.rhost,
@@ -2084,7 +2083,7 @@ class QemuVM(BaseNode):
"127.0.0.1",
nio.lport)])
elif isinstance(nio, NIOTAP):
- network_options.extend(["-netdev", "tap,id=gns3-{},ifname={},script=no,downscript=no".format(adapter_number, nio.tap_device)])
+ network_options.extend(["-netdev", f"tap,id=gns3-{adapter_number},ifname={nio.tap_device},script=no,downscript=no"])
else:
network_options.extend(["-device", device_string])
@@ -2136,13 +2135,13 @@ class QemuVM(BaseNode):
# HAXM is only available starting with Qemu version 2.9.0
version = await self.manager.get_qemu_version(self.qemu_path)
if version and parse_version(version) < parse_version("2.9.0"):
- raise QemuError("HAXM acceleration can only be enable for Qemu version 2.9.0 and above (current version: {})".format(version))
+ raise QemuError(f"HAXM acceleration can only be enable for Qemu version 2.9.0 and above (current version: {version})")
# check if HAXM is installed
version = self.manager.get_haxm_windows_version()
if version is None:
raise QemuError("HAXM acceleration support is not installed on this host")
- log.info("HAXM support version {} detected".format(version))
+ log.info(f"HAXM support version {version} detected")
# check if the HAXM service is running
from gns3server.utils.windows_service import check_windows_service_is_running
@@ -2167,12 +2166,12 @@ class QemuVM(BaseNode):
drives = ["a", "b", "c", "d"]
qemu_img_path = self._get_qemu_img()
for disk_index, drive in enumerate(drives):
- disk_image = getattr(self, "_hd{}_disk_image".format(drive))
+ disk_image = getattr(self, f"_hd{drive}_disk_image")
if not disk_image:
continue
try:
if self.linked_clone:
- disk = os.path.join(self.working_dir, "hd{}_disk.qcow2".format(drive))
+ disk = os.path.join(self.working_dir, f"hd{drive}_disk.qcow2")
else:
disk = disk_image
if not os.path.exists(disk):
@@ -2182,7 +2181,7 @@ class QemuVM(BaseNode):
try:
json_data = json.loads(output)
except ValueError as e:
- raise QemuError("Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {}".format(e))
+ raise QemuError(f"Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {e}")
if "snapshots" in json_data:
for snapshot in json_data["snapshots"]:
if snapshot["name"] == snapshot_name:
@@ -2191,23 +2190,23 @@ class QemuVM(BaseNode):
retcode = await self._qemu_img_exec(command)
if retcode:
stdout = self.read_qemu_img_stdout()
- log.warning("Could not delete saved VM state from disk {}: {}".format(disk, stdout))
+ log.warning(f"Could not delete saved VM state from disk {disk}: {stdout}")
else:
- log.info("Deleted saved VM state from disk {}".format(disk))
+ log.info(f"Deleted saved VM state from disk {disk}")
except subprocess.SubprocessError as e:
- raise QemuError("Error while looking for the Qemu VM saved state snapshot: {}".format(e))
+ raise QemuError(f"Error while looking for the Qemu VM saved state snapshot: {e}")
async def _saved_state_option(self, snapshot_name="GNS3_SAVED_STATE"):
drives = ["a", "b", "c", "d"]
qemu_img_path = self._get_qemu_img()
for disk_index, drive in enumerate(drives):
- disk_image = getattr(self, "_hd{}_disk_image".format(drive))
+ disk_image = getattr(self, f"_hd{drive}_disk_image")
if not disk_image:
continue
try:
if self.linked_clone:
- disk = os.path.join(self.working_dir, "hd{}_disk.qcow2".format(drive))
+ disk = os.path.join(self.working_dir, f"hd{drive}_disk.qcow2")
else:
disk = disk_image
if not os.path.exists(disk):
@@ -2217,7 +2216,7 @@ class QemuVM(BaseNode):
try:
json_data = json.loads(output)
except ValueError as e:
- raise QemuError("Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {}".format(e))
+ raise QemuError(f"Invalid JSON data returned by qemu-img while looking for the Qemu VM saved state snapshot: {e}")
if "snapshots" in json_data:
for snapshot in json_data["snapshots"]:
if snapshot["name"] == snapshot_name:
@@ -2227,7 +2226,7 @@ class QemuVM(BaseNode):
return ["-loadvm", snapshot_name.replace(",", ",,")]
except subprocess.SubprocessError as e:
- raise QemuError("Error while looking for the Qemu VM saved state snapshot: {}".format(e))
+ raise QemuError(f"Error while looking for the Qemu VM saved state snapshot: {e}")
return []
async def _build_command(self):
@@ -2248,12 +2247,12 @@ class QemuVM(BaseNode):
additional_options = additional_options.replace("%console-port%", str(self._console))
command = [self.qemu_path]
command.extend(["-name", vm_name])
- command.extend(["-m", "{}M".format(self._ram)])
+ command.extend(["-m", f"{self._ram}M"])
# set the maximum number of the hotpluggable CPUs to match the number of CPUs to avoid issues.
maxcpus = self._maxcpus
if self._cpus > maxcpus:
maxcpus = self._cpus
- command.extend(["-smp", "cpus={},maxcpus={},sockets=1".format(self._cpus, maxcpus)])
+ command.extend(["-smp", f"cpus={self._cpus},maxcpus={maxcpus},sockets=1"])
if (await self._run_with_hardware_acceleration(self.qemu_path, self._options)):
if sys.platform.startswith("linux"):
command.extend(["-enable-kvm"])
@@ -2264,28 +2263,28 @@ class QemuVM(BaseNode):
command.extend(["-machine", "smm=off"])
elif sys.platform.startswith("win") or sys.platform.startswith("darwin"):
command.extend(["-enable-hax"])
- command.extend(["-boot", "order={}".format(self._boot_priority)])
+ command.extend(["-boot", f"order={self._boot_priority}"])
command.extend(self._bios_option())
command.extend(self._cdrom_option())
- command.extend((await self._disk_options()))
+ command.extend(await self._disk_options())
command.extend(self._linux_boot_options())
if "-uuid" not in additional_options:
command.extend(["-uuid", self._id])
command.extend(self._console_options())
command.extend(self._aux_options())
command.extend(self._monitor_options())
- command.extend((await self._network_options()))
+ command.extend(await self._network_options())
if self.on_close != "save_vm_state":
await self._clear_save_vm_stated()
else:
- command.extend((await self._saved_state_option()))
+ command.extend(await self._saved_state_option())
if self._console_type == "telnet":
- command.extend((await self._disable_graphics()))
+ command.extend(await self._disable_graphics())
if additional_options:
try:
command.extend(shlex.split(additional_options))
except ValueError as e:
- raise QemuError("Invalid additional options: {} error {}".format(additional_options, e))
+ raise QemuError(f"Invalid additional options: {additional_options} error {e}")
return command
def __json__(self):
diff --git a/gns3server/compute/qemu/utils/guest_cid.py b/gns3server/compute/qemu/utils/guest_cid.py
index 22218b31..32d83ea8 100644
--- a/gns3server/compute/qemu/utils/guest_cid.py
+++ b/gns3server/compute/qemu/utils/guest_cid.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 GNS3 Technologies Inc.
#
@@ -30,7 +29,7 @@ def get_next_guest_cid(nodes):
:return: integer first free cid
"""
- used = set([n.guest_cid for n in nodes])
+ used = {n.guest_cid for n in nodes}
pool = set(range(3, 65535))
try:
return (pool - used).pop()
diff --git a/gns3server/compute/qemu/utils/qcow2.py b/gns3server/compute/qemu/utils/qcow2.py
index efbb54fe..4df55aa6 100644
--- a/gns3server/compute/qemu/utils/qcow2.py
+++ b/gns3server/compute/qemu/utils/qcow2.py
@@ -65,10 +65,10 @@ class Qcow2:
self.magic, self.version, self.backing_file_offset, self.backing_file_size = struct.unpack_from(struct_format, content)
except struct.error:
- raise Qcow2Error("Invalid file header for {}".format(self._path))
+ raise Qcow2Error(f"Invalid file header for {self._path}")
if self.magic != 1363560955: # The first 4 bytes contain the characters 'Q', 'F', 'I' followed by 0xfb.
- raise Qcow2Error("Invalid magic for {}".format(self._path))
+ raise Qcow2Error(f"Invalid magic for {self._path}")
@property
def backing_file(self):
diff --git a/gns3server/compute/traceng/__init__.py b/gns3server/compute/traceng/__init__.py
index 66d7b627..ff172156 100644
--- a/gns3server/compute/traceng/__init__.py
+++ b/gns3server/compute/traceng/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/traceng/traceng_error.py b/gns3server/compute/traceng/traceng_error.py
index 623f5f59..2a6a1323 100644
--- a/gns3server/compute/traceng/traceng_error.py
+++ b/gns3server/compute/traceng/traceng_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/traceng/traceng_vm.py b/gns3server/compute/traceng/traceng_vm.py
index f8f8c926..e8092a14 100644
--- a/gns3server/compute/traceng/traceng_vm.py
+++ b/gns3server/compute/traceng/traceng_vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 GNS3 Technologies Inc.
#
@@ -105,10 +104,10 @@ class TraceNGVM(BaseNode):
self.ubridge_path
if not os.path.isfile(path):
- raise TraceNGError("TraceNG program '{}' is not accessible".format(path))
+ raise TraceNGError(f"TraceNG program '{path}' is not accessible")
if not os.access(path, os.X_OK):
- raise TraceNGError("TraceNG program '{}' is not executable".format(path))
+ raise TraceNGError(f"TraceNG program '{path}' is not executable")
def __json__(self):
@@ -159,7 +158,7 @@ class TraceNGVM(BaseNode):
if ip_address:
ipaddress.IPv4Address(ip_address)
except ipaddress.AddressValueError:
- raise TraceNGError("Invalid IP address: {}\n".format(ip_address))
+ raise TraceNGError(f"Invalid IP address: {ip_address}\n")
self._ip_address = ip_address
log.info("{module}: {name} [{id}] set IP address to {ip_address}".format(module=self.manager.module_name,
@@ -204,7 +203,7 @@ class TraceNGVM(BaseNode):
command = self._build_command(destination)
await self._stop_ubridge() # make use we start with a fresh uBridge instance
try:
- log.info("Starting TraceNG: {}".format(command))
+ log.info(f"Starting TraceNG: {command}")
flags = 0
if hasattr(subprocess, "CREATE_NEW_CONSOLE"):
flags = subprocess.CREATE_NEW_CONSOLE
@@ -216,14 +215,14 @@ class TraceNGVM(BaseNode):
await self._start_ubridge()
if nio:
- await self.add_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio)
+ await self.add_ubridge_udp_connection(f"TraceNG-{self._id}", self._local_udp_tunnel[1], nio)
- log.info("TraceNG instance {} started PID={}".format(self.name, self._process.pid))
+ log.info(f"TraceNG instance {self.name} started PID={self._process.pid}")
self._started = True
self.status = "started"
except (OSError, subprocess.SubprocessError) as e:
- log.error("Could not start TraceNG {}: {}\n".format(self._traceng_path(), e))
- raise TraceNGError("Could not start TraceNG {}: {}\n".format(self._traceng_path(), e))
+ log.error(f"Could not start TraceNG {self._traceng_path()}: {e}\n")
+ raise TraceNGError(f"Could not start TraceNG {self._traceng_path()}: {e}\n")
def _termination_callback(self, returncode):
"""
@@ -238,7 +237,7 @@ class TraceNGVM(BaseNode):
self.status = "stopped"
self._process = None
if returncode != 0:
- self.project.emit("log.error", {"message": "TraceNG process has stopped, return code: {}\n".format(returncode)})
+ self.project.emit("log.error", {"message": f"TraceNG process has stopped, return code: {returncode}\n"})
async def stop(self):
"""
@@ -256,9 +255,9 @@ class TraceNGVM(BaseNode):
try:
self._process.kill()
except OSError as e:
- log.error("Cannot stop the TraceNG process: {}".format(e))
+ log.error(f"Cannot stop the TraceNG process: {e}")
if self._process.returncode is None:
- log.warning('TraceNG VM "{}" with PID={} is still running'.format(self._name, self._process.pid))
+ log.warning(f'TraceNG VM "{self._name}" with PID={self._process.pid} is still running')
self._process = None
self._started = False
@@ -277,7 +276,7 @@ class TraceNGVM(BaseNode):
Terminate the process if running
"""
- log.info("Stopping TraceNG instance {} PID={}".format(self.name, self._process.pid))
+ log.info(f"Stopping TraceNG instance {self.name} PID={self._process.pid}")
#if sys.platform.startswith("win32"):
# self._process.send_signal(signal.CTRL_BREAK_EVENT)
#else:
@@ -311,7 +310,7 @@ class TraceNGVM(BaseNode):
port_number=port_number))
if self.is_running():
- await self.add_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio)
+ await self.add_ubridge_udp_connection(f"TraceNG-{self._id}", self._local_udp_tunnel[1], nio)
self._ethernet_adapter.add_nio(port_number, nio)
log.info('TraceNG "{name}" [{id}]: {nio} added to port {port_number}'.format(name=self._name,
@@ -333,7 +332,7 @@ class TraceNGVM(BaseNode):
raise TraceNGError("Port {port_number} doesn't exist on adapter {adapter}".format(adapter=self._ethernet_adapter,
port_number=port_number))
if self.is_running():
- await self.update_ubridge_udp_connection("TraceNG-{}".format(self._id), self._local_udp_tunnel[1], nio)
+ await self.update_ubridge_udp_connection(f"TraceNG-{self._id}", self._local_udp_tunnel[1], nio)
async def port_remove_nio_binding(self, port_number):
"""
@@ -350,7 +349,7 @@ class TraceNGVM(BaseNode):
await self.stop_capture(port_number)
if self.is_running():
- await self._ubridge_send("bridge delete {name}".format(name="TraceNG-{}".format(self._id)))
+ await self._ubridge_send("bridge delete {name}".format(name=f"TraceNG-{self._id}"))
nio = self._ethernet_adapter.get_nio(port_number)
if isinstance(nio, NIOUDP):
@@ -377,7 +376,7 @@ class TraceNGVM(BaseNode):
port_number=port_number))
nio = self._ethernet_adapter.get_nio(port_number)
if not nio:
- raise TraceNGError("Port {} is not connected".format(port_number))
+ raise TraceNGError(f"Port {port_number} is not connected")
return nio
async def start_capture(self, port_number, output_file):
@@ -390,11 +389,11 @@ class TraceNGVM(BaseNode):
nio = self.get_nio(port_number)
if nio.capturing:
- raise TraceNGError("Packet capture is already activated on port {port_number}".format(port_number=port_number))
+ raise TraceNGError(f"Packet capture is already activated on port {port_number}")
nio.start_packet_capture(output_file)
if self.ubridge:
- await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="TraceNG-{}".format(self._id),
+ await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=f"TraceNG-{self._id}",
output_file=output_file))
log.info("TraceNG '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name,
@@ -414,7 +413,7 @@ class TraceNGVM(BaseNode):
nio.stop_packet_capture()
if self.ubridge:
- await self._ubridge_send('bridge stop_capture {name}'.format(name="TraceNG-{}".format(self._id)))
+ await self._ubridge_send('bridge stop_capture {name}'.format(name=f"TraceNG-{self._id}"))
log.info("TraceNG '{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name,
id=self.id,
@@ -450,7 +449,7 @@ class TraceNGVM(BaseNode):
try:
command.extend(["-b", socket.gethostbyname(nio.rhost)]) # destination host, we need to resolve the hostname because TraceNG doesn't support it
except socket.gaierror as e:
- raise TraceNGError("Can't resolve hostname {}: {}".format(nio.rhost, e))
+ raise TraceNGError(f"Can't resolve hostname {nio.rhost}: {e}")
command.extend(["-s", "ICMP"]) # Use ICMP probe type by default
command.extend(["-f", self._ip_address]) # source IP address to trace from
diff --git a/gns3server/compute/ubridge/hypervisor.py b/gns3server/compute/ubridge/hypervisor.py
index ba98ef33..eacef080 100644
--- a/gns3server/compute/ubridge/hypervisor.py
+++ b/gns3server/compute/ubridge/hypervisor.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -58,7 +57,7 @@ class Hypervisor(UBridgeHypervisor):
port = None
info = socket.getaddrinfo(host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
if not info:
- raise UbridgeError("getaddrinfo returns an empty list on {}".format(host))
+ raise UbridgeError(f"getaddrinfo returns an empty list on {host}")
for res in info:
af, socktype, proto, _, sa = res
# let the OS find an unused port for the uBridge hypervisor
@@ -68,7 +67,7 @@ class Hypervisor(UBridgeHypervisor):
port = sock.getsockname()[1]
break
except OSError as e:
- raise UbridgeError("Could not find free port for the uBridge hypervisor: {}".format(e))
+ raise UbridgeError(f"Could not find free port for the uBridge hypervisor: {e}")
super().__init__(host, port)
self._project = project
@@ -146,11 +145,11 @@ class Hypervisor(UBridgeHypervisor):
# to work for IOU nodes.
minimum_required_version = "0.9.14"
if parse_version(self._version) < parse_version(minimum_required_version):
- raise UbridgeError("uBridge executable version must be >= {}".format(minimum_required_version))
+ raise UbridgeError(f"uBridge executable version must be >= {minimum_required_version}")
else:
- raise UbridgeError("Could not determine uBridge version for {}".format(self._path))
+ raise UbridgeError(f"Could not determine uBridge version for {self._path}")
except (OSError, subprocess.SubprocessError) as e:
- raise UbridgeError("Error while looking for uBridge version: {}".format(e))
+ raise UbridgeError(f"Error while looking for uBridge version: {e}")
async def start(self):
"""
@@ -166,9 +165,9 @@ class Hypervisor(UBridgeHypervisor):
await self._check_ubridge_version(env)
try:
command = self._build_command()
- log.info("starting ubridge: {}".format(command))
+ log.info(f"starting ubridge: {command}")
self._stdout_file = os.path.join(self._working_dir, "ubridge.log")
- log.info("logging to {}".format(self._stdout_file))
+ log.info(f"logging to {self._stdout_file}")
with open(self._stdout_file, "w", encoding="utf-8") as fd:
self._process = await asyncio.create_subprocess_exec(*command,
stdout=fd,
@@ -176,14 +175,14 @@ class Hypervisor(UBridgeHypervisor):
cwd=self._working_dir,
env=env)
- log.info("ubridge started PID={}".format(self._process.pid))
+ log.info(f"ubridge started PID={self._process.pid}")
# recv: Bad address is received by uBridge when a docker image stops by itself
# see https://github.com/GNS3/gns3-gui/issues/2957
#monitor_process(self._process, self._termination_callback)
except (OSError, subprocess.SubprocessError) as e:
ubridge_stdout = self.read_stdout()
- log.error("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
- raise UbridgeError("Could not start ubridge: {}\n{}".format(e, ubridge_stdout))
+ log.error(f"Could not start ubridge: {e}\n{ubridge_stdout}")
+ raise UbridgeError(f"Could not start ubridge: {e}\n{ubridge_stdout}")
def _termination_callback(self, returncode):
"""
@@ -193,7 +192,7 @@ class Hypervisor(UBridgeHypervisor):
"""
if returncode != 0:
- error_msg = "uBridge process has stopped, return code: {}\n{}\n".format(returncode, self.read_stdout())
+ error_msg = f"uBridge process has stopped, return code: {returncode}\n{self.read_stdout()}\n"
log.error(error_msg)
self._project.emit("log.error", {"message": error_msg})
else:
@@ -205,13 +204,13 @@ class Hypervisor(UBridgeHypervisor):
"""
if self.is_running():
- log.info("Stopping uBridge process PID={}".format(self._process.pid))
+ log.info(f"Stopping uBridge process PID={self._process.pid}")
await UBridgeHypervisor.stop(self)
try:
await wait_for_process_termination(self._process, timeout=3)
except asyncio.TimeoutError:
if self._process and self._process.returncode is None:
- log.warning("uBridge process {} is still running... killing it".format(self._process.pid))
+ log.warning(f"uBridge process {self._process.pid} is still running... killing it")
try:
self._process.kill()
except ProcessLookupError:
@@ -221,7 +220,7 @@ class Hypervisor(UBridgeHypervisor):
try:
os.remove(self._stdout_file)
except OSError as e:
- log.warning("could not delete temporary uBridge log file: {}".format(e))
+ log.warning(f"could not delete temporary uBridge log file: {e}")
self._process = None
self._started = False
@@ -237,7 +236,7 @@ class Hypervisor(UBridgeHypervisor):
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
- log.warning("could not read {}: {}".format(self._stdout_file, e))
+ log.warning(f"could not read {self._stdout_file}: {e}")
return output
def is_running(self):
@@ -258,7 +257,7 @@ class Hypervisor(UBridgeHypervisor):
"""
command = [self._path]
- command.extend(["-H", "{}:{}".format(self._host, self._port)])
+ command.extend(["-H", f"{self._host}:{self._port}"])
if log.getEffectiveLevel() == logging.DEBUG:
command.extend(["-d", "1"])
return command
diff --git a/gns3server/compute/ubridge/ubridge_error.py b/gns3server/compute/ubridge/ubridge_error.py
index 9fcea58a..3b02ec7b 100644
--- a/gns3server/compute/ubridge/ubridge_error.py
+++ b/gns3server/compute/ubridge/ubridge_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/ubridge/ubridge_hypervisor.py b/gns3server/compute/ubridge/ubridge_hypervisor.py
index 1cbb6294..4e927d22 100644
--- a/gns3server/compute/ubridge/ubridge_hypervisor.py
+++ b/gns3server/compute/ubridge/ubridge_hypervisor.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -78,9 +77,9 @@ class UBridgeHypervisor:
break
if not connection_success:
- raise UbridgeError("Couldn't connect to hypervisor on {}:{} :{}".format(host, self._port, last_exception))
+ raise UbridgeError(f"Couldn't connect to hypervisor on {host}:{self._port} :{last_exception}")
else:
- log.info("Connected to uBridge hypervisor on {}:{} after {:.4f} seconds".format(host, self._port, time.time() - begin))
+ log.info(f"Connected to uBridge hypervisor on {host}:{self._port} after {time.time() - begin:.4f} seconds")
try:
await asyncio.sleep(0.1)
@@ -123,7 +122,7 @@ class UBridgeHypervisor:
await self._writer.drain()
self._writer.close()
except OSError as e:
- log.debug("Stopping hypervisor {}:{} {}".format(self._host, self._port, e))
+ log.debug(f"Stopping hypervisor {self._host}:{self._port} {e}")
self._reader = self._writer = None
async def reset(self):
@@ -201,7 +200,7 @@ class UBridgeHypervisor:
try:
command = command.strip() + '\n'
- log.debug("sending {}".format(command))
+ log.debug(f"sending {command}")
self._writer.write(command.encode())
await self._writer.drain()
except OSError as e:
@@ -225,7 +224,7 @@ class UBridgeHypervisor:
# Sometimes WinError 64 (ERROR_NETNAME_DELETED) is returned here on Windows.
# These happen if connection reset is received before IOCP could complete
# a previous operation. Ignore and try again....
- log.warning("Connection reset received while reading uBridge response: {}".format(e))
+ log.warning(f"Connection reset received while reading uBridge response: {e}")
continue
if not chunk:
if retries > max_retries:
@@ -270,5 +269,5 @@ class UBridgeHypervisor:
if self.success_re.search(data[index]):
data[index] = data[index][4:]
- log.debug("returned result {}".format(data))
+ log.debug(f"returned result {data}")
return data
diff --git a/gns3server/compute/virtualbox/__init__.py b/gns3server/compute/virtualbox/__init__.py
index 62f9131f..709e28e3 100644
--- a/gns3server/compute/virtualbox/__init__.py
+++ b/gns3server/compute/virtualbox/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -80,16 +79,16 @@ class VirtualBox(BaseManager):
vboxmanage_path = shutil.which("vboxmanage")
if vboxmanage_path and not os.path.exists(vboxmanage_path):
- log.error("VBoxManage path '{}' doesn't exist".format(vboxmanage_path))
+ log.error(f"VBoxManage path '{vboxmanage_path}' doesn't exist")
if not vboxmanage_path:
raise VirtualBoxError("Could not find VBoxManage, please reboot if VirtualBox has just been installed")
if not os.path.isfile(vboxmanage_path):
- raise VirtualBoxError("VBoxManage '{}' is not accessible".format(vboxmanage_path))
+ raise VirtualBoxError(f"VBoxManage '{vboxmanage_path}' is not accessible")
if not os.access(vboxmanage_path, os.X_OK):
raise VirtualBoxError("VBoxManage is not executable")
if os.path.basename(vboxmanage_path) not in ["VBoxManage", "VBoxManage.exe", "vboxmanage"]:
- raise VirtualBoxError("Invalid VBoxManage executable name {}".format(os.path.basename(vboxmanage_path)))
+ raise VirtualBoxError(f"Invalid VBoxManage executable name {os.path.basename(vboxmanage_path)}")
self._vboxmanage_path = vboxmanage_path
return vboxmanage_path
@@ -109,20 +108,20 @@ class VirtualBox(BaseManager):
command = [vboxmanage_path, "--nologo", subcommand]
command.extend(args)
command_string = " ".join(command)
- log.info("Executing VBoxManage with command: {}".format(command_string))
+ log.info(f"Executing VBoxManage with command: {command_string}")
try:
process = await asyncio.create_subprocess_exec(*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
except (OSError, subprocess.SubprocessError) as e:
- raise VirtualBoxError("Could not execute VBoxManage: {}".format(e))
+ raise VirtualBoxError(f"Could not execute VBoxManage: {e}")
try:
stdout_data, stderr_data = await asyncio.wait_for(process.communicate(), timeout=timeout)
except asyncio.TimeoutError:
- raise VirtualBoxError("VBoxManage has timed out after {} seconds!".format(timeout))
+ raise VirtualBoxError(f"VBoxManage has timed out after {timeout} seconds!")
if process.returncode:
vboxmanage_error = stderr_data.decode("utf-8", errors="ignore")
- raise VirtualBoxError("VirtualBox has returned an error: {}".format(vboxmanage_error))
+ raise VirtualBoxError(f"VirtualBox has returned an error: {vboxmanage_error}")
return stdout_data.decode("utf-8", errors="ignore").splitlines()
@@ -161,11 +160,11 @@ class VirtualBox(BaseManager):
await super().project_closed(project)
hdd_files_to_close = await self._find_inaccessible_hdd_files()
for hdd_file in hdd_files_to_close:
- log.info("Closing VirtualBox VM disk file {}".format(os.path.basename(hdd_file)))
+ log.info(f"Closing VirtualBox VM disk file {os.path.basename(hdd_file)}")
try:
await self.execute("closemedium", ["disk", hdd_file])
except VirtualBoxError as e:
- log.warning("Could not close VirtualBox VM disk file {}: {}".format(os.path.basename(hdd_file), e))
+ log.warning(f"Could not close VirtualBox VM disk file {os.path.basename(hdd_file)}: {e}")
continue
async def list_vms(self, allow_clone=False):
@@ -212,4 +211,4 @@ class VirtualBox(BaseManager):
:returns: working directory name
"""
- return os.path.join("vbox", "{}".format(name))
+ return os.path.join("vbox", f"{name}")
diff --git a/gns3server/compute/virtualbox/virtualbox_error.py b/gns3server/compute/virtualbox/virtualbox_error.py
index d950f09d..91f87e38 100644
--- a/gns3server/compute/virtualbox/virtualbox_error.py
+++ b/gns3server/compute/virtualbox/virtualbox_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/virtualbox/virtualbox_vm.py b/gns3server/compute/virtualbox/virtualbox_vm.py
index 80424ef8..acd248fb 100644
--- a/gns3server/compute/virtualbox/virtualbox_vm.py
+++ b/gns3server/compute/virtualbox/virtualbox_vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -123,7 +122,7 @@ class VirtualBoxVM(BaseNode):
name, value = info.split('=', 1)
if name == "VMState":
return value.strip('"')
- raise VirtualBoxError("Could not get VM state for {}".format(self._vmname))
+ raise VirtualBoxError(f"Could not get VM state for {self._vmname}")
async def _control_vm(self, params):
"""
@@ -165,10 +164,10 @@ class VirtualBoxVM(BaseNode):
found = True
if node.project != self.project:
if trial >= 30:
- raise VirtualBoxError("Sorry a node without the linked clone setting enabled can only be used once on your server.\n{} is already used by {} in project {}".format(self.vmname, node.name, self.project.name))
+ raise VirtualBoxError(f"Sorry a node without the linked clone setting enabled can only be used once on your server.\n{self.vmname} is already used by {node.name} in project {self.project.name}")
else:
if trial >= 5:
- raise VirtualBoxError("Sorry a node without the linked clone setting enabled can only be used once on your server.\n{} is already used by {} in this project".format(self.vmname, node.name))
+ raise VirtualBoxError(f"Sorry a node without the linked clone setting enabled can only be used once on your server.\n{self.vmname} is already used by {node.name} in this project")
if not found:
return
trial += 1
@@ -179,7 +178,7 @@ class VirtualBoxVM(BaseNode):
vm_info = await self._get_vm_info()
self._uuid = vm_info.get("UUID", self._uuid)
if not self._uuid:
- raise VirtualBoxError("Could not find any UUID for VM '{}'".format(self._vmname))
+ raise VirtualBoxError(f"Could not find any UUID for VM '{self._vmname}'")
if "memory" in vm_info:
self._ram = int(vm_info["memory"])
@@ -190,10 +189,10 @@ class VirtualBoxVM(BaseNode):
await self._get_system_properties()
if "API version" not in self._system_properties:
- raise VirtualBoxError("Can't access to VirtualBox API version:\n{}".format(self._system_properties))
+ raise VirtualBoxError(f"Can't access to VirtualBox API version:\n{self._system_properties}")
if parse_version(self._system_properties["API version"]) < parse_version("4_3"):
raise VirtualBoxError("The VirtualBox API version is lower than 4.3")
- log.info("VirtualBox VM '{name}' [{id}] created".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] created")
if self.linked_clone:
if self.id and os.path.isdir(os.path.join(self.working_dir, self._vmname)):
@@ -225,7 +224,7 @@ class VirtualBoxVM(BaseNode):
raise VirtualBoxError("Cannot modify VirtualBox linked nodes file. "
"File {} is corrupted.".format(self._linked_vbox_file()))
except OSError as e:
- raise VirtualBoxError("Cannot modify VirtualBox linked nodes file '{}': {}".format(self._linked_vbox_file(), e))
+ raise VirtualBoxError(f"Cannot modify VirtualBox linked nodes file '{self._linked_vbox_file()}': {e}")
machine = tree.getroot().find("{http://www.virtualbox.org/}Machine")
if machine is not None and machine.get("uuid") != "{" + self.id + "}":
@@ -279,7 +278,7 @@ class VirtualBoxVM(BaseNode):
await self._set_network_options()
await self._set_serial_console()
else:
- raise VirtualBoxError("VirtualBox VM '{}' is not powered off (current state is '{}')".format(self.name, vm_state))
+ raise VirtualBoxError(f"VirtualBox VM '{self.name}' is not powered off (current state is '{vm_state}')")
# check if there is enough RAM to run
self.check_available_ram(self.ram)
@@ -289,8 +288,8 @@ class VirtualBoxVM(BaseNode):
args.extend(["--type", "headless"])
result = await self.manager.execute("startvm", args)
self.status = "started"
- log.info("VirtualBox VM '{name}' [{id}] started".format(name=self.name, id=self.id))
- log.debug("Start result: {}".format(result))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] started")
+ log.debug(f"Start result: {result}")
# add a guest property to let the VM know about the GNS3 name
await self.manager.execute("guestproperty", ["set", self._uuid, "NameInGNS3", self.name])
@@ -301,7 +300,7 @@ class VirtualBoxVM(BaseNode):
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
- await self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
+ await self.add_ubridge_udp_connection(f"VBOX-{self._id}-{adapter_number}",
self._local_udp_tunnels[adapter_number][1],
nio)
@@ -320,7 +319,7 @@ class VirtualBoxVM(BaseNode):
await self._stop_ubridge()
await self._stop_remote_console()
vm_state = await self._get_vm_state()
- log.info("Stopping VirtualBox VM '{name}' [{id}] (current state is {vm_state})".format(name=self.name, id=self.id, vm_state=vm_state))
+ log.info(f"Stopping VirtualBox VM '{self.name}' [{self.id}] (current state is {vm_state})")
if vm_state in ("running", "paused"):
if self.on_close == "save_vm_state":
@@ -328,7 +327,7 @@ class VirtualBoxVM(BaseNode):
await self.manager.execute("guestproperty", ["set", self._uuid, "SavedByGNS3", "yes"])
result = await self._control_vm("savestate")
self.status = "stopped"
- log.debug("Stop result: {}".format(result))
+ log.debug(f"Stop result: {result}")
elif self.on_close == "shutdown_signal":
# use ACPI to shutdown the VM
result = await self._control_vm("acpipowerbutton")
@@ -343,17 +342,17 @@ class VirtualBoxVM(BaseNode):
await self._control_vm("poweroff")
break
self.status = "stopped"
- log.debug("ACPI shutdown result: {}".format(result))
+ log.debug(f"ACPI shutdown result: {result}")
else:
# power off the VM
result = await self._control_vm("poweroff")
self.status = "stopped"
- log.debug("Stop result: {}".format(result))
+ log.debug(f"Stop result: {result}")
elif vm_state == "aborted":
self.status = "stopped"
if self.status == "stopped":
- log.info("VirtualBox VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] stopped")
await asyncio.sleep(0.5) # give some time for VirtualBox to unlock the VM
if self.on_close != "save_vm_state":
# do some cleaning when the VM is powered off
@@ -361,14 +360,14 @@ class VirtualBoxVM(BaseNode):
# deactivate the first serial port
await self._modify_vm("--uart1 off")
except VirtualBoxError as e:
- log.warning("Could not deactivate the first serial port: {}".format(e))
+ log.warning(f"Could not deactivate the first serial port: {e}")
for adapter_number in range(0, self._adapters):
nio = self._ethernet_adapters[adapter_number].get_nio(0)
if nio:
- await self._modify_vm("--nictrace{} off".format(adapter_number + 1))
- await self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
- await self._modify_vm("--nic{} null".format(adapter_number + 1))
+ await self._modify_vm(f"--nictrace{adapter_number + 1} off")
+ await self._modify_vm(f"--cableconnected{adapter_number + 1} off")
+ await self._modify_vm(f"--nic{adapter_number + 1} null")
await super().stop()
async def suspend(self):
@@ -380,7 +379,7 @@ class VirtualBoxVM(BaseNode):
if vm_state == "running":
await self._control_vm("pause")
self.status = "suspended"
- log.info("VirtualBox VM '{name}' [{id}] suspended".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] suspended")
else:
log.warning("VirtualBox VM '{name}' [{id}] cannot be suspended, current state: {state}".format(name=self.name,
id=self.id,
@@ -393,7 +392,7 @@ class VirtualBoxVM(BaseNode):
await self._control_vm("resume")
self.status = "started"
- log.info("VirtualBox VM '{name}' [{id}] resumed".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] resumed")
async def reload(self):
"""
@@ -401,8 +400,8 @@ class VirtualBoxVM(BaseNode):
"""
result = await self._control_vm("reset")
- log.info("VirtualBox VM '{name}' [{id}] reloaded".format(name=self.name, id=self.id))
- log.debug("Reload result: {}".format(result))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] reloaded")
+ log.debug(f"Reload result: {result}")
async def _get_all_hdd_files(self):
@@ -424,7 +423,7 @@ class VirtualBoxVM(BaseNode):
hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
try:
- with open(hdd_info_file, "r", encoding="utf-8") as f:
+ with open(hdd_info_file, encoding="utf-8") as f:
hdd_table = json.load(f)
except (ValueError, OSError) as e:
# The VM has never be started
@@ -513,7 +512,7 @@ class VirtualBoxVM(BaseNode):
if not (await super().close()):
return False
- log.debug("VirtualBox VM '{name}' [{id}] is closing".format(name=self.name, id=self.id))
+ log.debug(f"VirtualBox VM '{self.name}' [{self.id}] is closing")
if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
self._console = None
@@ -553,10 +552,10 @@ class VirtualBoxVM(BaseNode):
error=e))
continue
- log.info("VirtualBox VM '{name}' [{id}] unregistering".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] unregistering")
await self.manager.execute("unregistervm", [self._name])
- log.info("VirtualBox VM '{name}' [{id}] closed".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] closed")
self._closed = True
@property
@@ -578,9 +577,9 @@ class VirtualBoxVM(BaseNode):
"""
if headless:
- log.info("VirtualBox VM '{name}' [{id}] has enabled the headless mode".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] has enabled the headless mode")
else:
- log.info("VirtualBox VM '{name}' [{id}] has disabled the headless mode".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] has disabled the headless mode")
self._headless = headless
@property
@@ -601,7 +600,7 @@ class VirtualBoxVM(BaseNode):
:param on_close: string
"""
- log.info('VirtualBox VM "{name}" [{id}] set the close action to "{action}"'.format(name=self._name, id=self._id, action=on_close))
+ log.info(f'VirtualBox VM "{self._name}" [{self._id}] set the close action to "{on_close}"')
self._on_close = on_close
@property
@@ -624,9 +623,9 @@ class VirtualBoxVM(BaseNode):
if ram == 0:
return
- await self._modify_vm('--memory {}'.format(ram))
+ await self._modify_vm(f'--memory {ram}')
- log.info("VirtualBox VM '{name}' [{id}] has set amount of RAM to {ram}".format(name=self.name, id=self.id, ram=ram))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] has set amount of RAM to {ram}")
self._ram = ram
@property
@@ -651,14 +650,14 @@ class VirtualBoxVM(BaseNode):
if self.linked_clone:
if self.status == "started":
- raise VirtualBoxError("Cannot change the name of running VM {}".format(self._name))
+ raise VirtualBoxError(f"Cannot change the name of running VM {self._name}")
# We can't rename a VM to name that already exists
vms = await self.manager.list_vms(allow_clone=True)
if vmname in [vm["vmname"] for vm in vms]:
- raise VirtualBoxError("Cannot change the name to {}, it is already used in VirtualBox".format(vmname))
- await self._modify_vm('--name "{}"'.format(vmname))
+ raise VirtualBoxError(f"Cannot change the name to {vmname}, it is already used in VirtualBox")
+ await self._modify_vm(f'--name "{vmname}"')
- log.info("VirtualBox VM '{name}' [{id}] has set the VM name to '{vmname}'".format(name=self.name, id=self.id, vmname=vmname))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] has set the VM name to '{vmname}'")
self._vmname = vmname
@property
@@ -684,14 +683,14 @@ class VirtualBoxVM(BaseNode):
self._maximum_adapters = 8 # default maximum network adapter count for PIIX3 chipset
if "chipset" in vm_info:
chipset = vm_info["chipset"]
- max_adapter_string = "Maximum {} Network Adapter count".format(chipset.upper())
+ max_adapter_string = f"Maximum {chipset.upper()} Network Adapter count"
if max_adapter_string in self._system_properties:
try:
self._maximum_adapters = int(self._system_properties[max_adapter_string])
except ValueError:
- log.error("Could not convert system property to integer: {} = {}".format(max_adapter_string, self._system_properties[max_adapter_string]))
+ log.error(f"Could not convert system property to integer: {max_adapter_string} = {self._system_properties[max_adapter_string]}")
else:
- log.warning("Could not find system property '{}' for chipset {}".format(max_adapter_string, chipset))
+ log.warning(f"Could not find system property '{max_adapter_string}' for chipset {chipset}")
log.info("VirtualBox VM '{name}' [{id}] can have a maximum of {max} network adapters for chipset {chipset}".format(name=self.name,
id=self.id,
@@ -729,9 +728,9 @@ class VirtualBoxVM(BaseNode):
"""
if use_any_adapter:
- log.info("VirtualBox VM '{name}' [{id}] is allowed to use any adapter".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] is allowed to use any adapter")
else:
- log.info("VirtualBox VM '{name}' [{id}] is not allowed to use any adapter".format(name=self.name, id=self.id))
+ log.info(f"VirtualBox VM '{self.name}' [{self.id}] is not allowed to use any adapter")
self._use_any_adapter = use_any_adapter
@property
@@ -782,13 +781,13 @@ class VirtualBoxVM(BaseNode):
"""
if sys.platform.startswith("win"):
- pipe_name = r"\\.\pipe\gns3_vbox\{}".format(self.id)
+ pipe_name = fr"\\.\pipe\gns3_vbox\{self.id}"
else:
- pipe_name = os.path.join(tempfile.gettempdir(), "gns3_vbox", "{}".format(self.id))
+ pipe_name = os.path.join(tempfile.gettempdir(), "gns3_vbox", f"{self.id}")
try:
os.makedirs(os.path.dirname(pipe_name), exist_ok=True)
except OSError as e:
- raise VirtualBoxError("Could not create the VirtualBox pipe directory: {}".format(e))
+ raise VirtualBoxError(f"Could not create the VirtualBox pipe directory: {e}")
return pipe_name
async def _set_serial_console(self):
@@ -825,7 +824,7 @@ class VirtualBoxVM(BaseNode):
nics = []
vm_info = await self._get_vm_info()
for adapter_number in range(0, maximum_adapters):
- entry = "nic{}".format(adapter_number + 1)
+ entry = f"nic{adapter_number + 1}"
if entry in vm_info:
value = vm_info[entry]
nics.append(value.lower())
@@ -843,11 +842,11 @@ class VirtualBoxVM(BaseNode):
attachment = nic_attachments[adapter_number]
if attachment == "null":
# disconnect the cable if no backend is attached.
- await self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
+ await self._modify_vm(f"--cableconnected{adapter_number + 1} off")
if attachment == "none":
# set the backend to null to avoid a difference in the number of interfaces in the Guest.
- await self._modify_vm("--nic{} null".format(adapter_number + 1))
- await self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
+ await self._modify_vm(f"--nic{adapter_number + 1} null")
+ await self._modify_vm(f"--cableconnected{adapter_number + 1} off")
# use a local UDP tunnel to connect to uBridge instead
if adapter_number not in self._local_udp_tunnels:
@@ -858,7 +857,7 @@ class VirtualBoxVM(BaseNode):
if not self._use_any_adapter and attachment in ("nat", "bridged", "intnet", "hostonly", "natnetwork"):
continue
- await self._modify_vm("--nictrace{} off".format(adapter_number + 1))
+ await self._modify_vm(f"--nictrace{adapter_number + 1} off")
custom_adapter = self._get_custom_adapter_settings(adapter_number)
adapter_type = custom_adapter.get("adapter_type", self._adapter_type)
@@ -876,31 +875,31 @@ class VirtualBoxVM(BaseNode):
vbox_adapter_type = "82545EM"
if adapter_type == "Paravirtualized Network (virtio-net)":
vbox_adapter_type = "virtio"
- args = [self._uuid, "--nictype{}".format(adapter_number + 1), vbox_adapter_type]
+ args = [self._uuid, f"--nictype{adapter_number + 1}", vbox_adapter_type]
await self.manager.execute("modifyvm", args)
if isinstance(nio, NIOUDP):
- log.debug("setting UDP params on adapter {}".format(adapter_number))
- await self._modify_vm("--nic{} generic".format(adapter_number + 1))
- await self._modify_vm("--nicgenericdrv{} UDPTunnel".format(adapter_number + 1))
- await self._modify_vm("--nicproperty{} sport={}".format(adapter_number + 1, nio.lport))
- await self._modify_vm("--nicproperty{} dest={}".format(adapter_number + 1, nio.rhost))
- await self._modify_vm("--nicproperty{} dport={}".format(adapter_number + 1, nio.rport))
+ log.debug(f"setting UDP params on adapter {adapter_number}")
+ await self._modify_vm(f"--nic{adapter_number + 1} generic")
+ await self._modify_vm(f"--nicgenericdrv{adapter_number + 1} UDPTunnel")
+ await self._modify_vm(f"--nicproperty{adapter_number + 1} sport={nio.lport}")
+ await self._modify_vm(f"--nicproperty{adapter_number + 1} dest={nio.rhost}")
+ await self._modify_vm(f"--nicproperty{adapter_number + 1} dport={nio.rport}")
if nio.suspend:
- await self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
+ await self._modify_vm(f"--cableconnected{adapter_number + 1} off")
else:
- await self._modify_vm("--cableconnected{} on".format(adapter_number + 1))
+ await self._modify_vm(f"--cableconnected{adapter_number + 1} on")
if nio.capturing:
- await self._modify_vm("--nictrace{} on".format(adapter_number + 1))
- await self._modify_vm('--nictracefile{} "{}"'.format(adapter_number + 1, nio.pcap_output_file))
+ await self._modify_vm(f"--nictrace{adapter_number + 1} on")
+ await self._modify_vm(f'--nictracefile{adapter_number + 1} "{nio.pcap_output_file}"')
if not self._ethernet_adapters[adapter_number].get_nio(0):
- await self._modify_vm("--cableconnected{} off".format(adapter_number + 1))
+ await self._modify_vm(f"--cableconnected{adapter_number + 1} off")
for adapter_number in range(self._adapters, self._maximum_adapters):
- log.debug("disabling remaining adapter {}".format(adapter_number))
- await self._modify_vm("--nic{} none".format(adapter_number + 1))
+ log.debug(f"disabling remaining adapter {adapter_number}")
+ await self._modify_vm(f"--nic{adapter_number + 1} none")
async def _create_linked_clone(self):
"""
@@ -915,7 +914,7 @@ class VirtualBoxVM(BaseNode):
if not gns3_snapshot_exists:
result = await self.manager.execute("snapshot", [self._uuid, "take", "GNS3 Linked Base for clones"])
- log.debug("GNS3 snapshot created: {}".format(result))
+ log.debug(f"GNS3 snapshot created: {result}")
args = [self._uuid,
"--snapshot",
@@ -929,7 +928,7 @@ class VirtualBoxVM(BaseNode):
"--register"]
result = await self.manager.execute("clonevm", args)
- log.debug("VirtualBox VM: {} cloned".format(result))
+ log.debug(f"VirtualBox VM: {result} cloned")
# refresh the UUID and vmname to match with the clone
self._vmname = self._name
@@ -941,7 +940,7 @@ class VirtualBoxVM(BaseNode):
try:
args = [self._uuid, "take", "reset"]
result = await self.manager.execute("snapshot", args)
- log.debug("Snapshot 'reset' created: {}".format(result))
+ log.debug(f"Snapshot 'reset' created: {result}")
# It seem sometimes this failed due to internal race condition of Vbox
# we have no real explanation of this.
except VirtualBoxError:
@@ -959,7 +958,7 @@ class VirtualBoxVM(BaseNode):
try:
self._remote_pipe = await asyncio_open_serial(pipe_name)
except OSError as e:
- raise VirtualBoxError("Could not open serial pipe '{}': {}".format(pipe_name, e))
+ raise VirtualBoxError(f"Could not open serial pipe '{pipe_name}': {e}")
server = AsyncioTelnetServer(reader=self._remote_pipe,
writer=self._remote_pipe,
binary=True,
@@ -967,7 +966,7 @@ class VirtualBoxVM(BaseNode):
try:
self._telnet_server = await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)
except OSError as e:
- self.project.emit("log.warning", {"message": "Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)})
+ self.project.emit("log.warning", {"message": f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.console}: {e}"})
async def _stop_remote_console(self):
"""
@@ -996,7 +995,7 @@ class VirtualBoxVM(BaseNode):
"""
if self.is_running() and self.console_type != new_console_type:
- raise VirtualBoxError('"{name}" must be stopped to change the console type to {new_console_type}'.format(name=self._name, new_console_type=new_console_type))
+ raise VirtualBoxError(f'"{self._name}" must be stopped to change the console type to {new_console_type}')
super(VirtualBoxVM, VirtualBoxVM).console_type.__set__(self, new_console_type)
@@ -1027,21 +1026,21 @@ class VirtualBoxVM(BaseNode):
# dynamically configure an UDP tunnel attachment if the VM is already running
local_nio = self._local_udp_tunnels[adapter_number][0]
if local_nio and isinstance(local_nio, NIOUDP):
- await self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1))
- await self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, local_nio.lport))
- await self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, local_nio.rhost))
- await self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, local_nio.rport))
- await self._control_vm("setlinkstate{} on".format(adapter_number + 1))
+ await self._control_vm(f"nic{adapter_number + 1} generic UDPTunnel")
+ await self._control_vm(f"nicproperty{adapter_number + 1} sport={local_nio.lport}")
+ await self._control_vm(f"nicproperty{adapter_number + 1} dest={local_nio.rhost}")
+ await self._control_vm(f"nicproperty{adapter_number + 1} dport={local_nio.rport}")
+ await self._control_vm(f"setlinkstate{adapter_number + 1} on")
if self.is_running():
try:
- await self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
+ await self.add_ubridge_udp_connection(f"VBOX-{self._id}-{adapter_number}",
self._local_udp_tunnels[adapter_number][1],
nio)
except KeyError:
raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
adapter_number=adapter_number))
- await self._control_vm("setlinkstate{} on".format(adapter_number + 1))
+ await self._control_vm(f"setlinkstate{adapter_number + 1} on")
adapter.add_nio(0, nio)
log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name,
@@ -1059,13 +1058,13 @@ class VirtualBoxVM(BaseNode):
if self.is_running():
try:
- await self.update_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
+ await self.update_ubridge_udp_connection(f"VBOX-{self._id}-{adapter_number}",
self._local_udp_tunnels[adapter_number][1],
nio)
if nio.suspend:
- await self._control_vm("setlinkstate{} off".format(adapter_number + 1))
+ await self._control_vm(f"setlinkstate{adapter_number + 1} off")
else:
- await self._control_vm("setlinkstate{} on".format(adapter_number + 1))
+ await self._control_vm(f"setlinkstate{adapter_number + 1} on")
except IndexError:
raise VirtualBoxError('Adapter {adapter_number} does not exist on VirtualBox VM "{name}"'.format(name=self._name,
adapter_number=adapter_number))
@@ -1087,10 +1086,10 @@ class VirtualBoxVM(BaseNode):
await self.stop_capture(adapter_number)
if self.is_running():
- await self._ubridge_send("bridge delete {name}".format(name="VBOX-{}-{}".format(self._id, adapter_number)))
+ await self._ubridge_send("bridge delete {name}".format(name=f"VBOX-{self._id}-{adapter_number}"))
vm_state = await self._get_vm_state()
if vm_state == "running":
- await self._control_vm("setlinkstate{} off".format(adapter_number + 1))
+ await self._control_vm(f"setlinkstate{adapter_number + 1} off")
nio = adapter.get_nio(0)
if isinstance(nio, NIOUDP):
@@ -1121,7 +1120,7 @@ class VirtualBoxVM(BaseNode):
nio = adapter.get_nio(0)
if not nio:
- raise VirtualBoxError("Adapter {} is not connected".format(adapter_number))
+ raise VirtualBoxError(f"Adapter {adapter_number} is not connected")
return nio
@@ -1141,11 +1140,11 @@ class VirtualBoxVM(BaseNode):
nio = self.get_nio(adapter_number)
if nio.capturing:
- raise VirtualBoxError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
+ raise VirtualBoxError(f"Packet capture is already activated on adapter {adapter_number}")
nio.start_packet_capture(output_file)
if self.ubridge:
- await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VBOX-{}-{}".format(self._id, adapter_number),
+ await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=f"VBOX-{self._id}-{adapter_number}",
output_file=output_file))
log.info("VirtualBox VM '{name}' [{id}]: starting packet capture on adapter {adapter_number}".format(name=self.name,
@@ -1165,7 +1164,7 @@ class VirtualBoxVM(BaseNode):
nio.stop_packet_capture()
if self.ubridge:
- await self._ubridge_send('bridge stop_capture {name}'.format(name="VBOX-{}-{}".format(self._id, adapter_number)))
+ await self._ubridge_send('bridge stop_capture {name}'.format(name=f"VBOX-{self._id}-{adapter_number}"))
log.info("VirtualBox VM '{name}' [{id}]: stopping packet capture on adapter {adapter_number}".format(name=self.name,
id=self.id,
diff --git a/gns3server/compute/vmware/__init__.py b/gns3server/compute/vmware/__init__.py
index badfbe4b..8a64299d 100644
--- a/gns3server/compute/vmware/__init__.py
+++ b/gns3server/compute/vmware/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -112,11 +111,11 @@ class VMware(BaseManager):
if not vmrun_path:
raise VMwareError("Could not find VMware vmrun, please make sure it is installed")
if not os.path.isfile(vmrun_path):
- raise VMwareError("vmrun {} is not accessible".format(vmrun_path))
+ raise VMwareError(f"vmrun {vmrun_path} is not accessible")
if not os.access(vmrun_path, os.X_OK):
raise VMwareError("vmrun is not executable")
if os.path.basename(vmrun_path).lower() not in ["vmrun", "vmrun.exe"]:
- raise VMwareError("Invalid vmrun executable name {}".format(os.path.basename(vmrun_path)))
+ raise VMwareError(f"Invalid vmrun executable name {os.path.basename(vmrun_path)}")
self._vmrun_path = vmrun_path
return vmrun_path
@@ -200,13 +199,13 @@ class VMware(BaseManager):
if ws_version is None:
player_version = self._find_vmware_version_registry(r"SOFTWARE\Wow6432Node\VMware, Inc.\VMware Player")
if player_version:
- log.debug("VMware Player version {} detected".format(player_version))
+ log.debug(f"VMware Player version {player_version} detected")
await self._check_vmware_player_requirements(player_version)
else:
log.warning("Could not find VMware version")
self._host_type = "ws"
else:
- log.debug("VMware Workstation version {} detected".format(ws_version))
+ log.debug(f"VMware Workstation version {ws_version} detected")
await self._check_vmware_workstation_requirements(ws_version)
else:
if sys.platform.startswith("darwin"):
@@ -226,20 +225,20 @@ class VMware(BaseManager):
if match:
# VMware Workstation has been detected
version = match.group(1)
- log.debug("VMware Workstation version {} detected".format(version))
+ log.debug(f"VMware Workstation version {version} detected")
await self._check_vmware_workstation_requirements(version)
match = re.search(r"VMware Player ([0-9]+)\.", output)
if match:
# VMware Player has been detected
version = match.group(1)
- log.debug("VMware Player version {} detected".format(version))
+ log.debug(f"VMware Player version {version} detected")
await self._check_vmware_player_requirements(version)
if version is None:
- log.warning("Could not find VMware version. Output of VMware: {}".format(output))
- raise VMwareError("Could not find VMware version. Output of VMware: {}".format(output))
+ log.warning(f"Could not find VMware version. Output of VMware: {output}")
+ raise VMwareError(f"Could not find VMware version. Output of VMware: {output}")
except (OSError, subprocess.SubprocessError) as e:
- log.error("Error while looking for the VMware version: {}".format(e))
- raise VMwareError("Error while looking for the VMware version: {}".format(e))
+ log.error(f"Error while looking for the VMware version: {e}")
+ raise VMwareError(f"Error while looking for the VMware version: {e}")
@staticmethod
def _get_vmnet_interfaces_registry():
@@ -260,7 +259,7 @@ class VMware(BaseManager):
winreg.CloseKey(hkeyvmnet)
winreg.CloseKey(hkey)
except OSError as e:
- raise VMwareError("Could not read registry key {}: {}".format(regkey, e))
+ raise VMwareError(f"Could not read registry key {regkey}: {e}")
return vmnet_interfaces
@staticmethod
@@ -275,15 +274,15 @@ class VMware(BaseManager):
vmware_networking_file = "/etc/vmware/networking"
vmnet_interfaces = []
try:
- with open(vmware_networking_file, "r", encoding="utf-8") as f:
+ with open(vmware_networking_file, encoding="utf-8") as f:
for line in f.read().splitlines():
match = re.search(r"VNET_([0-9]+)_VIRTUAL_ADAPTER", line)
if match:
- vmnet = "vmnet{}".format(match.group(1))
+ vmnet = f"vmnet{match.group(1)}"
if vmnet not in ("vmnet0", "vmnet1", "vmnet8"):
vmnet_interfaces.append(vmnet)
except OSError as e:
- raise VMwareError("Cannot open {}: {}".format(vmware_networking_file, e))
+ raise VMwareError(f"Cannot open {vmware_networking_file}: {e}")
return vmnet_interfaces
@staticmethod
@@ -321,7 +320,7 @@ class VMware(BaseManager):
def allocate_vmnet(self):
if not self._vmnets:
- raise VMwareError("No VMnet interface available between vmnet{} and vmnet{}. Go to preferences VMware / Network / Configure to add more interfaces.".format(self._vmnet_start_range, self._vmnet_end_range))
+ raise VMwareError(f"No VMnet interface available between vmnet{self._vmnet_start_range} and vmnet{self._vmnet_end_range}. Go to preferences VMware / Network / Configure to add more interfaces.")
return self._vmnets.pop(0)
def refresh_vmnet_list(self, ubridge=True):
@@ -336,7 +335,7 @@ class VMware(BaseManager):
for vmware_vm in self._nodes.values():
for used_vmnet in vmware_vm.vmnets:
if used_vmnet in vmnet_interfaces:
- log.debug("{} is already in use".format(used_vmnet))
+ log.debug(f"{used_vmnet} is already in use")
vmnet_interfaces.remove(used_vmnet)
# remove vmnets that are not managed
@@ -387,21 +386,21 @@ class VMware(BaseManager):
command = [vmrun_path, "-T", self.host_type, subcommand]
command.extend(args)
command_string = " ".join([shlex_quote(c) for c in command])
- log.log(log_level, "Executing vmrun with command: {}".format(command_string))
+ log.log(log_level, f"Executing vmrun with command: {command_string}")
try:
process = await asyncio.create_subprocess_exec(*command, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
except (OSError, subprocess.SubprocessError) as e:
- raise VMwareError("Could not execute vmrun: {}".format(e))
+ raise VMwareError(f"Could not execute vmrun: {e}")
try:
stdout_data, _ = await asyncio.wait_for(process.communicate(), timeout=timeout)
except asyncio.TimeoutError:
- raise VMwareError("vmrun has timed out after {} seconds!\nTry to run {} in a terminal to see more details.\n\nMake sure GNS3 and VMware run under the same user and whitelist vmrun.exe in your antivirus.".format(timeout, command_string))
+ raise VMwareError(f"vmrun has timed out after {timeout} seconds!\nTry to run {command_string} in a terminal to see more details.\n\nMake sure GNS3 and VMware run under the same user and whitelist vmrun.exe in your antivirus.")
if process.returncode:
# vmrun print errors on stdout
vmrun_error = stdout_data.decode("utf-8", errors="ignore")
- raise VMwareError("vmrun has returned an error: {}\nTry to run {} in a terminal to see more details.\nAnd make sure GNS3 and VMware run under the same user.".format(vmrun_error, command_string))
+ raise VMwareError(f"vmrun has returned an error: {vmrun_error}\nTry to run {command_string} in a terminal to see more details.\nAnd make sure GNS3 and VMware run under the same user.")
return stdout_data.decode("utf-8", errors="ignore").splitlines()
@@ -427,15 +426,15 @@ class VMware(BaseManager):
version = None
if match:
version = match.group(1)
- log.debug("VMware vmrun version {} detected, minimum required: {}".format(version, minimum_required_version))
+ log.debug(f"VMware vmrun version {version} detected, minimum required: {minimum_required_version}")
if parse_version(version) < parse_version(minimum_required_version):
- raise VMwareError("VMware vmrun executable version must be >= version {}".format(minimum_required_version))
+ raise VMwareError(f"VMware vmrun executable version must be >= version {minimum_required_version}")
if version is None:
- log.warning("Could not find VMware vmrun version. Output: {}".format(output))
- raise VMwareError("Could not find VMware vmrun version. Output: {}".format(output))
+ log.warning(f"Could not find VMware vmrun version. Output: {output}")
+ raise VMwareError(f"Could not find VMware vmrun version. Output: {output}")
except (OSError, subprocess.SubprocessError) as e:
- log.error("Error while looking for the VMware vmrun version: {}".format(e))
- raise VMwareError("Error while looking for the VMware vmrun version: {}".format(e))
+ log.error(f"Error while looking for the VMware vmrun version: {e}")
+ raise VMwareError(f"Error while looking for the VMware vmrun version: {e}")
async def remove_from_vmware_inventory(self, vmx_path):
"""
@@ -450,7 +449,7 @@ class VMware(BaseManager):
try:
inventory_pairs = self.parse_vmware_file(inventory_path)
except OSError as e:
- log.warning('Could not read VMware inventory file "{}": {}'.format(inventory_path, e))
+ log.warning(f'Could not read VMware inventory file "{inventory_path}": {e}')
return
vmlist_entry = None
@@ -467,7 +466,7 @@ class VMware(BaseManager):
try:
self.write_vmware_file(inventory_path, inventory_pairs)
except OSError as e:
- raise VMwareError('Could not write VMware inventory file "{}": {}'.format(inventory_path, e))
+ raise VMwareError(f'Could not write VMware inventory file "{inventory_path}": {e}')
@staticmethod
def parse_vmware_file(path):
@@ -495,9 +494,9 @@ class VMware(BaseManager):
codecs.lookup(file_encoding)
encoding = file_encoding
except LookupError:
- log.warning("Invalid file encoding detected in '{}': {}".format(path, file_encoding))
+ log.warning(f"Invalid file encoding detected in '{path}': {file_encoding}")
except ValueError:
- log.warning("Couldn't find file encoding in {}, using {}...".format(path, encoding))
+ log.warning(f"Couldn't find file encoding in {path}, using {encoding}...")
# read the file with the correct encoding
with open(path, encoding=encoding, errors="ignore") as f:
@@ -525,10 +524,10 @@ class VMware(BaseManager):
codecs.lookup(file_encoding)
encoding = file_encoding
except LookupError:
- log.warning("Invalid file encoding detected in '{}': {}".format(path, file_encoding))
+ log.warning(f"Invalid file encoding detected in '{path}': {file_encoding}")
with open(path, "w", encoding=encoding, errors="ignore") as f:
for key, value in pairs.items():
- entry = '{} = "{}"\n'.format(key, value)
+ entry = f'{key} = "{value}"\n'
f.write(entry)
@staticmethod
@@ -547,15 +546,15 @@ class VMware(BaseManager):
codecs.lookup(file_encoding)
encoding = file_encoding
except LookupError:
- log.warning("Invalid file encoding detected in '{}': {}".format(path, file_encoding))
+ log.warning(f"Invalid file encoding detected in '{path}': {file_encoding}")
with open(path, "w", encoding=encoding, errors="ignore") as f:
if sys.platform.startswith("linux"):
# write the shebang on the first line on Linux
vmware_path = VMware._get_linux_vmware_binary()
if vmware_path:
- f.write("#!{}\n".format(vmware_path))
+ f.write(f"#!{vmware_path}\n")
for key, value in pairs.items():
- entry = '{} = "{}"\n'.format(key, value)
+ entry = f'{key} = "{value}"\n'
f.write(entry)
def _get_vms_from_inventory(self, inventory_path):
@@ -569,7 +568,7 @@ class VMware(BaseManager):
vm_entries = {}
vmware_vms = []
- log.info('Searching for VMware VMs in inventory file "{}"'.format(inventory_path))
+ log.info(f'Searching for VMware VMs in inventory file "{inventory_path}"')
try:
pairs = self.parse_vmware_file(inventory_path)
for key, value in pairs.items():
@@ -582,7 +581,7 @@ class VMware(BaseManager):
vm_entries[vm_entry] = {}
vm_entries[vm_entry][variable_name.strip()] = value
except OSError as e:
- log.warning("Could not read VMware inventory file {}: {}".format(inventory_path, e))
+ log.warning(f"Could not read VMware inventory file {inventory_path}: {e}")
for vm_settings in vm_entries.values():
if "displayname" in vm_settings and "config" in vm_settings:
@@ -601,19 +600,19 @@ class VMware(BaseManager):
"""
vmware_vms = []
- log.info('Searching for VMware VMs in directory "{}"'.format(directory))
+ log.info(f'Searching for VMware VMs in directory "{directory}"')
for path, _, filenames in os.walk(directory):
for filename in filenames:
if os.path.splitext(filename)[1] == ".vmx":
vmx_path = os.path.join(path, filename)
- log.debug('Reading VMware VMX file "{}"'.format(vmx_path))
+ log.debug(f'Reading VMware VMX file "{vmx_path}"')
try:
pairs = self.parse_vmware_file(vmx_path)
if "displayname" in pairs:
log.debug('Found VM named "{}"'.format(pairs["displayname"]))
vmware_vms.append({"vmname": pairs["displayname"], "vmx_path": vmx_path})
except OSError as e:
- log.warning('Could not read VMware VMX file "{}": {}'.format(vmx_path, e))
+ log.warning(f'Could not read VMware VMX file "{vmx_path}": {e}')
continue
return vmware_vms
@@ -661,7 +660,7 @@ class VMware(BaseManager):
path = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
ctypes.windll.shell32.SHGetFolderPathW(None, 5, None, 0, path)
documents_folder = path.value
- return ['{}\My Virtual Machines'.format(documents_folder), '{}\Virtual Machines'.format(documents_folder)]
+ return [fr'{documents_folder}\My Virtual Machines', fr'{documents_folder}\Virtual Machines']
elif sys.platform.startswith("darwin"):
return [os.path.expanduser("~/Documents/Virtual Machines.localized")]
else:
@@ -688,7 +687,7 @@ class VMware(BaseManager):
try:
pairs = self.parse_vmware_file(vmware_preferences_path)
except OSError as e:
- log.warning('Could not read VMware preferences file "{}": {}'.format(vmware_preferences_path, e))
+ log.warning(f'Could not read VMware preferences file "{vmware_preferences_path}": {e}')
if "prefvmx.defaultvmpath" in pairs:
default_vm_path = pairs["prefvmx.defaultvmpath"]
if not os.path.isdir(default_vm_path):
@@ -710,7 +709,7 @@ class VMware(BaseManager):
for key, value in pairs.items():
m = re.match(r'pref.mruVM(\d+)\.filename', key)
if m:
- display_name = "pref.mruVM{}.displayName".format(m.group(1))
+ display_name = f"pref.mruVM{m.group(1)}.displayName"
if display_name in pairs:
found = False
for vmware_vm in vmware_vms:
diff --git a/gns3server/compute/vmware/vmware_error.py b/gns3server/compute/vmware/vmware_error.py
index 4c390a31..1a4ec0cf 100644
--- a/gns3server/compute/vmware/vmware_error.py
+++ b/gns3server/compute/vmware/vmware_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/vmware/vmware_vm.py b/gns3server/compute/vmware/vmware_vm.py
index 4abfa281..5b8a63fa 100644
--- a/gns3server/compute/vmware/vmware_vm.py
+++ b/gns3server/compute/vmware/vmware_vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -65,7 +64,7 @@ class VMwareVM(BaseNode):
self._use_any_adapter = False
if not os.path.exists(vmx_path):
- raise VMwareError('VMware VM "{name}" [{id}]: could not find VMX file "{vmx_path}"'.format(name=name, id=node_id, vmx_path=vmx_path))
+ raise VMwareError(f'VMware VM "{name}" [{node_id}]: could not find VMX file "{vmx_path}"')
@property
def ethernet_adapters(self):
@@ -101,7 +100,7 @@ class VMwareVM(BaseNode):
args = [self._vmx_path]
args.extend(additional_args)
result = await self.manager.execute(subcommand, args)
- log.debug("Control VM '{}' result: {}".format(subcommand, result))
+ log.debug(f"Control VM '{subcommand}' result: {result}")
return result
def _read_vmx_file(self):
@@ -112,7 +111,7 @@ class VMwareVM(BaseNode):
try:
self._vmx_pairs = self.manager.parse_vmware_file(self._vmx_path)
except OSError as e:
- raise VMwareError('Could not read VMware VMX file "{}": {}'.format(self._vmx_path, e))
+ raise VMwareError(f'Could not read VMware VMX file "{self._vmx_path}": {e}')
def _write_vmx_file(self):
"""
@@ -122,7 +121,7 @@ class VMwareVM(BaseNode):
try:
self.manager.write_vmx_file(self._vmx_path, self._vmx_pairs)
except OSError as e:
- raise VMwareError('Could not write VMware VMX file "{}": {}'.format(self._vmx_path, e))
+ raise VMwareError(f'Could not write VMware VMX file "{self._vmx_path}": {e}')
async def is_running(self):
@@ -148,10 +147,10 @@ class VMwareVM(BaseNode):
found = True
if node.project != self.project:
if trial >= 30:
- raise VMwareError("Sorry a node without the linked clone setting enabled can only be used once on your server.\n{} is already used by {} in project {}".format(self.vmx_path, node.name, self.project.name))
+ raise VMwareError(f"Sorry a node without the linked clone setting enabled can only be used once on your server.\n{self.vmx_path} is already used by {node.name} in project {self.project.name}")
else:
if trial >= 5:
- raise VMwareError("Sorry a node without the linked clone setting enabled can only be used once on your server.\n{} is already used by {} in this project".format(self.vmx_path, node.name))
+ raise VMwareError(f"Sorry a node without the linked clone setting enabled can only be used once on your server.\n{self.vmx_path} is already used by {node.name} in this project")
if not found:
return
trial += 1
@@ -172,18 +171,18 @@ class VMwareVM(BaseNode):
base_snapshot_name = "GNS3 Linked Base for clones"
vmsd_path = os.path.splitext(self._vmx_path)[0] + ".vmsd"
if not os.path.exists(vmsd_path):
- raise VMwareError("{} doesn't not exist".format(vmsd_path))
+            raise VMwareError(f"{vmsd_path} doesn't exist")
try:
vmsd_pairs = self.manager.parse_vmware_file(vmsd_path)
except OSError as e:
- raise VMwareError('Could not read VMware VMSD file "{}": {}'.format(vmsd_path, e))
+ raise VMwareError(f'Could not read VMware VMSD file "{vmsd_path}": {e}')
gns3_snapshot_exists = False
for value in vmsd_pairs.values():
if value == base_snapshot_name:
gns3_snapshot_exists = True
break
if not gns3_snapshot_exists:
- log.info("Creating snapshot '{}'".format(base_snapshot_name))
+ log.info(f"Creating snapshot '{base_snapshot_name}'")
await self._control_vm("snapshot", base_snapshot_name)
# create the linked clone based on the base snapshot
@@ -191,13 +190,13 @@ class VMwareVM(BaseNode):
await self._control_vm("clone",
new_vmx_path,
"linked",
- "-snapshot={}".format(base_snapshot_name),
- "-cloneName={}".format(self.name))
+ f"-snapshot={base_snapshot_name}",
+ f"-cloneName={self.name}")
try:
vmsd_pairs = self.manager.parse_vmware_file(vmsd_path)
except OSError as e:
- raise VMwareError('Could not read VMware VMSD file "{}": {}'.format(vmsd_path, e))
+ raise VMwareError(f'Could not read VMware VMSD file "{vmsd_path}": {e}')
snapshot_name = None
for name, value in vmsd_pairs.items():
@@ -206,25 +205,25 @@ class VMwareVM(BaseNode):
break
if snapshot_name is None:
- raise VMwareError("Could not find the linked base snapshot in {}".format(vmsd_path))
+ raise VMwareError(f"Could not find the linked base snapshot in {vmsd_path}")
- num_clones_entry = "{}.numClones".format(snapshot_name)
+ num_clones_entry = f"{snapshot_name}.numClones"
if num_clones_entry in vmsd_pairs:
try:
nb_of_clones = int(vmsd_pairs[num_clones_entry])
except ValueError:
- raise VMwareError("Value of {} in {} is not a number".format(num_clones_entry, vmsd_path))
+ raise VMwareError(f"Value of {num_clones_entry} in {vmsd_path} is not a number")
vmsd_pairs[num_clones_entry] = str(nb_of_clones - 1)
for clone_nb in range(0, nb_of_clones):
- clone_entry = "{}.clone{}".format(snapshot_name, clone_nb)
+ clone_entry = f"{snapshot_name}.clone{clone_nb}"
if clone_entry in vmsd_pairs:
del vmsd_pairs[clone_entry]
try:
self.manager.write_vmware_file(vmsd_path, vmsd_pairs)
except OSError as e:
- raise VMwareError('Could not write VMware VMSD file "{}": {}'.format(vmsd_path, e))
+ raise VMwareError(f'Could not write VMware VMSD file "{vmsd_path}": {e}')
# update the VMX file path
self._vmx_path = new_vmx_path
@@ -248,7 +247,7 @@ class VMwareVM(BaseNode):
for adapter_number in range(0, self._adapters):
# we want the vmnet interface to be connected when starting the VM
- connected = "ethernet{}.startConnected".format(adapter_number)
+ connected = f"ethernet{adapter_number}.startConnected"
if self._get_vmx_setting(connected):
del self._vmx_pairs[connected]
@@ -266,23 +265,23 @@ class VMwareVM(BaseNode):
vmware_adapter_type = "e1000"
else:
vmware_adapter_type = adapter_type
- ethernet_adapter = {"ethernet{}.present".format(adapter_number): "TRUE",
- "ethernet{}.addresstype".format(adapter_number): "generated",
- "ethernet{}.generatedaddressoffset".format(adapter_number): "0",
- "ethernet{}.virtualdev".format(adapter_number): vmware_adapter_type}
+ ethernet_adapter = {f"ethernet{adapter_number}.present": "TRUE",
+ f"ethernet{adapter_number}.addresstype": "generated",
+ f"ethernet{adapter_number}.generatedaddressoffset": "0",
+ f"ethernet{adapter_number}.virtualdev": vmware_adapter_type}
self._vmx_pairs.update(ethernet_adapter)
- connection_type = "ethernet{}.connectiontype".format(adapter_number)
+ connection_type = f"ethernet{adapter_number}.connectiontype"
if not self._use_any_adapter and connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"):
continue
- self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
+ self._vmx_pairs[f"ethernet{adapter_number}.connectiontype"] = "custom"
# make sure we have a vmnet per adapter if we use uBridge
allocate_vmnet = False
# first check if a vmnet is already assigned to the adapter
- vnet = "ethernet{}.vnet".format(adapter_number)
+ vnet = f"ethernet{adapter_number}.vnet"
if vnet in self._vmx_pairs:
vmnet = os.path.basename(self._vmx_pairs[vnet])
if self.manager.is_managed_vmnet(vmnet) or vmnet in ("vmnet0", "vmnet1", "vmnet8"):
@@ -303,21 +302,21 @@ class VMwareVM(BaseNode):
# mark the vmnet as managed by us
if vmnet not in self._vmnets:
self._vmnets.append(vmnet)
- self._vmx_pairs["ethernet{}.vnet".format(adapter_number)] = vmnet
+ self._vmx_pairs[f"ethernet{adapter_number}.vnet"] = vmnet
# disable remaining network adapters
for adapter_number in range(self._adapters, self._maximum_adapters):
- if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
- log.debug("disabling remaining adapter {}".format(adapter_number))
- self._vmx_pairs["ethernet{}.startconnected".format(adapter_number)] = "FALSE"
+ if self._get_vmx_setting(f"ethernet{adapter_number}.present", "TRUE"):
+ log.debug(f"disabling remaining adapter {adapter_number}")
+ self._vmx_pairs[f"ethernet{adapter_number}.startconnected"] = "FALSE"
def _get_vnet(self, adapter_number):
"""
Return the vnet will use in ubridge
"""
- vnet = "ethernet{}.vnet".format(adapter_number)
+ vnet = f"ethernet{adapter_number}.vnet"
if vnet not in self._vmx_pairs:
- raise VMwareError("vnet {} not in VMX file".format(vnet))
+ raise VMwareError(f"vnet {vnet} not in VMX file")
return vnet
async def _add_ubridge_connection(self, nio, adapter_number):
@@ -329,12 +328,12 @@ class VMwareVM(BaseNode):
"""
vnet = self._get_vnet(adapter_number)
- await self._ubridge_send("bridge create {name}".format(name=vnet))
+ await self._ubridge_send(f"bridge create {vnet}")
vmnet_interface = os.path.basename(self._vmx_pairs[vnet])
if sys.platform.startswith("darwin"):
# special case on OSX, we cannot bind VMnet interfaces using the libpcap
- await self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=vnet, interface=vmnet_interface))
+ await self._ubridge_send(f'bridge add_nio_fusion_vmnet {vnet} "{vmnet_interface}"')
else:
block_host_traffic = self.manager.config.VMware.block_host_traffic
await self._add_ubridge_ethernet_connection(vnet, vmnet_interface, block_host_traffic)
@@ -346,9 +345,9 @@ class VMwareVM(BaseNode):
rport=nio.rport))
if nio.capturing:
- await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=vnet, pcap_file=nio.pcap_output_file))
+ await self._ubridge_send(f'bridge start_capture {vnet} "{nio.pcap_output_file}"')
- await self._ubridge_send('bridge start {name}'.format(name=vnet))
+ await self._ubridge_send(f'bridge start {vnet}')
await self._ubridge_apply_filters(vnet, nio.filters)
async def _update_ubridge_connection(self, adapter_number, nio):
@@ -371,10 +370,10 @@ class VMwareVM(BaseNode):
:param adapter_number: adapter number
"""
- vnet = "ethernet{}.vnet".format(adapter_number)
+ vnet = f"ethernet{adapter_number}.vnet"
if vnet not in self._vmx_pairs:
- raise VMwareError("vnet {} not in VMX file".format(vnet))
- await self._ubridge_send("bridge delete {name}".format(name=vnet))
+ raise VMwareError(f"vnet {vnet} not in VMX file")
+ await self._ubridge_send(f"bridge delete {vnet}")
async def _start_ubridge_capture(self, adapter_number, output_file):
"""
@@ -384,9 +383,9 @@ class VMwareVM(BaseNode):
:param output_file: PCAP destination file for the capture
"""
- vnet = "ethernet{}.vnet".format(adapter_number)
+ vnet = f"ethernet{adapter_number}.vnet"
if vnet not in self._vmx_pairs:
- raise VMwareError("vnet {} not in VMX file".format(vnet))
+ raise VMwareError(f"vnet {vnet} not in VMX file")
if not self._ubridge_hypervisor:
raise VMwareError("Cannot start the packet capture: uBridge is not running")
await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=vnet,
@@ -399,12 +398,12 @@ class VMwareVM(BaseNode):
:param adapter_number: adapter number
"""
- vnet = "ethernet{}.vnet".format(adapter_number)
+ vnet = f"ethernet{adapter_number}.vnet"
if vnet not in self._vmx_pairs:
- raise VMwareError("vnet {} not in VMX file".format(vnet))
+ raise VMwareError(f"vnet {vnet} not in VMX file")
if not self._ubridge_hypervisor:
raise VMwareError("Cannot stop the packet capture: uBridge is not running")
- await self._ubridge_send("bridge stop_capture {name}".format(name=vnet))
+ await self._ubridge_send(f"bridge stop_capture {vnet}")
def check_hw_virtualization(self):
"""
@@ -464,7 +463,7 @@ class VMwareVM(BaseNode):
self._started = True
self.status = "started"
- log.info("VMware VM '{name}' [{id}] started".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] started")
async def stop(self):
"""
@@ -492,25 +491,25 @@ class VMwareVM(BaseNode):
self._vmnets.clear()
# remove the adapters managed by GNS3
for adapter_number in range(0, self._adapters):
- vnet = "ethernet{}.vnet".format(adapter_number)
- if self._get_vmx_setting(vnet) or self._get_vmx_setting("ethernet{}.connectiontype".format(adapter_number)) is None:
+ vnet = f"ethernet{adapter_number}.vnet"
+ if self._get_vmx_setting(vnet) or self._get_vmx_setting(f"ethernet{adapter_number}.connectiontype") is None:
if vnet in self._vmx_pairs:
vmnet = os.path.basename(self._vmx_pairs[vnet])
if not self.manager.is_managed_vmnet(vmnet):
continue
- log.debug("removing adapter {}".format(adapter_number))
+ log.debug(f"removing adapter {adapter_number}")
self._vmx_pairs[vnet] = "vmnet1"
- self._vmx_pairs["ethernet{}.connectiontype".format(adapter_number)] = "custom"
+ self._vmx_pairs[f"ethernet{adapter_number}.connectiontype"] = "custom"
# re-enable any remaining network adapters
for adapter_number in range(self._adapters, self._maximum_adapters):
- if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
- log.debug("enabling remaining adapter {}".format(adapter_number))
- self._vmx_pairs["ethernet{}.startconnected".format(adapter_number)] = "TRUE"
+ if self._get_vmx_setting(f"ethernet{adapter_number}.present", "TRUE"):
+ log.debug(f"enabling remaining adapter {adapter_number}")
+ self._vmx_pairs[f"ethernet{adapter_number}.startconnected"] = "TRUE"
self._write_vmx_file()
await super().stop()
- log.info("VMware VM '{name}' [{id}] stopped".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] stopped")
async def suspend(self):
"""
@@ -521,7 +520,7 @@ class VMwareVM(BaseNode):
raise VMwareError("Pausing a VM is only supported by VMware Workstation")
await self._control_vm("pause")
self.status = "suspended"
- log.info("VMware VM '{name}' [{id}] paused".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] paused")
async def resume(self):
"""
@@ -532,7 +531,7 @@ class VMwareVM(BaseNode):
raise VMwareError("Unpausing a VM is only supported by VMware Workstation")
await self._control_vm("unpause")
self.status = "started"
- log.info("VMware VM '{name}' [{id}] resumed".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] resumed")
async def reload(self):
"""
@@ -540,7 +539,7 @@ class VMwareVM(BaseNode):
"""
await self._control_vm("reset")
- log.info("VMware VM '{name}' [{id}] reloaded".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] reloaded")
async def close(self):
"""
@@ -583,9 +582,9 @@ class VMwareVM(BaseNode):
"""
if headless:
- log.info("VMware VM '{name}' [{id}] has enabled the headless mode".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] has enabled the headless mode")
else:
- log.info("VMware VM '{name}' [{id}] has disabled the headless mode".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] has disabled the headless mode")
self._headless = headless
@property
@@ -606,7 +605,7 @@ class VMwareVM(BaseNode):
:param on_close: string
"""
- log.info('VMware VM "{name}" [{id}] set the close action to "{action}"'.format(name=self._name, id=self._id, action=on_close))
+ log.info(f'VMware VM "{self._name}" [{self._id}] set the close action to "{on_close}"')
self._on_close = on_close
@property
@@ -627,7 +626,7 @@ class VMwareVM(BaseNode):
:param vmx_path: VMware vmx file
"""
- log.info("VMware VM '{name}' [{id}] has set the vmx file path to '{vmx}'".format(name=self.name, id=self.id, vmx=vmx_path))
+ log.info(f"VMware VM '{self.name}' [{self.id}] has set the vmx file path to '{vmx_path}'")
self._vmx_path = vmx_path
@property
@@ -703,9 +702,9 @@ class VMwareVM(BaseNode):
"""
if use_any_adapter:
- log.info("VMware VM '{name}' [{id}] is allowed to use any adapter".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] is allowed to use any adapter")
else:
- log.info("VMware VM '{name}' [{id}] is not allowed to use any adapter".format(name=self.name, id=self.id))
+ log.info(f"VMware VM '{self.name}' [{self.id}] is not allowed to use any adapter")
self._use_any_adapter = use_any_adapter
async def adapter_add_nio_binding(self, adapter_number, nio):
@@ -724,9 +723,9 @@ class VMwareVM(BaseNode):
self._read_vmx_file()
# check if trying to connect to a nat, bridged or host-only adapter
- if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
+ if self._get_vmx_setting(f"ethernet{adapter_number}.present", "TRUE"):
# check for the connection type
- connection_type = "ethernet{}.connectiontype".format(adapter_number)
+ connection_type = f"ethernet{adapter_number}.connectiontype"
if not self._use_any_adapter and connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"):
if (await self.is_running()):
raise VMwareError("Attachment '{attachment}' is configured on network adapter {adapter_number}. "
@@ -811,7 +810,7 @@ class VMwareVM(BaseNode):
nio = adapter.get_nio(0)
if not nio:
- raise VMwareError("Adapter {} is not connected".format(adapter_number))
+ raise VMwareError(f"Adapter {adapter_number} is not connected")
return nio
@@ -823,13 +822,13 @@ class VMwareVM(BaseNode):
"""
if sys.platform.startswith("win"):
- pipe_name = r"\\.\pipe\gns3_vmware\{}".format(self.id)
+ pipe_name = fr"\\.\pipe\gns3_vmware\{self.id}"
else:
- pipe_name = os.path.join(tempfile.gettempdir(), "gns3_vmware", "{}".format(self.id))
+ pipe_name = os.path.join(tempfile.gettempdir(), "gns3_vmware", f"{self.id}")
try:
os.makedirs(os.path.dirname(pipe_name), exist_ok=True)
except OSError as e:
- raise VMwareError("Could not create the VMware pipe directory: {}".format(e))
+ raise VMwareError(f"Could not create the VMware pipe directory: {e}")
return pipe_name
def _set_serial_console(self):
@@ -855,7 +854,7 @@ class VMwareVM(BaseNode):
try:
self._remote_pipe = await asyncio_open_serial(self._get_pipe_name())
except OSError as e:
- raise VMwareError("Could not open serial pipe '{}': {}".format(pipe_name, e))
+ raise VMwareError(f"Could not open serial pipe '{pipe_name}': {e}")
server = AsyncioTelnetServer(reader=self._remote_pipe,
writer=self._remote_pipe,
binary=True,
@@ -863,7 +862,7 @@ class VMwareVM(BaseNode):
try:
self._telnet_server = await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)
except OSError as e:
- self.project.emit("log.warning", {"message": "Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e)})
+ self.project.emit("log.warning", {"message": f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.console}: {e}"})
async def _stop_remote_console(self):
"""
@@ -892,7 +891,7 @@ class VMwareVM(BaseNode):
"""
if self._started and self.console_type != new_console_type:
- raise VMwareError('"{name}" must be stopped to change the console type to {new_console_type}'.format(name=self._name, new_console_type=new_console_type))
+ raise VMwareError(f'"{self._name}" must be stopped to change the console type to {new_console_type}')
super(VMwareVM, VMwareVM).console_type.__set__(self, new_console_type)
@@ -906,7 +905,7 @@ class VMwareVM(BaseNode):
nio = self.get_nio(adapter_number)
if nio.capturing:
- raise VMwareError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
+ raise VMwareError(f"Packet capture is already activated on adapter {adapter_number}")
nio.start_packet_capture(output_file)
if self._started:
diff --git a/gns3server/compute/vpcs/__init__.py b/gns3server/compute/vpcs/__init__.py
index d1ee9abd..90953dd0 100644
--- a/gns3server/compute/vpcs/__init__.py
+++ b/gns3server/compute/vpcs/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -89,4 +88,4 @@ class VPCS(BaseManager):
:returns: working directory name
"""
- return os.path.join("vpcs", "pc-{}".format(legacy_vm_id))
+ return os.path.join("vpcs", f"pc-{legacy_vm_id}")
diff --git a/gns3server/compute/vpcs/vpcs_error.py b/gns3server/compute/vpcs/vpcs_error.py
index 5a721681..0fb6f5a9 100644
--- a/gns3server/compute/vpcs/vpcs_error.py
+++ b/gns3server/compute/vpcs/vpcs_error.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
diff --git a/gns3server/compute/vpcs/vpcs_vm.py b/gns3server/compute/vpcs/vpcs_vm.py
index a880bedd..65466e02 100644
--- a/gns3server/compute/vpcs/vpcs_vm.py
+++ b/gns3server/compute/vpcs/vpcs_vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -113,10 +112,10 @@ class VPCSVM(BaseNode):
self.ubridge_path
if not os.path.isfile(path):
- raise VPCSError("VPCS program '{}' is not accessible".format(path))
+ raise VPCSError(f"VPCS program '{path}' is not accessible")
if not os.access(path, os.X_OK):
- raise VPCSError("VPCS program '{}' is not executable".format(path))
+ raise VPCSError(f"VPCS program '{path}' is not executable")
await self._check_vpcs_version()
@@ -174,7 +173,7 @@ class VPCSVM(BaseNode):
with open(script_file, "rb") as f:
return f.read().decode("utf-8", errors="replace")
except OSError as e:
- raise VPCSError('Cannot read the startup script file "{}": {}'.format(script_file, e))
+ raise VPCSError(f'Cannot read the startup script file "{script_file}": {e}')
@startup_script.setter
def startup_script(self, startup_script):
@@ -193,7 +192,7 @@ class VPCSVM(BaseNode):
startup_script = startup_script.replace("%h", self._name)
f.write(startup_script)
except OSError as e:
- raise VPCSError('Cannot write the startup script file "{}": {}'.format(startup_script_path, e))
+ raise VPCSError(f'Cannot write the startup script file "{startup_script_path}": {e}')
async def _check_vpcs_version(self):
"""
@@ -208,9 +207,9 @@ class VPCSVM(BaseNode):
if self._vpcs_version < parse_version("0.6.1"):
raise VPCSError("VPCS executable version must be >= 0.6.1 but not a 0.8")
else:
- raise VPCSError("Could not determine the VPCS version for {}".format(self._vpcs_path()))
+ raise VPCSError(f"Could not determine the VPCS version for {self._vpcs_path()}")
except (OSError, subprocess.SubprocessError) as e:
- raise VPCSError("Error while looking for the VPCS version: {}".format(e))
+ raise VPCSError(f"Error while looking for the VPCS version: {e}")
async def start(self):
"""
@@ -222,9 +221,9 @@ class VPCSVM(BaseNode):
nio = self._ethernet_adapter.get_nio(0)
command = self._build_command()
try:
- log.info("Starting VPCS: {}".format(command))
+ log.info(f"Starting VPCS: {command}")
self._vpcs_stdout_file = os.path.join(self.working_dir, "vpcs.log")
- log.info("Logging to {}".format(self._vpcs_stdout_file))
+ log.info(f"Logging to {self._vpcs_stdout_file}")
flags = 0
if sys.platform.startswith("win32"):
flags = subprocess.CREATE_NEW_PROCESS_GROUP
@@ -239,17 +238,17 @@ class VPCSVM(BaseNode):
await self._start_ubridge()
if nio:
- await self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
+ await self.add_ubridge_udp_connection(f"VPCS-{self._id}", self._local_udp_tunnel[1], nio)
await self.start_wrap_console()
- log.info("VPCS instance {} started PID={}".format(self.name, self._process.pid))
+ log.info(f"VPCS instance {self.name} started PID={self._process.pid}")
self._started = True
self.status = "started"
except (OSError, subprocess.SubprocessError) as e:
vpcs_stdout = self.read_vpcs_stdout()
- log.error("Could not start VPCS {}: {}\n{}".format(self._vpcs_path(), e, vpcs_stdout))
- raise VPCSError("Could not start VPCS {}: {}\n{}".format(self._vpcs_path(), e, vpcs_stdout))
+ log.error(f"Could not start VPCS {self._vpcs_path()}: {e}\n{vpcs_stdout}")
+ raise VPCSError(f"Could not start VPCS {self._vpcs_path()}: {e}\n{vpcs_stdout}")
async def _termination_callback(self, returncode):
"""
@@ -266,7 +265,7 @@ class VPCSVM(BaseNode):
await self._stop_ubridge()
await super().stop()
if returncode != 0:
- self.project.emit("log.error", {"message": "VPCS process has stopped, return code: {}\n{}".format(returncode, self.read_vpcs_stdout())})
+ self.project.emit("log.error", {"message": f"VPCS process has stopped, return code: {returncode}\n{self.read_vpcs_stdout()}"})
async def stop(self):
"""
@@ -284,9 +283,9 @@ class VPCSVM(BaseNode):
try:
self._process.kill()
except OSError as e:
- log.error("Cannot stop the VPCS process: {}".format(e))
+ log.error(f"Cannot stop the VPCS process: {e}")
if self._process.returncode is None:
- log.warning('VPCS VM "{}" with PID={} is still running'.format(self._name, self._process.pid))
+ log.warning(f'VPCS VM "{self._name}" with PID={self._process.pid} is still running')
self._process = None
self._started = False
@@ -305,7 +304,7 @@ class VPCSVM(BaseNode):
Terminate the process if running
"""
- log.info("Stopping VPCS instance {} PID={}".format(self.name, self._process.pid))
+ log.info(f"Stopping VPCS instance {self.name} PID={self._process.pid}")
if sys.platform.startswith("win32"):
try:
self._process.send_signal(signal.CTRL_BREAK_EVENT)
@@ -330,7 +329,7 @@ class VPCSVM(BaseNode):
with open(self._vpcs_stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
- log.warning("Could not read {}: {}".format(self._vpcs_stdout_file, e))
+ log.warning(f"Could not read {self._vpcs_stdout_file}: {e}")
return output
def is_running(self):
@@ -353,7 +352,7 @@ class VPCSVM(BaseNode):
"""
if self.is_running() and self.console_type != new_console_type:
- raise VPCSError('"{name}" must be stopped to change the console type to {new_console_type}'.format(name=self._name, new_console_type=new_console_type))
+ raise VPCSError(f'"{self._name}" must be stopped to change the console type to {new_console_type}')
super(VPCSVM, VPCSVM).console_type.__set__(self, new_console_type)
@@ -370,7 +369,7 @@ class VPCSVM(BaseNode):
port_number=port_number))
if self.is_running():
- await self.add_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
+ await self.add_ubridge_udp_connection(f"VPCS-{self._id}", self._local_udp_tunnel[1], nio)
self._ethernet_adapter.add_nio(port_number, nio)
log.info('VPCS "{name}" [{id}]: {nio} added to port {port_number}'.format(name=self._name,
@@ -392,7 +391,7 @@ class VPCSVM(BaseNode):
raise VPCSError("Port {port_number} doesn't exist on adapter {adapter}".format(adapter=self._ethernet_adapter,
port_number=port_number))
if self.is_running():
- await self.update_ubridge_udp_connection("VPCS-{}".format(self._id), self._local_udp_tunnel[1], nio)
+ await self.update_ubridge_udp_connection(f"VPCS-{self._id}", self._local_udp_tunnel[1], nio)
async def port_remove_nio_binding(self, port_number):
"""
@@ -409,7 +408,7 @@ class VPCSVM(BaseNode):
await self.stop_capture(port_number)
if self.is_running():
- await self._ubridge_send("bridge delete {name}".format(name="VPCS-{}".format(self._id)))
+ await self._ubridge_send(f"bridge delete VPCS-{self._id}")
nio = self._ethernet_adapter.get_nio(port_number)
if isinstance(nio, NIOUDP):
@@ -436,7 +435,7 @@ class VPCSVM(BaseNode):
port_number=port_number))
nio = self._ethernet_adapter.get_nio(port_number)
if not nio:
- raise VPCSError("Port {} is not connected".format(port_number))
+ raise VPCSError(f"Port {port_number} is not connected")
return nio
async def start_capture(self, port_number, output_file):
@@ -449,11 +448,11 @@ class VPCSVM(BaseNode):
nio = self.get_nio(port_number)
if nio.capturing:
- raise VPCSError("Packet capture is already active on port {port_number}".format(port_number=port_number))
+ raise VPCSError(f"Packet capture is already active on port {port_number}")
nio.start_packet_capture(output_file)
if self.ubridge:
- await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name="VPCS-{}".format(self._id),
+ await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=f"VPCS-{self._id}",
output_file=output_file))
log.info("VPCS '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name,
@@ -473,7 +472,7 @@ class VPCSVM(BaseNode):
nio.stop_packet_capture()
if self.ubridge:
- await self._ubridge_send('bridge stop_capture {name}'.format(name="VPCS-{}".format(self._id)))
+ await self._ubridge_send(f'bridge stop_capture VPCS-{self._id}')
log.info("VPCS '{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name,
id=self.id,
@@ -536,7 +535,7 @@ class VPCSVM(BaseNode):
try:
command.extend(["-t", socket.gethostbyname(nio.rhost)]) # destination host, we need to resolve the hostname because VPCS doesn't support it
except socket.gaierror as e:
- raise VPCSError("Can't resolve hostname {}".format(nio.rhost))
+ raise VPCSError(f"Can't resolve hostname {nio.rhost}")
if self.script_file:
command.extend([os.path.basename(self.script_file)])
diff --git a/gns3server/config.py b/gns3server/config.py
index 2c30fdb2..cc048aa1 100644
--- a/gns3server/config.py
+++ b/gns3server/config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GNS3 Technologies Inc.
#
@@ -61,7 +60,7 @@ class Config:
self._watch_callback = []
appname = "GNS3"
- version = "{}.{}".format(__version_info__[0], __version_info__[1])
+ version = f"{__version_info__[0]}.{__version_info__[1]}"
if sys.platform.startswith("win"):
@@ -132,7 +131,7 @@ class Config:
if not os.path.exists(new_server_config) and os.path.exists(old_server_config):
shutil.copyfile(old_server_config, new_server_config)
except OSError as e:
- log.error("Cannot migrate old config files: {}".format(e))
+ log.error(f"Cannot migrate old config files: {e}")
self._main_config_file = os.path.join(versioned_user_dir, server_filename)
for file in self._files:
diff --git a/gns3server/controller/__init__.py b/gns3server/controller/__init__.py
index c9f36759..1018cdd4 100644
--- a/gns3server/controller/__init__.py
+++ b/gns3server/controller/__init__.py
@@ -89,7 +89,7 @@ class Controller:
protocol = server_config.protocol
if self._ssl_context and protocol != "https":
- log.warning("Protocol changed to 'https' for local compute because SSL is enabled".format(port))
+ log.warning("Protocol changed to 'https' for local compute because SSL is enabled")
protocol = "https"
try:
self._local_server = await self.add_compute(compute_id="local",
@@ -104,7 +104,7 @@ class Controller:
connect=True,
ssl_context=self._ssl_context)
except ControllerError:
- log.fatal("Cannot access to the local server, make sure something else is not running on the TCP port {}".format(port))
+ log.fatal(f"Cannot access to the local server, make sure something else is not running on the TCP port {port}")
sys.exit(1)
if computes:
@@ -134,7 +134,7 @@ class Controller:
log.critical("Could not find the SSL certfile or certkey")
raise SystemExit
except ssl.SSLError as e:
- log.critical("SSL error: {}".format(e))
+ log.critical(f"SSL error: {e}")
raise SystemExit
return ssl_context
@@ -251,7 +251,7 @@ class Controller:
if os.path.exists(iourc_path):
try:
- with open(iourc_path, 'r') as f:
+ with open(iourc_path) as f:
self._iou_license_settings["iourc_content"] = f.read()
log.info(f"iourc file '{iourc_path}' loaded")
except OSError as e:
@@ -342,7 +342,7 @@ class Controller:
for compute in self._computes.values():
if name and compute.name == name and not force:
- raise ControllerError('Compute name "{}" already exists'.format(name))
+ raise ControllerError(f'Compute name "{name}" already exists')
compute = Compute(compute_id=compute_id, controller=self, name=name, **kwargs)
self._computes[compute.id] = compute
@@ -421,7 +421,7 @@ class Controller:
except KeyError:
if compute_id == "vm":
raise ControllerNotFoundError("Cannot use a node on the GNS3 VM server with the GNS3 VM not configured")
- raise ControllerNotFoundError("Compute ID {} doesn't exist".format(compute_id))
+ raise ControllerNotFoundError(f"Compute ID {compute_id} doesn't exist")
def has_compute(self, compute_id):
"""
@@ -443,9 +443,9 @@ class Controller:
for project in self._projects.values():
if name and project.name == name:
if path and path == project.path:
- raise ControllerError('Project "{}" already exists in location "{}"'.format(name, path))
+ raise ControllerError(f'Project "{name}" already exists in location "{path}"')
else:
- raise ControllerError('Project "{}" already exists'.format(name))
+ raise ControllerError(f'Project "{name}" already exists')
project = Project(project_id=project_id, controller=self, name=name, path=path, **kwargs)
self._projects[project.id] = project
return self._projects[project.id]
@@ -459,7 +459,7 @@ class Controller:
try:
return self._projects[project_id]
except KeyError:
- raise ControllerNotFoundError("Project ID {} doesn't exist".format(project_id))
+ raise ControllerNotFoundError(f"Project ID {project_id} doesn't exist")
async def get_loaded_project(self, project_id):
"""
@@ -521,7 +521,7 @@ class Controller:
projects_path = self.projects_directory()
while True:
- new_name = "{}-{}".format(base_name, i)
+ new_name = f"{base_name}-{i}"
if new_name not in names and not os.path.exists(os.path.join(projects_path, new_name)):
break
i += 1
diff --git a/gns3server/controller/appliance.py b/gns3server/controller/appliance.py
index 6664d7ec..a1a27a36 100644
--- a/gns3server/controller/appliance.py
+++ b/gns3server/controller/appliance.py
@@ -37,7 +37,7 @@ class Appliance:
del self._data["appliance_id"]
if self.status != 'broken':
- log.debug('Appliance "{name}" [{id}] loaded'.format(name=self.name, id=self._id))
+ log.debug(f'Appliance "{self.name}" [{self._id}] loaded')
@property
def id(self):
diff --git a/gns3server/controller/appliance_manager.py b/gns3server/controller/appliance_manager.py
index 9fa316f0..bf539906 100644
--- a/gns3server/controller/appliance_manager.py
+++ b/gns3server/controller/appliance_manager.py
@@ -90,7 +90,7 @@ class ApplianceManager:
path = os.path.join(directory, file)
appliance_id = uuid.uuid3(uuid.NAMESPACE_URL, path) # Generate UUID from path to avoid change between reboots
try:
- with open(path, 'r', encoding='utf-8') as f:
+ with open(path, encoding='utf-8') as f:
appliance = Appliance(appliance_id, json.load(f), builtin=builtin)
json_data = appliance.__json__() # Check if loaded without error
if appliance.status != 'broken':
@@ -142,20 +142,20 @@ class ApplianceManager:
Download a custom appliance symbol from our GitHub registry repository.
"""
- symbol_url = "https://raw.githubusercontent.com/GNS3/gns3-registry/master/symbols/{}".format(symbol)
+ symbol_url = f"https://raw.githubusercontent.com/GNS3/gns3-registry/master/symbols/{symbol}"
async with HTTPClient.get(symbol_url) as response:
if response.status != 200:
- log.warning("Could not retrieve appliance symbol {} from GitHub due to HTTP error code {}".format(symbol, response.status))
+ log.warning(f"Could not retrieve appliance symbol {symbol} from GitHub due to HTTP error code {response.status}")
else:
try:
symbol_data = await response.read()
- log.info("Saving {} symbol to {}".format(symbol, destination_path))
+ log.info(f"Saving {symbol} symbol to {destination_path}")
with open(destination_path, 'wb') as f:
f.write(symbol_data)
except asyncio.TimeoutError:
- log.warning("Timeout while downloading '{}'".format(symbol_url))
+ log.warning(f"Timeout while downloading '{symbol_url}'")
except OSError as e:
- log.warning("Could not write appliance symbol '{}': {}".format(destination_path, e))
+ log.warning(f"Could not write appliance symbol '{destination_path}': {e}")
@locking
async def download_appliances(self):
@@ -166,15 +166,15 @@ class ApplianceManager:
try:
headers = {}
if self._appliances_etag:
- log.info("Checking if appliances are up-to-date (ETag {})".format(self._appliances_etag))
+ log.info(f"Checking if appliances are up-to-date (ETag {self._appliances_etag})")
headers["If-None-Match"] = self._appliances_etag
async with HTTPClient.get('https://api.github.com/repos/GNS3/gns3-registry/contents/appliances', headers=headers) as response:
if response.status == 304:
- log.info("Appliances are already up-to-date (ETag {})".format(self._appliances_etag))
+ log.info(f"Appliances are already up-to-date (ETag {self._appliances_etag})")
return
elif response.status != 200:
- raise ControllerError("Could not retrieve appliances from GitHub due to HTTP error code {}".format(response.status))
+ raise ControllerError(f"Could not retrieve appliances from GitHub due to HTTP error code {response.status}")
etag = response.headers.get("ETag")
if etag:
self._appliances_etag = etag
@@ -198,11 +198,11 @@ class ApplianceManager:
continue
path = os.path.join(appliances_dir, appliance_name)
try:
- log.info("Saving {} file to {}".format(appliance_name, path))
+ log.info(f"Saving {appliance_name} file to {path}")
with open(path, 'wb') as f:
f.write(appliance_data)
except OSError as e:
- raise ControllerError("Could not write appliance file '{}': {}".format(path, e))
+ raise ControllerError(f"Could not write appliance file '{path}': {e}")
downloaded_appliance_files.append(appliance_name)
# delete old appliance files
@@ -212,14 +212,14 @@ class ApplianceManager:
continue
try:
if os.path.isfile(file_path) or os.path.islink(file_path):
- log.info("Deleting old appliance file {}".format(file_path))
+ log.info(f"Deleting old appliance file {file_path}")
os.unlink(file_path)
except OSError as e:
- log.warning("Could not delete old appliance file '{}': {}".format(file_path, e))
+ log.warning(f"Could not delete old appliance file '{file_path}': {e}")
continue
except ValueError as e:
- raise ControllerError("Could not read appliances information from GitHub: {}".format(e))
+ raise ControllerError(f"Could not read appliances information from GitHub: {e}")
# download the custom symbols
await self.download_custom_symbols()
diff --git a/gns3server/controller/compute.py b/gns3server/controller/compute.py
index 0dc7b1bc..8d7f9897 100644
--- a/gns3server/controller/compute.py
+++ b/gns3server/controller/compute.py
@@ -190,9 +190,9 @@ class Compute:
# Due to random user generated by 1.4 it's common to have a very long user
if len(user) > 14:
user = user[:11] + "..."
- self._name = "{}://{}@{}:{}".format(self._protocol, user, self._host, self._port)
+ self._name = f"{self._protocol}://{user}@{self._host}:{self._port}"
else:
- self._name = "{}://{}:{}".format(self._protocol, self._host, self._port)
+ self._name = f"{self._protocol}://{self._host}:{self._port}"
@property
def connected(self):
@@ -321,10 +321,10 @@ class Compute:
:returns: A file stream
"""
- url = self._getUrl("/projects/{}/files/{}".format(project.id, path))
+ url = self._getUrl(f"/projects/{project.id}/files/{path}")
response = await self._session().request("GET", url, auth=self._auth)
if response.status == 404:
- raise ControllerNotFoundError("{} not found on compute".format(path))
+ raise ControllerNotFoundError(f"{path} not found on compute")
return response
async def download_image(self, image_type, image):
@@ -336,10 +336,10 @@ class Compute:
:returns: A file stream
"""
- url = self._getUrl("/{}/images/{}".format(image_type, image))
+ url = self._getUrl(f"/{image_type}/images/{image}")
response = await self._session().request("GET", url, auth=self._auth)
if response.status == 404:
- raise ControllerNotFoundError("{} not found on compute".format(image))
+ raise ControllerNotFoundError(f"{image} not found on compute")
return response
async def http_query(self, method, path, data=None, dont_connect=False, **kwargs):
@@ -352,7 +352,7 @@ class Compute:
await self._controller.gns3vm.start()
await self.connect()
if not self._connected and not dont_connect:
- raise ComputeError("Cannot connect to compute '{}' with request {} {}".format(self._name, method, path))
+ raise ComputeError(f"Cannot connect to compute '{self._name}' with request {method} {path}")
response = await self._run_http_query(method, path, data=data, **kwargs)
return response
@@ -373,33 +373,33 @@ class Compute:
if not self._connected and not self._closed and self.host:
try:
- log.info("Connecting to compute '{}'".format(self._id))
+ log.info(f"Connecting to compute '{self._id}'")
response = await self._run_http_query("GET", "/capabilities")
except ComputeError as e:
- log.warning("Cannot connect to compute '{}': {}".format(self._id, e))
+ log.warning(f"Cannot connect to compute '{self._id}': {e}")
# Try to reconnect after 5 seconds if server unavailable only if not during tests (otherwise we create a ressource usage bomb)
if not hasattr(sys, "_called_from_test") or not sys._called_from_test:
if self.id != "local" and self.id != "vm" and not self._controller.compute_has_open_project(self):
- log.warning("Not reconnecting to compute '{}' because there is no project opened on it".format(self._id))
+ log.warning(f"Not reconnecting to compute '{self._id}' because there is no project opened on it")
return
self._connection_failure += 1
# After 5 failure we close the project using the compute to avoid sync issues
if self._connection_failure == 10:
- log.error("Could not connect to compute '{}' after multiple attempts: {}".format(self._id, e))
+ log.error(f"Could not connect to compute '{self._id}' after multiple attempts: {e}")
await self._controller.close_compute_projects(self)
asyncio.get_event_loop().call_later(5, lambda: asyncio.ensure_future(self._try_reconnect()))
return
except web.HTTPNotFound:
- raise ControllerNotFoundError("The server {} is not a GNS3 server or it's a 1.X server".format(self._id))
+ raise ControllerNotFoundError(f"The server {self._id} is not a GNS3 server or it's a 1.X server")
except web.HTTPUnauthorized:
- raise ControllerUnauthorizedError("Invalid auth for server {}".format(self._id))
+ raise ControllerUnauthorizedError(f"Invalid auth for server {self._id}")
except web.HTTPServiceUnavailable:
- raise ControllerNotFoundError("The server {} is unavailable".format(self._id))
+ raise ControllerNotFoundError(f"The server {self._id} is unavailable")
except ValueError:
- raise ComputeError("Invalid server url for server {}".format(self._id))
+ raise ComputeError(f"Invalid server url for server {self._id}")
if "version" not in response.json:
- msg = "The server {} is not a GNS3 server".format(self._id)
+ msg = f"The server {self._id} is not a GNS3 server"
log.error(msg)
await self._http_session.close()
raise ControllerNotFoundError(msg)
@@ -426,7 +426,7 @@ class Compute:
self._last_error = msg
raise ControllerError(msg)
else:
- msg = "{}\nUsing different versions may result in unexpected problems. Please use at your own risk.".format(msg)
+ msg = f"{msg}\nUsing different versions may result in unexpected problems. Please use at your own risk."
self._controller.notification.controller_emit("log.warning", {"message": msg})
self._notifications = asyncio.gather(self._connect_notification())
@@ -443,7 +443,7 @@ class Compute:
ws_url = self._getUrl("/notifications/ws")
try:
async with self._session().ws_connect(ws_url, auth=self._auth, heartbeat=10) as ws:
- log.info("Connected to compute '{}' WebSocket '{}'".format(self._id, ws_url))
+ log.info(f"Connected to compute '{self._id}' WebSocket '{ws_url}'")
async for response in ws:
if response.type == aiohttp.WSMsgType.TEXT:
msg = json.loads(response.data)
@@ -462,19 +462,19 @@ class Compute:
if response.type == aiohttp.WSMsgType.CLOSE:
await ws.close()
elif response.type == aiohttp.WSMsgType.ERROR:
- log.error("Error received on compute '{}' WebSocket '{}': {}".format(self._id, ws_url, ws.exception()))
+ log.error(f"Error received on compute '{self._id}' WebSocket '{ws_url}': {ws.exception()}")
elif response.type == aiohttp.WSMsgType.CLOSED:
pass
break
except aiohttp.ClientError as e:
- log.error("Client response error received on compute '{}' WebSocket '{}': {}".format(self._id, ws_url,e))
+ log.error(f"Client response error received on compute '{self._id}' WebSocket '{ws_url}': {e}")
finally:
self._connected = False
- log.info("Connection closed to compute '{}' WebSocket '{}'".format(self._id, ws_url))
+ log.info(f"Connection closed to compute '{self._id}' WebSocket '{ws_url}'")
# Try to reconnect after 1 second if server unavailable only if not during tests (otherwise we create a ressources usage bomb)
if self.id != "local" and not hasattr(sys, "_called_from_test") or not sys._called_from_test:
- log.info("Reconnecting to to compute '{}' WebSocket '{}'".format(self._id, ws_url))
+ log.info(f"Reconnecting to to compute '{self._id}' WebSocket '{ws_url}'")
asyncio.get_event_loop().call_later(1, lambda: asyncio.ensure_future(self.connect()))
self._cpu_usage_percent = None
@@ -492,10 +492,10 @@ class Compute:
host = str(ipaddress.IPv6Address(host))
if host == "::":
host = "::1"
- host = "[{}]".format(host)
+ host = f"[{host}]"
elif host == "0.0.0.0":
host = "127.0.0.1"
- return "{}://{}:{}/v3/compute{}".format(self._protocol, host, self._port, path)
+ return f"{self._protocol}://{host}:{self._port}/v3/compute{path}"
def get_url(self, path):
""" Returns URL for specific path at Compute"""
@@ -524,10 +524,10 @@ class Compute:
else:
data = json.dumps(data).encode("utf-8")
try:
- log.debug("Attempting request to compute: {method} {url} {headers}".format(method=method, url=url, headers=headers))
+ log.debug(f"Attempting request to compute: {method} {url} {headers}")
response = await self._session().request(method, url, headers=headers, data=data, auth=self._auth, chunked=chunked, timeout=timeout)
except asyncio.TimeoutError:
- raise ComputeError("Timeout error for {} call to {} after {}s".format(method, url, timeout))
+ raise ComputeError(f"Timeout error for {method} call to {url} after {timeout}s")
except (aiohttp.ClientError, aiohttp.ServerDisconnectedError, aiohttp.ClientResponseError, ValueError, KeyError, socket.gaierror) as e:
# aiohttp 2.3.1 raises socket.gaierror when cannot find host
raise ComputeError(str(e))
@@ -546,13 +546,13 @@ class Compute:
msg = ""
if response.status == 401:
- raise ControllerUnauthorizedError("Invalid authentication for compute {}".format(self.id))
+ raise ControllerUnauthorizedError(f"Invalid authentication for compute {self.id}")
elif response.status == 403:
raise ControllerForbiddenError(msg)
elif response.status == 404:
- raise ControllerNotFoundError("{} {} not found".format(method, path))
+ raise ControllerNotFoundError(f"{method} {path} not found")
elif response.status == 408 or response.status == 504:
- raise ControllerTimeoutError("{} {} request timeout".format(method, path))
+ raise ControllerTimeoutError(f"{method} {path} request timeout")
elif response.status == 409:
try:
raise ComputeConflict(json.loads(body))
@@ -560,11 +560,11 @@ class Compute:
except ValueError:
raise ControllerError(msg)
elif response.status == 500:
- raise aiohttp.web.HTTPInternalServerError(text="Internal server error {}".format(url))
+ raise aiohttp.web.HTTPInternalServerError(text=f"Internal server error {url}")
elif response.status == 503:
- raise aiohttp.web.HTTPServiceUnavailable(text="Service unavailable {} {}".format(url, body))
+ raise aiohttp.web.HTTPServiceUnavailable(text=f"Service unavailable {url} {body}")
else:
- raise NotImplementedError("{} status code is not supported for {} '{}'".format(response.status, method, url))
+ raise NotImplementedError(f"{response.status} status code is not supported for {method} '{url}'")
if body and len(body):
if raw:
response.body = body
@@ -572,7 +572,7 @@ class Compute:
try:
response.json = json.loads(body)
except ValueError:
- raise ControllerError("The server {} is not a GNS3 server".format(self._id))
+ raise ControllerError(f"The server {self._id} is not a GNS3 server")
else:
response.json = {}
response.body = b""
@@ -597,7 +597,7 @@ class Compute:
Forward a call to the emulator on compute
"""
try:
- action = "/{}/{}".format(type, path)
+ action = f"/{type}/{path}"
res = await self.http_query(method, action, data=data, timeout=None)
except aiohttp.ServerDisconnectedError:
raise ControllerError(f"Connection lost to {self._id} during {method} {action}")
@@ -609,7 +609,7 @@ class Compute:
"""
images = []
- res = await self.http_query("GET", "/{}/images".format(type), timeout=None)
+ res = await self.http_query("GET", f"/{type}/images", timeout=None)
images = res.json
try:
@@ -621,14 +621,14 @@ class Compute:
else:
images = sorted(images, key=itemgetter('image'))
except OSError as e:
- raise ComputeError("Cannot list images: {}".format(str(e)))
+ raise ComputeError(f"Cannot list images: {str(e)}")
return images
async def list_files(self, project):
"""
List files in the project on computes
"""
- path = "/projects/{}/files".format(project.id)
+ path = f"/projects/{project.id}/files"
res = await self.http_query("GET", path, timeout=None)
return res.json
@@ -676,4 +676,4 @@ class Compute:
if this_network.overlaps(other_network):
return this_interface["ip_address"], other_interface["ip_address"]
- raise ValueError("No common subnet for compute {} and {}".format(self.name, other_compute.name))
+ raise ValueError(f"No common subnet for compute {self.name} and {other_compute.name}")
diff --git a/gns3server/controller/drawing.py b/gns3server/controller/drawing.py
index de929b2f..b7c25f32 100644
--- a/gns3server/controller/drawing.py
+++ b/gns3server/controller/drawing.py
@@ -96,7 +96,7 @@ class Drawing:
try:
root = ET.fromstring(value)
except ET.ParseError as e:
- log.error("Can't parse SVG: {}".format(e))
+ log.error(f"Can't parse SVG: {e}")
return
# SVG is the default namespace no need to prefix it
ET.register_namespace('xmlns', "http://www.w3.org/2000/svg")
@@ -222,4 +222,4 @@ class Drawing:
}
def __repr__(self):
- return "<gns3server.controller.Drawing {}>".format(self._id)
+ return f"<gns3server.controller.Drawing {self._id}>"
diff --git a/gns3server/controller/export_project.py b/gns3server/controller/export_project.py
index d7aea015..fe947558 100644
--- a/gns3server/controller/export_project.py
+++ b/gns3server/controller/export_project.py
@@ -58,7 +58,7 @@ async def export_project(zstream, project, temporary_dir, include_images=False,
project.dump()
if not os.path.exists(project._path):
- raise ControllerNotFoundError("Project could not be found at '{}'".format(project._path))
+ raise ControllerNotFoundError(f"Project could not be found at '{project._path}'")
# First we process the .gns3 in order to be sure we don't have an error
for file in os.listdir(project._path):
@@ -75,7 +75,7 @@ async def export_project(zstream, project, temporary_dir, include_images=False,
try:
open(path).close()
except OSError as e:
- msg = "Could not export file {}: {}".format(path, e)
+ msg = f"Could not export file {path}: {e}"
log.warning(msg)
project.emit_notification("log.warning", {"message": msg})
continue
@@ -85,7 +85,7 @@ async def export_project(zstream, project, temporary_dir, include_images=False,
_patch_mtime(path)
zstream.write(path, os.path.relpath(path, project._path))
except FileNotFoundError as e:
- log.warning("Cannot export local file: {}".format(e))
+ log.warning(f"Cannot export local file: {e}")
continue
# Export files from remote computes
@@ -97,7 +97,7 @@ async def export_project(zstream, project, temporary_dir, include_images=False,
log.debug("Downloading file '{}' from compute '{}'".format(compute_file["path"], compute.id))
response = await compute.download_file(project, compute_file["path"])
if response.status != 200:
- log.warning("Cannot export file from compute '{}'. Compute returned status code {}.".format(compute.id, response.status))
+ log.warning(f"Cannot export file from compute '{compute.id}'. Compute returned status code {response.status}.")
continue
(fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
async with aiofiles.open(fd, 'wb') as f:
@@ -181,7 +181,7 @@ async def _patch_project_file(project, path, zstream, include_images, keep_compu
with open(path) as f:
topology = json.load(f)
except (OSError, ValueError) as e:
- raise ControllerError("Project file '{}' cannot be read: {}".format(path, e))
+ raise ControllerError(f"Project file '{path}' cannot be read: {e}")
if "topology" in topology:
if "nodes" in topology["topology"]:
@@ -224,14 +224,14 @@ async def _patch_project_file(project, path, zstream, include_images, keep_compu
if not keep_compute_id:
topology["topology"]["computes"] = [] # Strip compute information because could contain secret info like password
- local_images = set([i['image'] for i in images if i['compute_id'] == 'local'])
+ local_images = {i['image'] for i in images if i['compute_id'] == 'local'}
for image in local_images:
_export_local_image(image, zstream)
- remote_images = set([
+ remote_images = {
(i['compute_id'], i['image_type'], i['image'])
- for i in images if i['compute_id'] != 'local'])
+ for i in images if i['compute_id'] != 'local'}
for compute_id, image_type, image in remote_images:
await _export_remote_images(project, compute_id, image_type, image, zstream, temporary_dir)
@@ -274,15 +274,15 @@ async def _export_remote_images(project, compute_id, image_type, image, project_
Export specific image from remote compute.
"""
- log.debug("Downloading image '{}' from compute '{}'".format(image, compute_id))
+ log.debug(f"Downloading image '{image}' from compute '{compute_id}'")
try:
compute = [compute for compute in project.computes if compute.id == compute_id][0]
except IndexError:
- raise ControllerNotFoundError("Cannot export image from '{}' compute. Compute doesn't exist.".format(compute_id))
+ raise ControllerNotFoundError(f"Cannot export image from '{compute_id}' compute. Compute doesn't exist.")
response = await compute.download_image(image_type, image)
if response.status != 200:
- raise ControllerError("Cannot export image from compute '{}'. Compute returned status code {}.".format(compute_id, response.status))
+ raise ControllerError(f"Cannot export image from compute '{compute_id}'. Compute returned status code {response.status}.")
(fd, temp_path) = tempfile.mkstemp(dir=temporary_dir)
async with aiofiles.open(fd, 'wb') as f:
@@ -290,7 +290,7 @@ async def _export_remote_images(project, compute_id, image_type, image, project_
try:
data = await response.content.read(CHUNK_SIZE)
except asyncio.TimeoutError:
- raise ControllerTimeoutError("Timeout when downloading image '{}' from remote compute {}:{}".format(image, compute.host, compute.port))
+ raise ControllerTimeoutError(f"Timeout when downloading image '{image}' from remote compute {compute.host}:{compute.port}")
if not data:
break
await f.write(data)
diff --git a/gns3server/controller/gns3vm/__init__.py b/gns3server/controller/gns3vm/__init__.py
index 0de1a793..4b5da724 100644
--- a/gns3server/controller/gns3vm/__init__.py
+++ b/gns3server/controller/gns3vm/__init__.py
@@ -63,7 +63,7 @@ class GNS3VM:
download_url = "https://github.com/GNS3/gns3-gui/releases/download/v{version}/GNS3.VM.VMware.Workstation.{version}.zip".format(version=__version__)
vmware_info = {
"engine_id": "vmware",
- "description": 'VMware is the recommended choice for best performances.<br>The GNS3 VM can be downloaded <a href="{}">here</a>.'.format(download_url),
+ "description": f'VMware is the recommended choice for best performances.<br>The GNS3 VM can be downloaded <a href="{download_url}">here</a>.',
"support_when_exit": True,
"support_headless": True,
"support_ram": True
@@ -77,7 +77,7 @@ class GNS3VM:
hyperv_info = {
"engine_id": "hyper-v",
"name": "Hyper-V",
- "description": 'Hyper-V support (Windows 10/Server 2016 and above). Nested virtualization must be supported and enabled (Intel processor only)<br>The GNS3 VM can be downloaded <a href="{}">here</a>'.format(download_url),
+ "description": f'Hyper-V support (Windows 10/Server 2016 and above). Nested virtualization must be supported and enabled (Intel processor only)<br>The GNS3 VM can be downloaded <a href="{download_url}">here</a>',
"support_when_exit": True,
"support_headless": False,
"support_ram": True
@@ -87,7 +87,7 @@ class GNS3VM:
virtualbox_info = {
"engine_id": "virtualbox",
"name": "VirtualBox",
- "description": 'VirtualBox support. Nested virtualization for both Intel and AMD processors is supported since version 6.1<br>The GNS3 VM can be downloaded <a href="{}">here</a>'.format(download_url),
+ "description": f'VirtualBox support. Nested virtualization for both Intel and AMD processors is supported since version 6.1<br>The GNS3 VM can be downloaded <a href="{download_url}">here</a>',
"support_when_exit": True,
"support_headless": True,
"support_ram": True
@@ -246,7 +246,7 @@ class GNS3VM:
elif engine == "remote":
self._engines["remote"] = RemoteGNS3VM(self._controller)
return self._engines["remote"]
- raise NotImplementedError("The engine {} for the GNS3 VM is not supported".format(engine))
+ raise NotImplementedError(f"The engine {engine} for the GNS3 VM is not supported")
def __json__(self):
return self._settings
@@ -281,14 +281,14 @@ class GNS3VM:
# User will receive the error later when they will try to use the node
try:
compute = await self._controller.add_compute(compute_id="vm",
- name="GNS3 VM ({})".format(self.current_engine().vmname),
+ name=f"GNS3 VM ({self.current_engine().vmname})",
host=None,
force=True)
compute.set_last_error(str(e))
except ControllerError:
pass
- log.error("Cannot start the GNS3 VM: {}".format(e))
+ log.error(f"Cannot start the GNS3 VM: {e}")
async def exit_vm(self):
@@ -319,7 +319,7 @@ class GNS3VM:
engine.headless = self._settings["headless"]
engine.port = self._settings["port"]
compute = await self._controller.add_compute(compute_id="vm",
- name="GNS3 VM is starting ({})".format(engine.vmname),
+ name=f"GNS3 VM is starting ({engine.vmname})",
host=None,
force=True,
connect=False)
@@ -328,12 +328,12 @@ class GNS3VM:
await engine.start()
except Exception as e:
await self._controller.delete_compute("vm")
- log.error("Cannot start the GNS3 VM: {}".format(str(e)))
- await compute.update(name="GNS3 VM ({})".format(engine.vmname))
+ log.error(f"Cannot start the GNS3 VM: {str(e)}")
+ await compute.update(name=f"GNS3 VM ({engine.vmname})")
compute.set_last_error(str(e))
raise e
await compute.connect() # we can connect now that the VM has started
- await compute.update(name="GNS3 VM ({})".format(engine.vmname),
+ await compute.update(name=f"GNS3 VM ({engine.vmname})",
protocol=self.protocol,
host=self.ip_address,
port=self.port,
@@ -357,7 +357,7 @@ class GNS3VM:
vm_interface_netmask = interface["netmask"]
break
if vm_interface_netmask:
- vm_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, vm_interface_netmask)).network
+ vm_network = ipaddress.ip_interface(f"{compute.host_ip}/{vm_interface_netmask}").network
for compute_id in self._controller.computes:
if compute_id == "local":
compute = self._controller.get_compute(compute_id)
@@ -368,7 +368,7 @@ class GNS3VM:
netmask = interface["netmask"]
break
if netmask:
- compute_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, netmask)).network
+ compute_network = ipaddress.ip_interface(f"{compute.host_ip}/{netmask}").network
if vm_network.compare_networks(compute_network) != 0:
msg = "The GNS3 VM (IP={}, NETWORK={}) is not on the same network as the {} server (IP={}, NETWORK={}), please make sure the local server binding is in the same network as the GNS3 VM".format(self.ip_address,
vm_network,
@@ -377,9 +377,9 @@ class GNS3VM:
compute_network)
self._controller.notification.controller_emit("log.warning", {"message": msg})
except ComputeError as e:
- log.warning("Could not check the VM is in the same subnet as the local server: {}".format(e))
+ log.warning(f"Could not check the VM is in the same subnet as the local server: {e}")
except ControllerError as e:
- log.warning("Could not check the VM is in the same subnet as the local server: {}".format(e))
+ log.warning(f"Could not check the VM is in the same subnet as the local server: {e}")
@locking
async def _suspend(self):
diff --git a/gns3server/controller/gns3vm/gns3_vm_error.py b/gns3server/controller/gns3vm/gns3_vm_error.py
index 0a53cb3f..b72605a6 100644
--- a/gns3server/controller/gns3vm/gns3_vm_error.py
+++ b/gns3server/controller/gns3vm/gns3_vm_error.py
@@ -26,4 +26,4 @@ class GNS3VMError(Exception):
return self._message
def __str__(self):
- return "GNS3VM: {}".format(self._message)
+ return f"GNS3VM: {self._message}"
diff --git a/gns3server/controller/gns3vm/hyperv_gns3_vm.py b/gns3server/controller/gns3vm/hyperv_gns3_vm.py
index 45186cdf..df3eea48 100644
--- a/gns3server/controller/gns3vm/hyperv_gns3_vm.py
+++ b/gns3server/controller/gns3vm/hyperv_gns3_vm.py
@@ -55,7 +55,7 @@ class HyperVGNS3VM(BaseGNS3VM):
raise GNS3VMError("Hyper-V is only supported on Windows")
if sys.getwindowsversion().platform_version[0] < 10:
- raise GNS3VMError("Windows 10/Windows Server 2016 or a later version is required to run Hyper-V with nested virtualization enabled (version {} detected)".format(sys.getwindowsversion().platform_version[0]))
+ raise GNS3VMError(f"Windows 10/Windows Server 2016 or a later version is required to run Hyper-V with nested virtualization enabled (version {sys.getwindowsversion().platform_version[0]} detected)")
is_windows_10 = sys.getwindowsversion().platform_version[0] == 10 and sys.getwindowsversion().platform_version[1] == 0
@@ -69,7 +69,7 @@ class HyperVGNS3VM(BaseGNS3VM):
self._wmi = wmi
conn = self._wmi.WMI()
except self._wmi.x_wmi as e:
- raise GNS3VMError("Could not connect to WMI: {}".format(e))
+ raise GNS3VMError(f"Could not connect to WMI: {e}")
if not conn.Win32_ComputerSystem()[0].HypervisorPresent:
raise GNS3VMError("Hyper-V is not installed or activated")
@@ -95,7 +95,7 @@ class HyperVGNS3VM(BaseGNS3VM):
try:
self._conn = self._wmi.WMI(namespace=r"root\virtualization\v2")
except self._wmi.x_wmi as e:
- raise GNS3VMError("Could not connect to WMI: {}".format(e))
+ raise GNS3VMError(f"Could not connect to WMI: {e}")
if not self._conn.Msvm_VirtualSystemManagementService():
raise GNS3VMError("The Windows account running GNS3 does not have the required permissions for Hyper-V")
@@ -114,7 +114,7 @@ class HyperVGNS3VM(BaseGNS3VM):
if nb_vms == 0:
return None
elif nb_vms > 1:
- raise GNS3VMError("Duplicate VM name found for {}".format(vm_name))
+ raise GNS3VMError(f"Duplicate VM name found for {vm_name}")
else:
return vms[0]
@@ -158,7 +158,7 @@ class HyperVGNS3VM(BaseGNS3VM):
available_vcpus = psutil.cpu_count(logical=False)
if vcpus > available_vcpus:
- raise GNS3VMError("You have allocated too many vCPUs for the GNS3 VM! (max available is {} vCPUs)".format(available_vcpus))
+ raise GNS3VMError(f"You have allocated too many vCPUs for the GNS3 VM! (max available is {available_vcpus} vCPUs)")
try:
mem_settings = self._get_vm_resources(self._vm, 'Msvm_MemorySettingData')[0]
@@ -175,9 +175,9 @@ class HyperVGNS3VM(BaseGNS3VM):
cpu_settings.ExposeVirtualizationExtensions = True # allow the VM to use nested virtualization
self._management.ModifyResourceSettings(ResourceSettings=[cpu_settings.GetText_(1)])
- log.info("GNS3 VM vCPU count set to {} and RAM amount set to {}".format(vcpus, ram))
+ log.info(f"GNS3 VM vCPU count set to {vcpus} and RAM amount set to {ram}")
except Exception as e:
- raise GNS3VMError("Could not set to {} and RAM amount set to {}: {}".format(vcpus, ram, e))
+ raise GNS3VMError(f"Could not set to {vcpus} and RAM amount set to {ram}: {e}")
async def list(self):
"""
@@ -193,7 +193,7 @@ class HyperVGNS3VM(BaseGNS3VM):
if vm.ElementName != self._management.SystemName:
vms.append({"vmname": vm.ElementName})
except self._wmi.x_wmi as e:
- raise GNS3VMError("Could not list Hyper-V VMs: {}".format(e))
+ raise GNS3VMError(f"Could not list Hyper-V VMs: {e}")
return vms
def _get_wmi_obj(self, path):
@@ -211,7 +211,7 @@ class HyperVGNS3VM(BaseGNS3VM):
if not self._vm:
self._vm = self._find_vm(self.vmname)
if not self._vm:
- raise GNS3VMError("Could not find Hyper-V VM {}".format(self.vmname))
+ raise GNS3VMError(f"Could not find Hyper-V VM {self.vmname}")
job_path, ret = self._vm.RequestStateChange(state)
if ret == HyperVGNS3VM._WMI_JOB_STATUS_STARTED:
job = self._get_wmi_obj(job_path)
@@ -219,9 +219,9 @@ class HyperVGNS3VM(BaseGNS3VM):
await asyncio.sleep(0.1)
job = self._get_wmi_obj(job_path)
if job.JobState != HyperVGNS3VM._WMI_JOB_STATE_COMPLETED:
- raise GNS3VMError("Error while changing state: {}".format(job.ErrorSummaryDescription))
+ raise GNS3VMError(f"Error while changing state: {job.ErrorSummaryDescription}")
elif ret != 0 or ret != 32775:
- raise GNS3VMError("Failed to change state to {}".format(state))
+ raise GNS3VMError(f"Failed to change state to {state}")
async def _is_vm_network_active(self):
"""
@@ -248,7 +248,7 @@ class HyperVGNS3VM(BaseGNS3VM):
self._vm = self._find_vm(self.vmname)
if not self._vm:
- raise GNS3VMError("Could not find Hyper-V VM {}".format(self.vmname))
+ raise GNS3VMError(f"Could not find Hyper-V VM {self.vmname}")
if not self._is_running():
if self.allocate_vcpus_ram:
@@ -260,7 +260,7 @@ class HyperVGNS3VM(BaseGNS3VM):
try:
await self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_ENABLED)
except GNS3VMError as e:
- raise GNS3VMError("Failed to start the GNS3 VM: {}".format(e))
+ raise GNS3VMError(f"Failed to start the GNS3 VM: {e}")
log.info("GNS3 VM has been started")
# check if VM network is active
@@ -296,10 +296,10 @@ class HyperVGNS3VM(BaseGNS3VM):
if guest_ip_address:
break
elif trial == 0:
- raise GNS3VMError("Could not find guest IP address for {}".format(self.vmname))
+ raise GNS3VMError(f"Could not find guest IP address for {self.vmname}")
await asyncio.sleep(1)
self.ip_address = guest_ip_address
- log.info("GNS3 VM IP address set to {}".format(guest_ip_address))
+ log.info(f"GNS3 VM IP address set to {guest_ip_address}")
self.running = True
async def suspend(self):
@@ -310,7 +310,7 @@ class HyperVGNS3VM(BaseGNS3VM):
try:
await self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_PAUSED)
except GNS3VMError as e:
- raise GNS3VMError("Failed to suspend the GNS3 VM: {}".format(e))
+ raise GNS3VMError(f"Failed to suspend the GNS3 VM: {e}")
log.info("GNS3 VM has been suspended")
self.running = False
@@ -322,6 +322,6 @@ class HyperVGNS3VM(BaseGNS3VM):
try:
await self._set_state(HyperVGNS3VM._HYPERV_VM_STATE_SHUTDOWN)
except GNS3VMError as e:
- raise GNS3VMError("Failed to stop the GNS3 VM: {}".format(e))
+ raise GNS3VMError(f"Failed to stop the GNS3 VM: {e}")
log.info("GNS3 VM has been stopped")
self.running = False
diff --git a/gns3server/controller/gns3vm/remote_gns3_vm.py b/gns3server/controller/gns3vm/remote_gns3_vm.py
index 386c0a20..0e6cf83a 100644
--- a/gns3server/controller/gns3vm/remote_gns3_vm.py
+++ b/gns3server/controller/gns3vm/remote_gns3_vm.py
@@ -58,7 +58,7 @@ class RemoteGNS3VM(BaseGNS3VM):
self.user = compute.user
self.password = compute.password
return
- raise GNS3VMError("Can't start the GNS3 VM remote VM {} not found".format(self.vmname))
+ raise GNS3VMError(f"Can't start the GNS3 VM remote VM {self.vmname} not found")
async def suspend(self):
"""
diff --git a/gns3server/controller/gns3vm/virtualbox_gns3_vm.py b/gns3server/controller/gns3vm/virtualbox_gns3_vm.py
index b89d1dc2..b6d15974 100644
--- a/gns3server/controller/gns3vm/virtualbox_gns3_vm.py
+++ b/gns3server/controller/gns3vm/virtualbox_gns3_vm.py
@@ -50,7 +50,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
result = await self._virtualbox_manager.execute(subcommand, args, timeout)
return ("\n".join(result))
except VirtualBoxError as e:
- raise GNS3VMError("Error while executing VBoxManage command: {}".format(e))
+ raise GNS3VMError(f"Error while executing VBoxManage command: {e}")
async def _get_state(self):
"""
@@ -90,7 +90,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
if not self._system_properties:
await self._get_system_properties()
if "API version" not in self._system_properties:
- raise VirtualBoxError("Can't access to VirtualBox API version:\n{}".format(self._system_properties))
+ raise VirtualBoxError(f"Can't access to VirtualBox API version:\n{self._system_properties}")
from cpuinfo import get_cpu_info
cpu_info = await wait_run_in_executor(get_cpu_info)
vendor_id = cpu_info.get('vendor_id_raw')
@@ -101,7 +101,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
if parse_version(self._system_properties["API version"]) < parse_version("6_0"):
raise VirtualBoxError("VirtualBox version 6.0 or above is required to run the GNS3 VM with nested virtualization enabled on AMD processors")
else:
- log.warning("Could not determine CPU vendor: {}".format(vendor_id))
+ log.warning(f"Could not determine CPU vendor: {vendor_id}")
async def _look_for_interface(self, network_backend):
"""
@@ -134,7 +134,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
for info in result.splitlines():
if '=' in info:
name, value = info.split('=', 1)
- if name == "hostonlyadapter{}".format(interface_number):
+ if name == f"hostonlyadapter{interface_number}":
return value.strip('"')
return None
@@ -217,7 +217,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
await self._check_requirements()
return await self._virtualbox_manager.list_vms()
except VirtualBoxError as e:
- raise GNS3VMError("Could not list VirtualBox VMs: {}".format(str(e)))
+ raise GNS3VMError(f"Could not list VirtualBox VMs: {str(e)}")
async def start(self):
"""
@@ -229,15 +229,15 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
# get a NAT interface number
nat_interface_number = await self._look_for_interface("nat")
if nat_interface_number < 0:
- raise GNS3VMError('VM "{}" must have a NAT interface configured in order to start'.format(self.vmname))
+ raise GNS3VMError(f'VM "{self.vmname}" must have a NAT interface configured in order to start')
hostonly_interface_number = await self._look_for_interface("hostonly")
if hostonly_interface_number < 0:
- raise GNS3VMError('VM "{}" must have a host-only interface configured in order to start'.format(self.vmname))
+ raise GNS3VMError(f'VM "{self.vmname}" must have a host-only interface configured in order to start')
vboxnet = await self._look_for_vboxnet(hostonly_interface_number)
if vboxnet is None:
- raise GNS3VMError('A VirtualBox host-only network could not be found on network adapter {} for "{}"'.format(hostonly_interface_number, self._vmname))
+ raise GNS3VMError(f'A VirtualBox host-only network could not be found on network adapter {hostonly_interface_number} for "{self._vmname}"')
if not (await self._check_vboxnet_exists(vboxnet)):
if sys.platform.startswith("win") and vboxnet == "vboxnet0":
@@ -245,7 +245,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
# on Windows. Try to patch this with the first available vboxnet we find.
first_available_vboxnet = await self._find_first_available_vboxnet()
if first_available_vboxnet is None:
- raise GNS3VMError('Please add a VirtualBox host-only network with DHCP enabled and attached it to network adapter {} for "{}"'.format(hostonly_interface_number, self._vmname))
+ raise GNS3VMError(f'Please add a VirtualBox host-only network with DHCP enabled and attached it to network adapter {hostonly_interface_number} for "{self._vmname}"')
await self.set_hostonly_network(hostonly_interface_number, first_available_vboxnet)
vboxnet = first_available_vboxnet
else:
@@ -254,10 +254,10 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
self._vmname))
if not (await self._check_dhcp_server(vboxnet)):
- raise GNS3VMError('DHCP must be enabled on VirtualBox host-only network "{}"'.format(vboxnet))
+ raise GNS3VMError(f'DHCP must be enabled on VirtualBox host-only network "{vboxnet}"')
vm_state = await self._get_state()
- log.info('"{}" state is {}'.format(self._vmname, vm_state))
+ log.info(f'"{self._vmname}" state is {vm_state}')
if vm_state == "poweroff":
if self.allocate_vcpus_ram:
@@ -285,20 +285,20 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
s.bind((ip_address, 0))
api_port = s.getsockname()[1]
except OSError as e:
- raise GNS3VMError("Error while getting random port: {}".format(e))
+ raise GNS3VMError(f"Error while getting random port: {e}")
if (await self._check_vbox_port_forwarding()):
# delete the GNS3VM NAT port forwarding rule if it exists
- log.info("Removing GNS3VM NAT port forwarding rule from interface {}".format(nat_interface_number))
- await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number), "delete", "GNS3VM"])
+ log.info(f"Removing GNS3VM NAT port forwarding rule from interface {nat_interface_number}")
+ await self._execute("controlvm", [self._vmname, f"natpf{nat_interface_number}", "delete", "GNS3VM"])
# add a GNS3VM NAT port forwarding rule to redirect 127.0.0.1 with random port to the port in the VM
- log.info("Adding GNS3VM NAT port forwarding rule with port {} to interface {}".format(api_port, nat_interface_number))
- await self._execute("controlvm", [self._vmname, "natpf{}".format(nat_interface_number),
- "GNS3VM,tcp,{},{},,{}".format(ip_address, api_port, self.port)])
+ log.info(f"Adding GNS3VM NAT port forwarding rule with port {api_port} to interface {nat_interface_number}")
+ await self._execute("controlvm", [self._vmname, f"natpf{nat_interface_number}",
+ f"GNS3VM,tcp,{ip_address},{api_port},,{self.port}"])
self.ip_address = await self._get_ip(hostonly_interface_number, api_port)
- log.info("GNS3 VM has been started with IP {}".format(self.ip_address))
+ log.info(f"GNS3 VM has been started with IP {self.ip_address}")
self.running = True
async def _get_ip(self, hostonly_interface_number, api_port):
@@ -329,7 +329,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
pass
remaining_try -= 1
await asyncio.sleep(1)
- raise GNS3VMError("Could not find guest IP address for {}".format(self.vmname))
+ raise GNS3VMError(f"Could not find guest IP address for {self.vmname}")
async def suspend(self):
"""
@@ -377,7 +377,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
"""
await self._execute("modifyvm", [self._vmname, "--cpus", str(vcpus)], timeout=3)
- log.info("GNS3 VM vCPU count set to {}".format(vcpus))
+ log.info(f"GNS3 VM vCPU count set to {vcpus}")
async def set_ram(self, ram):
"""
@@ -387,7 +387,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
"""
await self._execute("modifyvm", [self._vmname, "--memory", str(ram)], timeout=3)
- log.info("GNS3 VM RAM amount set to {}".format(ram))
+ log.info(f"GNS3 VM RAM amount set to {ram}")
async def enable_nested_hw_virt(self):
"""
@@ -405,7 +405,7 @@ class VirtualBoxGNS3VM(BaseGNS3VM):
:param hostonly_network_name: name of the VirtualBox host-only network
"""
- await self._execute("modifyvm", [self._vmname, "--hostonlyadapter{}".format(adapter_number), hostonly_network_name], timeout=3)
+ await self._execute("modifyvm", [self._vmname, f"--hostonlyadapter{adapter_number}", hostonly_network_name], timeout=3)
log.info('VirtualBox host-only network "{}" set on network adapter {} for "{}"'.format(hostonly_network_name,
adapter_number,
self._vmname))
diff --git a/gns3server/controller/gns3vm/vmware_gns3_vm.py b/gns3server/controller/gns3vm/vmware_gns3_vm.py
index 44bc38f3..e28a6e18 100644
--- a/gns3server/controller/gns3vm/vmware_gns3_vm.py
+++ b/gns3server/controller/gns3vm/vmware_gns3_vm.py
@@ -49,7 +49,7 @@ class VMwareGNS3VM(BaseGNS3VM):
result = await self._vmware_manager.execute(subcommand, args, timeout, log_level=log_level)
return (''.join(result))
except VMwareError as e:
- raise GNS3VMError("Error while executing VMware command: {}".format(e))
+ raise GNS3VMError(f"Error while executing VMware command: {e}")
async def _is_running(self):
result = await self._vmware_manager.execute("list", [])
@@ -67,13 +67,13 @@ class VMwareGNS3VM(BaseGNS3VM):
# memory must be a multiple of 4 (VMware requirement)
if ram % 4 != 0:
- raise GNS3VMError("Allocated memory {} for the GNS3 VM must be a multiple of 4".format(ram))
+ raise GNS3VMError(f"Allocated memory {ram} for the GNS3 VM must be a multiple of 4")
available_vcpus = psutil.cpu_count(logical=True)
if not float(vcpus).is_integer():
- raise GNS3VMError("The allocated vCPUs value is not an integer: {}".format(vcpus))
+ raise GNS3VMError(f"The allocated vCPUs value is not an integer: {vcpus}")
if vcpus > available_vcpus:
- raise GNS3VMError("You have allocated too many vCPUs for the GNS3 VM! (max available is {} vCPUs)".format(available_vcpus))
+ raise GNS3VMError(f"You have allocated too many vCPUs for the GNS3 VM! (max available is {available_vcpus} vCPUs)")
try:
pairs = VMware.parse_vmware_file(self._vmx_path)
@@ -84,9 +84,9 @@ class VMwareGNS3VM(BaseGNS3VM):
pairs["cpuid.corespersocket"] = str(cores_per_sockets)
pairs["memsize"] = str(ram)
VMware.write_vmx_file(self._vmx_path, pairs)
- log.info("GNS3 VM vCPU count set to {} and RAM amount set to {}".format(vcpus, ram))
+ log.info(f"GNS3 VM vCPU count set to {vcpus} and RAM amount set to {ram}")
except OSError as e:
- raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e))
+ raise GNS3VMError(f'Could not read/write VMware VMX file "{self._vmx_path}": {e}')
async def _set_extra_options(self):
try:
@@ -103,13 +103,13 @@ class VMwareGNS3VM(BaseGNS3VM):
if key not in pairs.keys():
pairs[key] = value
updated = True
- log.info("GNS3 VM VMX `{}` set to `{}`".format(key, value))
+ log.info(f"GNS3 VM VMX `{key}` set to `{value}`")
if updated:
VMware.write_vmx_file(self._vmx_path, pairs)
log.info("GNS3 VM VMX has been updated.")
except OSError as e:
- raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e))
+ raise GNS3VMError(f'Could not read/write VMware VMX file "{self._vmx_path}": {e}')
async def list(self):
"""
@@ -119,7 +119,7 @@ class VMwareGNS3VM(BaseGNS3VM):
try:
return (await self._vmware_manager.list_vms())
except VMwareError as e:
- raise GNS3VMError("Could not list VMware VMs: {}".format(str(e)))
+ raise GNS3VMError(f"Could not list VMware VMs: {str(e)}")
async def start(self):
"""
@@ -134,19 +134,19 @@ class VMwareGNS3VM(BaseGNS3VM):
# check we have a valid VMX file path
if not self._vmx_path:
- raise GNS3VMError("VMWare VM {} not found".format(self.vmname))
+ raise GNS3VMError(f"VMWare VM {self.vmname} not found")
if not os.path.exists(self._vmx_path):
- raise GNS3VMError("VMware VMX file {} doesn't exist".format(self._vmx_path))
+ raise GNS3VMError(f"VMware VMX file {self._vmx_path} doesn't exist")
# check if the VMware guest tools are installed
vmware_tools_state = await self._execute("checkToolsState", [self._vmx_path])
if vmware_tools_state not in ("installed", "running"):
- raise GNS3VMError("VMware tools are not installed in {}".format(self.vmname))
+ raise GNS3VMError(f"VMware tools are not installed in {self.vmname}")
try:
running = await self._is_running()
except VMwareError as e:
- raise GNS3VMError("Could not list VMware VMs: {}".format(str(e)))
+ raise GNS3VMError(f"Could not list VMware VMs: {str(e)}")
if not running:
# set the number of vCPUs and amount of RAM
if self.allocate_vcpus_ram:
@@ -180,12 +180,12 @@ class VMwareGNS3VM(BaseGNS3VM):
guest_ip_address = await self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120)
break
except GNS3VMError as e:
- log.debug("{}".format(e))
+ log.debug(f"{e}")
await asyncio.sleep(1)
if not guest_ip_address:
- raise GNS3VMError("Could not find guest IP address for {}".format(self.vmname))
+ raise GNS3VMError(f"Could not find guest IP address for {self.vmname}")
self.ip_address = guest_ip_address
- log.info("GNS3 VM IP address set to {}".format(guest_ip_address))
+ log.info(f"GNS3 VM IP address set to {guest_ip_address}")
self.running = True
async def suspend(self):
@@ -198,7 +198,7 @@ class VMwareGNS3VM(BaseGNS3VM):
try:
await self._execute("suspend", [self._vmx_path])
except GNS3VMError as e:
- log.warning("Error when suspending the VM: {}".format(str(e)))
+ log.warning(f"Error when suspending the VM: {str(e)}")
log.info("GNS3 VM has been suspended")
self.running = False
@@ -212,6 +212,6 @@ class VMwareGNS3VM(BaseGNS3VM):
try:
await self._execute("stop", [self._vmx_path, "soft"])
except GNS3VMError as e:
- log.warning("Error when stopping the VM: {}".format(str(e)))
+ log.warning(f"Error when stopping the VM: {str(e)}")
log.info("GNS3 VM has been stopped")
self.running = False
diff --git a/gns3server/controller/import_project.py b/gns3server/controller/import_project.py
index 1edc014e..a48b0a35 100644
--- a/gns3server/controller/import_project.py
+++ b/gns3server/controller/import_project.py
@@ -257,9 +257,9 @@ async def _import_snapshots(snapshots_path, project_name, project_id):
with zipfile.ZipFile(f) as zip_file:
await wait_run_in_executor(zip_file.extractall, tmpdir)
except OSError as e:
- raise ControllerError("Cannot open snapshot '{}': {}".format(os.path.basename(snapshot), e))
+ raise ControllerError(f"Cannot open snapshot '{os.path.basename(snapshot)}': {e}")
except zipfile.BadZipFile:
- raise ControllerError("Cannot extract files from snapshot '{}': not a GNS3 project (invalid zip)".format(os.path.basename(snapshot)))
+ raise ControllerError(f"Cannot extract files from snapshot '{os.path.basename(snapshot)}': not a GNS3 project (invalid zip)")
# patch the topology with the correct project name and ID
try:
@@ -272,9 +272,9 @@ async def _import_snapshots(snapshots_path, project_name, project_id):
with open(topology_file_path, "w+", encoding="utf-8") as f:
json.dump(topology, f, indent=4, sort_keys=True)
except OSError as e:
- raise ControllerError("Cannot update snapshot '{}': the project.gns3 file cannot be modified: {}".format(os.path.basename(snapshot), e))
+ raise ControllerError(f"Cannot update snapshot '{os.path.basename(snapshot)}': the project.gns3 file cannot be modified: {e}")
except (ValueError, KeyError):
- raise ControllerError("Cannot update snapshot '{}': the project.gns3 file is corrupted".format(os.path.basename(snapshot)))
+ raise ControllerError(f"Cannot update snapshot '{os.path.basename(snapshot)}': the project.gns3 file is corrupted")
# write everything back to the original snapshot file
try:
@@ -287,4 +287,4 @@ async def _import_snapshots(snapshots_path, project_name, project_id):
async for chunk in zstream:
await f.write(chunk)
except OSError as e:
- raise ControllerError("Cannot update snapshot '{}': the snapshot cannot be recreated: {}".format(os.path.basename(snapshot), e))
+ raise ControllerError(f"Cannot update snapshot '{os.path.basename(snapshot)}': the snapshot cannot be recreated: {e}")
diff --git a/gns3server/controller/link.py b/gns3server/controller/link.py
index 8e461858..7541993f 100644
--- a/gns3server/controller/link.py
+++ b/gns3server/controller/link.py
@@ -226,7 +226,7 @@ class Link:
port = node.get_port(adapter_number, port_number)
if port is None:
- raise ControllerNotFoundError("Port {}/{} for {} not found".format(adapter_number, port_number, node.name))
+ raise ControllerNotFoundError(f"Port {adapter_number}/{port_number} for {node.name} not found")
if port.link is not None:
raise ControllerError("Port is already used")
@@ -245,11 +245,11 @@ class Link:
if other_port is None:
raise ControllerNotFoundError("Port {}/{} for {} not found".format(other_node["adapter_number"], other_node["port_number"], other_node["node"].name))
if port.link_type != other_port.link_type:
- raise ControllerError("Connecting a {} interface to a {} interface is not allowed".format(other_port.link_type, port.link_type))
+ raise ControllerError(f"Connecting a {other_port.link_type} interface to a {port.link_type} interface is not allowed")
if label is None:
label = {
- "text": html.escape("{}/{}".format(adapter_number, port_number)),
+ "text": html.escape(f"{adapter_number}/{port_number}"),
"style": "font-family: TypeWriter;font-size: 10.0;font-weight: bold;fill: #000000;fill-opacity: 1.0;"
}
diff --git a/gns3server/controller/node.py b/gns3server/controller/node.py
index 76270ce4..27aeae3d 100644
--- a/gns3server/controller/node.py
+++ b/gns3server/controller/node.py
@@ -102,7 +102,7 @@ class Node:
try:
setattr(self, prop, kwargs[prop])
except AttributeError as e:
- log.critical("Cannot set attribute '{}'".format(prop))
+ log.critical(f"Cannot set attribute '{prop}'")
raise e
else:
if prop not in self.CONTROLLER_ONLY_PROPERTIES and kwargs[prop] is not None and kwargs[prop] != "":
@@ -292,7 +292,7 @@ class Node:
try:
self._width, self._height, filetype = self._project.controller.symbols.get_size(val)
except (ValueError, OSError) as e:
- log.error("Could not set symbol: {}".format(e))
+ log.error(f"Could not set symbol: {e}")
# If symbol is invalid we replace it by the default
self.symbol = ":/symbols/computer.svg"
if self._label is None:
@@ -384,7 +384,7 @@ class Node:
trial = 0
while trial != 6:
try:
- response = await self._compute.post("/projects/{}/{}/nodes".format(self._project.id, self._node_type), data=data, timeout=timeout)
+ response = await self._compute.post(f"/projects/{self._project.id}/{self._node_type}/nodes", data=data, timeout=timeout)
except ComputeConflict as e:
if e.response.get("exception") == "ImageMissingError":
res = await self._upload_missing_image(self._node_type, e.response["image"])
@@ -532,7 +532,7 @@ class Node:
else:
await self.post("/start", data=data, timeout=240)
except asyncio.TimeoutError:
- raise ControllerTimeoutError("Timeout when starting {}".format(self._name))
+ raise ControllerTimeoutError(f"Timeout when starting {self._name}")
async def stop(self):
"""
@@ -544,7 +544,7 @@ class Node:
except (ComputeError, ControllerError):
pass
except asyncio.TimeoutError:
- raise ControllerTimeoutError("Timeout when stopping {}".format(self._name))
+ raise ControllerTimeoutError(f"Timeout when stopping {self._name}")
async def suspend(self):
"""
@@ -553,7 +553,7 @@ class Node:
try:
await self.post("/suspend", timeout=240)
except asyncio.TimeoutError:
- raise ControllerTimeoutError("Timeout when reloading {}".format(self._name))
+ raise ControllerTimeoutError(f"Timeout when reloading {self._name}")
async def reload(self):
"""
@@ -562,7 +562,7 @@ class Node:
try:
await self.post("/reload", timeout=240)
except asyncio.TimeoutError:
- raise ControllerTimeoutError("Timeout when reloading {}".format(self._name))
+ raise ControllerTimeoutError(f"Timeout when reloading {self._name}")
async def reset_console(self):
"""
@@ -573,25 +573,25 @@ class Node:
try:
await self.post("/console/reset", timeout=240)
except asyncio.TimeoutError:
- raise ControllerTimeoutError("Timeout when reset console {}".format(self._name))
+ raise ControllerTimeoutError(f"Timeout when reset console {self._name}")
async def post(self, path, data=None, **kwargs):
"""
HTTP post on the node
"""
if data:
- return (await self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), data=data, **kwargs))
+ return (await self._compute.post(f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}{path}", data=data, **kwargs))
else:
- return (await self._compute.post("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs))
+ return (await self._compute.post(f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}{path}", **kwargs))
async def put(self, path, data=None, **kwargs):
"""
HTTP post on the node
"""
if path is None:
- path = "/projects/{}/{}/nodes/{}".format(self._project.id, self._node_type, self._id)
+ path = f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}"
else:
- path = "/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path)
+ path = f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}{path}"
if data:
return (await self._compute.put(path, data=data, **kwargs))
else:
@@ -602,9 +602,9 @@ class Node:
HTTP post on the node
"""
if path is None:
- return await self._compute.delete("/projects/{}/{}/nodes/{}".format(self._project.id, self._node_type, self._id), **kwargs)
+ return await self._compute.delete(f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}", **kwargs)
else:
- return await self._compute.delete("/projects/{}/{}/nodes/{}{}".format(self._project.id, self._node_type, self._id, path), **kwargs)
+ return await self._compute.delete(f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}{path}", **kwargs)
async def _upload_missing_image(self, type, img):
"""
@@ -615,13 +615,13 @@ class Node:
for directory in images_directories(type):
image = os.path.join(directory, img)
if os.path.exists(image):
- self.project.emit_notification("log.info", {"message": "Uploading missing image {}".format(img)})
+ self.project.emit_notification("log.info", {"message": f"Uploading missing image {img}"})
try:
with open(image, 'rb') as f:
- await self._compute.post("/{}/images/{}".format(self._node_type, os.path.basename(img)), data=f, timeout=None)
+ await self._compute.post(f"/{self._node_type}/images/{os.path.basename(img)}", data=f, timeout=None)
except OSError as e:
- raise ControllerError("Can't upload {}: {}".format(image, str(e)))
- self.project.emit_notification("log.info", {"message": "Upload finished for {}".format(img)})
+ raise ControllerError(f"Can't upload {image}: {str(e)}")
+ self.project.emit_notification("log.info", {"message": f"Upload finished for {img}"})
return True
return False
@@ -629,13 +629,13 @@ class Node:
"""
Compute the idle PC for a dynamips node
"""
- return (await self._compute.get("/projects/{}/{}/nodes/{}/auto_idlepc".format(self._project.id, self._node_type, self._id), timeout=240)).json
+ return (await self._compute.get(f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}/auto_idlepc", timeout=240)).json
async def dynamips_idlepc_proposals(self):
"""
Compute a list of potential idle PC
"""
- return (await self._compute.get("/projects/{}/{}/nodes/{}/idlepc_proposals".format(self._project.id, self._node_type, self._id), timeout=240)).json
+ return (await self._compute.get(f"/projects/{self._project.id}/{self._node_type}/nodes/{self._id}/idlepc_proposals", timeout=240)).json
def get_port(self, adapter_number, port_number):
"""
@@ -663,7 +663,7 @@ class Node:
atm_port.add(int(dest.split(":")[0]))
atm_port = sorted(atm_port)
for port in atm_port:
- self._ports.append(PortFactory("{}".format(port), 0, 0, port, "atm"))
+ self._ports.append(PortFactory(f"{port}", 0, 0, port, "atm"))
return
elif self._node_type == "frame_relay_switch":
@@ -674,7 +674,7 @@ class Node:
frame_relay_port.add(int(dest.split(":")[0]))
frame_relay_port = sorted(frame_relay_port)
for port in frame_relay_port:
- self._ports.append(PortFactory("{}".format(port), 0, 0, port, "frame_relay"))
+ self._ports.append(PortFactory(f"{port}", 0, 0, port, "frame_relay"))
return
elif self._node_type == "dynamips":
self._ports = DynamipsPortFactory(self._properties)
@@ -686,14 +686,14 @@ class Node:
if custom_adapter["adapter_number"] == adapter_number:
custom_adapter_settings = custom_adapter
break
- port_name = "eth{}".format(adapter_number)
+ port_name = f"eth{adapter_number}"
port_name = custom_adapter_settings.get("port_name", port_name)
self._ports.append(PortFactory(port_name, 0, adapter_number, 0, "ethernet", short_name=port_name))
elif self._node_type in ("ethernet_switch", "ethernet_hub"):
# Basic node we don't want to have adapter number
port_number = 0
for port in self._properties.get("ports_mapping", []):
- self._ports.append(PortFactory(port["name"], 0, 0, port_number, "ethernet", short_name="e{}".format(port_number)))
+ self._ports.append(PortFactory(port["name"], 0, 0, port_number, "ethernet", short_name=f"e{port_number}"))
port_number += 1
elif self._node_type in ("vpcs", "traceng"):
self._ports.append(PortFactory("Ethernet0", 0, 0, 0, "ethernet", short_name="e0"))
@@ -706,7 +706,7 @@ class Node:
self._ports = StandardPortFactory(self._properties, self._port_by_adapter, self._first_port_name, self._port_name_format, self._port_segment_size, self._custom_adapters)
def __repr__(self):
- return "".format(self._node_type, self._name)
+ return f""
def __eq__(self, other):
if not isinstance(other, Node):
diff --git a/gns3server/controller/ports/atm_port.py b/gns3server/controller/ports/atm_port.py
index f63b9e4b..01d5f2f3 100644
--- a/gns3server/controller/ports/atm_port.py
+++ b/gns3server/controller/ports/atm_port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/controller/ports/ethernet_port.py b/gns3server/controller/ports/ethernet_port.py
index ae3f7e73..3ae256e2 100644
--- a/gns3server/controller/ports/ethernet_port.py
+++ b/gns3server/controller/ports/ethernet_port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/controller/ports/fastethernet_port.py b/gns3server/controller/ports/fastethernet_port.py
index 2a01e14a..9e97f02d 100644
--- a/gns3server/controller/ports/fastethernet_port.py
+++ b/gns3server/controller/ports/fastethernet_port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/controller/ports/frame_relay_port.py b/gns3server/controller/ports/frame_relay_port.py
index 87049917..1f2e0255 100644
--- a/gns3server/controller/ports/frame_relay_port.py
+++ b/gns3server/controller/ports/frame_relay_port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/controller/ports/gigabitethernet_port.py b/gns3server/controller/ports/gigabitethernet_port.py
index bcf7b143..0b542273 100644
--- a/gns3server/controller/ports/gigabitethernet_port.py
+++ b/gns3server/controller/ports/gigabitethernet_port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/controller/ports/port.py b/gns3server/controller/ports/port.py
index 601e8024..b2209889 100644
--- a/gns3server/controller/ports/port.py
+++ b/gns3server/controller/ports/port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -86,8 +85,8 @@ class Port:
return self._short_name
elif '/' in self._name:
return self._name.replace(self.long_name_type(), self.short_name_type())
- elif self._name.startswith("{}{}".format(self.long_name_type(), self._interface_number)):
- return self.short_name_type() + "{}".format(self._interface_number)
+ elif self._name.startswith(f"{self.long_name_type()}{self._interface_number}"):
+ return self.short_name_type() + f"{self._interface_number}"
return self._name
@short_name.setter
diff --git a/gns3server/controller/ports/port_factory.py b/gns3server/controller/ports/port_factory.py
index 4844f216..26f1b7fb 100644
--- a/gns3server/controller/ports/port_factory.py
+++ b/gns3server/controller/ports/port_factory.py
@@ -81,7 +81,7 @@ class StandardPortFactory:
adapter=adapter_number,
**cls._generate_replacement(interface_number, segment_number))
except (IndexError, ValueError, KeyError) as e:
- raise ControllerError("Invalid port name format {}: {}".format(port_name_format, str(e)))
+ raise ControllerError(f"Invalid port name format {port_name_format}: {str(e)}")
port_name = custom_adapter_settings.get("port_name", port_name)
port = PortFactory(port_name, segment_number, adapter_number, port_number, "ethernet")
@@ -106,7 +106,7 @@ class StandardPortFactory:
if "serial_adapters" in properties:
for adapter_number in range(adapter_number, properties["serial_adapters"] + adapter_number):
for port_number in range(0, port_by_adapter):
- ports.append(PortFactory("Serial{}/{}".format(segment_number, port_number), segment_number, adapter_number, port_number, "serial"))
+ ports.append(PortFactory(f"Serial{segment_number}/{port_number}", segment_number, adapter_number, port_number, "serial"))
segment_number += 1
return ports
@@ -206,18 +206,18 @@ class DynamipsPortFactory:
if properties[name]:
port_class = cls.ADAPTER_MATRIX[properties[name]]["port"]
for port_number in range(0, cls.ADAPTER_MATRIX[properties[name]]["nb_ports"]):
- name = "{}{}/{}".format(port_class.long_name_type(), adapter_number, port_number)
+ name = f"{port_class.long_name_type()}{adapter_number}/{port_number}"
port = port_class(name, adapter_number, adapter_number, port_number)
- port.short_name = "{}{}/{}".format(port_class.short_name_type(), adapter_number, port_number)
+ port.short_name = f"{port_class.short_name_type()}{adapter_number}/{port_number}"
ports.append(port)
adapter_number += 1
elif name.startswith("wic"):
if properties[name]:
port_class = cls.WIC_MATRIX[properties[name]]["port"]
for port_number in range(0, cls.WIC_MATRIX[properties[name]]["nb_ports"]):
- name = "{}{}/{}".format(port_class.long_name_type(), 0, display_wic_port_number)
+ name = f"{port_class.long_name_type()}{0}/{display_wic_port_number}"
port = port_class(name, 0, 0, wic_port_number)
- port.short_name = "{}{}/{}".format(port_class.short_name_type(), 0, display_wic_port_number)
+ port.short_name = f"{port_class.short_name_type()}{0}/{display_wic_port_number}"
ports.append(port)
display_wic_port_number += 1
wic_port_number += 1
diff --git a/gns3server/controller/ports/pos_port.py b/gns3server/controller/ports/pos_port.py
index bd6a508e..296f466e 100644
--- a/gns3server/controller/ports/pos_port.py
+++ b/gns3server/controller/ports/pos_port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/controller/ports/serial_port.py b/gns3server/controller/ports/serial_port.py
index 37fb4ebc..62a75d59 100644
--- a/gns3server/controller/ports/serial_port.py
+++ b/gns3server/controller/ports/serial_port.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/controller/project.py b/gns3server/controller/project.py
index cdfeca32..709821b2 100644
--- a/gns3server/controller/project.py
+++ b/gns3server/controller/project.py
@@ -100,7 +100,7 @@ class Project:
# Disallow overwrite of existing project
if project_id is None and path is not None:
if os.path.exists(path):
- raise ControllerForbiddenError("The path {} already exists".format(path))
+ raise ControllerForbiddenError(f"The path {path} already exists")
else:
raise ControllerForbiddenError("Providing a path to create a new project is deprecated.")
@@ -110,7 +110,7 @@ class Project:
try:
UUID(project_id, version=4)
except ValueError:
- raise ControllerError("{} is not a valid UUID".format(project_id))
+ raise ControllerError(f"{project_id} is not a valid UUID")
self._id = project_id
if path is None:
@@ -130,7 +130,7 @@ class Project:
self.dump()
self._iou_id_lock = asyncio.Lock()
- log.debug('Project "{name}" [{id}] loaded'.format(name=self.name, id=self._id))
+ log.debug(f'Project "{self.name}" [{self._id}] loaded')
def emit_notification(self, action, event):
"""
@@ -161,7 +161,7 @@ class Project:
# update on computes
for compute in list(self._project_created_on_compute):
await compute.put(
- "/projects/{}".format(self._id), {
+ f"/projects/{self._id}", {
"variables": self.variables
}
)
@@ -406,7 +406,7 @@ class Project:
try:
os.makedirs(path, exist_ok=True)
except OSError as e:
- raise ControllerError("Could not create project directory: {}".format(e))
+ raise ControllerError(f"Could not create project directory: {e}")
if '"' in path:
raise ControllerForbiddenError("You are not allowed to use \" in the project directory path. Not supported by Dynamips.")
@@ -473,7 +473,7 @@ class Project:
except KeyError as e:
raise ControllerError("{" + e.args[0] + "} is not a valid replacement string in the node name")
except (ValueError, IndexError) as e:
- raise ControllerError("{} is not a valid replacement string in the node name".format(base_name))
+ raise ControllerError(f"{base_name} is not a valid replacement string in the node name")
if name not in self._allocated_node_names:
self._allocated_node_names.add(name)
return name
@@ -596,7 +596,7 @@ class Project:
async def delete_node(self, node_id):
node = self.get_node(node_id)
if node.locked:
- raise ControllerError("Node {} cannot be deleted because it is locked".format(node.name))
+ raise ControllerError(f"Node {node.name} cannot be deleted because it is locked")
await self.__delete_node_links(node)
self.remove_allocated_node_name(node.name)
del self._nodes[node.id]
@@ -614,7 +614,7 @@ class Project:
try:
return self._nodes[node_id]
except KeyError:
- raise ControllerNotFoundError("Node ID {} doesn't exist".format(node_id))
+ raise ControllerNotFoundError(f"Node ID {node_id} doesn't exist")
def _get_closed_data(self, section, id_key):
"""
@@ -627,10 +627,10 @@ class Project:
try:
path = self._topology_file()
- with open(path, "r") as f:
+ with open(path) as f:
topology = json.load(f)
except OSError as e:
- raise ControllerError("Could not load topology: {}".format(e))
+ raise ControllerError(f"Could not load topology: {e}")
try:
data = {}
@@ -638,7 +638,7 @@ class Project:
data[elem[id_key]] = elem
return data
except KeyError:
- raise ControllerNotFoundError("Section {} not found in the topology".format(section))
+ raise ControllerNotFoundError(f"Section {section} not found in the topology")
@property
def nodes(self):
@@ -683,13 +683,13 @@ class Project:
try:
return self._drawings[drawing_id]
except KeyError:
- raise ControllerNotFoundError("Drawing ID {} doesn't exist".format(drawing_id))
+ raise ControllerNotFoundError(f"Drawing ID {drawing_id} doesn't exist")
@open_required
async def delete_drawing(self, drawing_id):
drawing = self.get_drawing(drawing_id)
if drawing.locked:
- raise ControllerError("Drawing ID {} cannot be deleted because it is locked".format(drawing_id))
+ raise ControllerError(f"Drawing ID {drawing_id} cannot be deleted because it is locked")
del self._drawings[drawing.id]
self.dump()
self.emit_notification("drawing.deleted", drawing.__json__())
@@ -729,7 +729,7 @@ class Project:
try:
return self._links[link_id]
except KeyError:
- raise ControllerNotFoundError("Link ID {} doesn't exist".format(link_id))
+ raise ControllerNotFoundError(f"Link ID {link_id} doesn't exist")
@property
def links(self):
@@ -755,7 +755,7 @@ class Project:
try:
return self._snapshots[snapshot_id]
except KeyError:
- raise ControllerNotFoundError("Snapshot ID {} doesn't exist".format(snapshot_id))
+ raise ControllerNotFoundError(f"Snapshot ID {snapshot_id} doesn't exist")
@open_required
async def snapshot(self, name):
@@ -766,7 +766,7 @@ class Project:
"""
if name in [snap.name for snap in self._snapshots.values()]:
- raise ControllerError("The snapshot name {} already exists".format(name))
+ raise ControllerError(f"The snapshot name {name} already exists")
snapshot = Snapshot(self, name=name)
await snapshot.create()
self._snapshots[snapshot.id] = snapshot
@@ -783,13 +783,13 @@ class Project:
if self._status == "closed" or self._closing:
return
if self._loading:
- log.warning("Closing project '{}' ignored because it is being loaded".format(self.name))
+ log.warning(f"Closing project '{self.name}' ignored because it is being loaded")
return
self._closing = True
await self.stop_all()
for compute in list(self._project_created_on_compute):
try:
- await compute.post("/projects/{}/close".format(self._id), dont_connect=True)
+ await compute.post(f"/projects/{self._id}/close", dont_connect=True)
# We don't care if a compute is down at this step
except (ComputeError, ControllerError, TimeoutError):
pass
@@ -828,10 +828,10 @@ class Project:
for pic_filename in pictures:
path = os.path.join(self.pictures_directory, pic_filename)
- log.info("Deleting unused picture '{}'".format(path))
+ log.info(f"Deleting unused picture '{path}'")
os.remove(path)
except OSError as e:
- log.warning("Could not delete unused pictures: {}".format(e))
+ log.warning(f"Could not delete unused pictures: {e}")
async def delete(self):
@@ -840,16 +840,16 @@ class Project:
await self.open()
except ControllerError as e:
# ignore missing images or other conflicts when deleting a project
- log.warning("Conflict while deleting project: {}".format(e))
+ log.warning(f"Conflict while deleting project: {e}")
await self.delete_on_computes()
await self.close()
try:
project_directory = get_default_project_directory()
if not os.path.commonprefix([project_directory, self.path]) == project_directory:
- raise ControllerError("Project '{}' cannot be deleted because it is not in the default project directory: '{}'".format(self._name, project_directory))
+ raise ControllerError(f"Project '{self._name}' cannot be deleted because it is not in the default project directory: '{project_directory}'")
shutil.rmtree(self.path)
except OSError as e:
- raise ControllerError("Cannot delete project directory {}: {}".format(self.path, str(e)))
+ raise ControllerError(f"Cannot delete project directory {self.path}: {str(e)}")
async def delete_on_computes(self):
"""
@@ -857,7 +857,7 @@ class Project:
"""
for compute in list(self._project_created_on_compute):
if compute.id != "local":
- await compute.delete("/projects/{}".format(self._id))
+ await compute.delete(f"/projects/{self._id}")
self._project_created_on_compute.remove(compute)
@classmethod
@@ -873,7 +873,7 @@ class Project:
try:
os.makedirs(path, exist_ok=True)
except OSError as e:
- raise ControllerError("Could not create project directory: {}".format(e))
+ raise ControllerError(f"Could not create project directory: {e}")
return path
def _topology_file(self):
@@ -971,7 +971,7 @@ class Project:
except Exception as e:
for compute in list(self._project_created_on_compute):
try:
- await compute.post("/projects/{}/close".format(self._id))
+ await compute.post(f"/projects/{self._id}/close")
# We don't care if a compute is down at this step
except ComputeError:
pass
@@ -1043,7 +1043,7 @@ class Project:
# export the project to a temporary location
project_path = os.path.join(tmpdir, "project.gns3p")
- log.info("Exporting project to '{}'".format(project_path))
+ log.info(f"Exporting project to '{project_path}'")
async with aiofiles.open(project_path, 'wb') as f:
async for chunk in zstream:
await f.write(chunk)
@@ -1052,9 +1052,9 @@ class Project:
with open(project_path, "rb") as f:
project = await import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)
- log.info("Project '{}' duplicated in {:.4f} seconds".format(project.name, time.time() - begin))
+ log.info(f"Project '{project.name}' duplicated in {time.time() - begin:.4f} seconds")
except (ValueError, OSError, UnicodeEncodeError) as e:
- raise ControllerError("Cannot duplicate project: {}".format(str(e)))
+ raise ControllerError(f"Cannot duplicate project: {str(e)}")
if previous_status == "closed":
await self.close()
@@ -1083,7 +1083,7 @@ class Project:
json.dump(topo, f, indent=4, sort_keys=True)
shutil.move(path + ".tmp", path)
except OSError as e:
- raise ControllerError("Could not write topology: {}".format(e))
+ raise ControllerError(f"Could not write topology: {e}")
@open_required
async def start_all(self):
@@ -1209,4 +1209,4 @@ class Project:
}
def __repr__(self):
- return "".format(self._name, self._id)
+ return f""
diff --git a/gns3server/controller/snapshot.py b/gns3server/controller/snapshot.py
index 1f5ecaed..dcc4b6c7 100644
--- a/gns3server/controller/snapshot.py
+++ b/gns3server/controller/snapshot.py
@@ -85,13 +85,13 @@ class Snapshot:
"""
if os.path.exists(self.path):
- raise ControllerError("The snapshot file '{}' already exists".format(self.name))
+ raise ControllerError(f"The snapshot file '{self.name}' already exists")
snapshot_directory = os.path.join(self._project.path, "snapshots")
try:
os.makedirs(snapshot_directory, exist_ok=True)
except OSError as e:
- raise ControllerError("Could not create the snapshot directory '{}': {}".format(snapshot_directory, e))
+ raise ControllerError(f"Could not create the snapshot directory '{snapshot_directory}': {e}")
try:
begin = time.time()
@@ -102,9 +102,9 @@ class Snapshot:
async with aiofiles.open(self.path, 'wb') as f:
async for chunk in zstream:
await f.write(chunk)
- log.info("Snapshot '{}' created in {:.4f} seconds".format(self.name, time.time() - begin))
+ log.info(f"Snapshot '{self.name}' created in {time.time() - begin:.4f} seconds")
except (ValueError, OSError, RuntimeError) as e:
- raise ControllerError("Could not create snapshot file '{}': {}".format(self.path, e))
+ raise ControllerError(f"Could not create snapshot file '{self.path}': {e}")
async def restore(self):
"""
diff --git a/gns3server/controller/symbols.py b/gns3server/controller/symbols.py
index 8430bfaf..e1644444 100644
--- a/gns3server/controller/symbols.py
+++ b/gns3server/controller/symbols.py
@@ -54,7 +54,7 @@ class Symbols:
def theme(self, theme):
if not self._themes.get(theme):
- raise ControllerNotFoundError("Could not find symbol theme '{}'".format(theme))
+ raise ControllerNotFoundError(f"Could not find symbol theme '{theme}'")
self._current_theme = theme
def default_symbols(self):
@@ -65,10 +65,10 @@ class Symbols:
theme = self._themes.get(symbol_theme, None)
if not theme:
- raise ControllerNotFoundError("Could not find symbol theme '{}'".format(symbol_theme))
+ raise ControllerNotFoundError(f"Could not find symbol theme '{symbol_theme}'")
symbol_path = theme.get(symbol)
if symbol_path not in self._symbols_path:
- log.warning("Default symbol {} was not found".format(symbol_path))
+ log.warning(f"Default symbol {symbol_path} was not found")
return None
return symbol_path
@@ -119,7 +119,7 @@ class Symbols:
try:
os.makedirs(directory, exist_ok=True)
except OSError as e:
- log.error("Could not create symbol directory '{}': {}".format(directory, e))
+ log.error(f"Could not create symbol directory '{directory}': {e}")
return None
return directory
@@ -132,12 +132,12 @@ class Symbols:
return self._symbols_path[symbol_id]
except (OSError, KeyError):
# try to return a symbol with the same name from the classic theme
- symbol = self._symbols_path.get(":/symbols/classic/{}".format(os.path.basename(symbol_id)))
+ symbol = self._symbols_path.get(f":/symbols/classic/{os.path.basename(symbol_id)}")
if symbol:
return symbol
else:
# return the default computer symbol
- log.warning("Could not retrieve symbol '{}', returning default symbol...".format(symbol_id))
+ log.warning(f"Could not retrieve symbol '{symbol_id}', returning default symbol...")
return self._symbols_path[":/symbols/classic/computer.svg"]
def get_size(self, symbol_id):
diff --git a/gns3server/controller/topology.py b/gns3server/controller/topology.py
index 90ce734a..ba103da2 100644
--- a/gns3server/controller/topology.py
+++ b/gns3server/controller/topology.py
@@ -55,7 +55,7 @@ def _check_topology_schema(topo):
DynamipsNodeValidation.parse_obj(node.get("properties", {}))
except pydantic.ValidationError as e:
- error = "Invalid data in topology file: {}".format(e)
+ error = f"Invalid data in topology file: {e}"
log.critical(error)
raise ControllerError(error)
@@ -127,7 +127,7 @@ def load_topology(path):
with open(path, encoding="utf-8") as f:
topo = json.load(f)
except (OSError, UnicodeDecodeError, ValueError) as e:
- raise ControllerError("Could not load topology {}: {}".format(path, str(e)))
+ raise ControllerError(f"Could not load topology {path}: {str(e)}")
if topo.get("revision", 0) > GNS3_FILE_FORMAT_REVISION:
raise ControllerError("This project was created with more recent version of GNS3 (file revision: {}). Please upgrade GNS3 to version {} or later".format(topo["revision"], topo["version"]))
@@ -138,7 +138,7 @@ def load_topology(path):
try:
shutil.copy(path, path + ".backup{}".format(topo.get("revision", 0)))
except OSError as e:
- raise ControllerError("Can't write backup of the topology {}: {}".format(path, str(e)))
+ raise ControllerError(f"Can't write backup of the topology {path}: {str(e)}")
changed = True
# update the version because we converted the topology
topo["version"] = __version__
@@ -189,7 +189,7 @@ def load_topology(path):
with open(path, "w+", encoding="utf-8") as f:
json.dump(topo, f, indent=4, sort_keys=True)
except OSError as e:
- raise ControllerError("Can't write the topology {}: {}".format(path, str(e)))
+ raise ControllerError(f"Can't write the topology {path}: {str(e)}")
return topo
@@ -272,12 +272,12 @@ def _convert_2_0_0_beta_2(topo, topo_path):
node_dir = os.path.join(dynamips_dir, node_id)
try:
os.makedirs(os.path.join(node_dir, "configs"), exist_ok=True)
- for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "*_i{}_*".format(dynamips_id))):
+ for path in glob.glob(os.path.join(glob.escape(dynamips_dir), f"*_i{dynamips_id}_*")):
shutil.move(path, os.path.join(node_dir, os.path.basename(path)))
- for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", "i{}_*".format(dynamips_id))):
+ for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", f"i{dynamips_id}_*")):
shutil.move(path, os.path.join(node_dir, "configs", os.path.basename(path)))
except OSError as e:
- raise ControllerError("Can't convert project {}: {}".format(topo_path, str(e)))
+ raise ControllerError(f"Can't convert project {topo_path}: {str(e)}")
return topo
@@ -621,7 +621,7 @@ def _convert_border_style(element):
elif border_style == 0:
pass # Solid line
else:
- style += 'stroke-dasharray="{}" '.format(QT_DASH_TO_SVG[border_style])
+ style += f'stroke-dasharray="{QT_DASH_TO_SVG[border_style]}" '
style += 'stroke="{stroke}" stroke-width="{stroke_width}"'.format(
stroke=element.get("border_color", "#000000"),
stroke_width=element.get("border_width", 2)
@@ -672,7 +672,7 @@ def _create_cloud(node, old_node, icon):
except ValueError:
raise ControllerError("UDP tunnel using IPV6 is not supported in cloud")
port = {
- "name": "UDP tunnel {}".format(len(ports) + 1),
+ "name": f"UDP tunnel {len(ports) + 1}",
"port_number": len(ports) + 1,
"type": port_type,
"lport": int(lport),
diff --git a/gns3server/controller/udp_link.py b/gns3server/controller/udp_link.py
index 105fadb5..eac87bef 100644
--- a/gns3server/controller/udp_link.py
+++ b/gns3server/controller/udp_link.py
@@ -50,12 +50,12 @@ class UDPLink(Link):
try:
(node1_host, node2_host) = await node1.compute.get_ip_on_same_subnet(node2.compute)
except ValueError as e:
- raise ControllerError("Cannot get an IP address on same subnet: {}".format(e))
+ raise ControllerError(f"Cannot get an IP address on same subnet: {e}")
# Reserve a UDP port on both side
- response = await node1.compute.post("/projects/{}/ports/udp".format(self._project.id))
+ response = await node1.compute.post(f"/projects/{self._project.id}/ports/udp")
self._node1_port = response.json["udp_port"]
- response = await node2.compute.post("/projects/{}/ports/udp".format(self._project.id))
+ response = await node2.compute.post(f"/projects/{self._project.id}/ports/udp")
self._node2_port = response.json["udp_port"]
node1_filters = {}
@@ -75,7 +75,7 @@ class UDPLink(Link):
"filters": node1_filters,
"suspend": self._suspended
})
- await node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)
+ await node1.post(f"/adapters/{adapter_number1}/ports/{port_number1}/nio", data=self._link_data[0], timeout=120)
self._link_data.append({
"lport": self._node2_port,
@@ -86,10 +86,10 @@ class UDPLink(Link):
"suspend": self._suspended
})
try:
- await node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120)
+ await node2.post(f"/adapters/{adapter_number2}/ports/{port_number2}/nio", data=self._link_data[1], timeout=120)
except Exception as e:
# We clean the first NIO
- await node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
+ await node1.delete(f"/adapters/{adapter_number1}/ports/{port_number1}/nio", timeout=120)
raise e
self._created = True
@@ -116,14 +116,14 @@ class UDPLink(Link):
self._link_data[0]["filters"] = node1_filters
self._link_data[0]["suspend"] = self._suspended
if node1.node_type not in ("ethernet_switch", "ethernet_hub"):
- await node1.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)
+ await node1.put(f"/adapters/{adapter_number1}/ports/{port_number1}/nio", data=self._link_data[0], timeout=120)
adapter_number2 = self._nodes[1]["adapter_number"]
port_number2 = self._nodes[1]["port_number"]
self._link_data[1]["filters"] = node2_filters
self._link_data[1]["suspend"] = self._suspended
if node2.node_type not in ("ethernet_switch", "ethernet_hub"):
- await node2.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=221)
+ await node2.put(f"/adapters/{adapter_number2}/ports/{port_number2}/nio", data=self._link_data[1], timeout=221)
async def delete(self):
"""
@@ -138,7 +138,7 @@ class UDPLink(Link):
except IndexError:
return
try:
- await node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
+ await node1.delete(f"/adapters/{adapter_number1}/ports/{port_number1}/nio", timeout=120)
# If the node is already deleted (user selected multiple element and delete all in the same time)
except ControllerNotFoundError:
pass
@@ -150,7 +150,7 @@ class UDPLink(Link):
except IndexError:
return
try:
- await node2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), timeout=120)
+ await node2.delete(f"/adapters/{adapter_number2}/ports/{port_number2}/nio", timeout=120)
# If the node is already deleted (user selected multiple element and delete all in the same time)
except ControllerNotFoundError:
pass
diff --git a/gns3server/core/tasks.py b/gns3server/core/tasks.py
index 42810c31..12fd9d03 100644
--- a/gns3server/core/tasks.py
+++ b/gns3server/core/tasks.py
@@ -68,7 +68,7 @@ def create_startup_handler(app: FastAPI) -> Callable:
asyncio.ensure_future(Qemu.instance().list_images())
for module in MODULES:
- log.debug("Loading module {}".format(module.__name__))
+ log.debug(f"Loading module {module.__name__}")
m = module.instance()
m.port_manager = PortManager.instance()
@@ -82,14 +82,14 @@ def create_shutdown_handler(app: FastAPI) -> Callable:
await Controller.instance().stop()
for module in MODULES:
- log.debug("Unloading module {}".format(module.__name__))
+ log.debug(f"Unloading module {module.__name__}")
m = module.instance()
await m.unload()
if PortManager.instance().tcp_ports:
- log.warning("TCP ports are still used {}".format(PortManager.instance().tcp_ports))
+ log.warning(f"TCP ports are still used {PortManager.instance().tcp_ports}")
if PortManager.instance().udp_ports:
- log.warning("UDP ports are still used {}".format(PortManager.instance().udp_ports))
+ log.warning(f"UDP ports are still used {PortManager.instance().udp_ports}")
return shutdown_handler
diff --git a/gns3server/crash_report.py b/gns3server/crash_report.py
index 76d8b2b3..7afb33ad 100644
--- a/gns3server/crash_report.py
+++ b/gns3server/crash_report.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -77,7 +76,7 @@ class CrashReport:
if cacert_resource is not None and os.path.isfile(cacert_resource):
cacert = cacert_resource
else:
- log.error("The SSL certificate bundle file '{}' could not be found".format(cacert_resource))
+ log.error(f"The SSL certificate bundle file '{cacert_resource}' could not be found")
# Don't send log records as events.
sentry_logging = LoggingIntegration(level=logging.INFO, event_level=None)
@@ -92,7 +91,7 @@ class CrashReport:
"os:name": platform.system(),
"os:release": platform.release(),
"os:win_32": " ".join(platform.win32_ver()),
- "os:mac": "{} {}".format(platform.mac_ver()[0], platform.mac_ver()[2]),
+ "os:mac": f"{platform.mac_ver()[0]} {platform.mac_ver()[2]}",
"os:linux": " ".join(distro.linux_distribution()),
}
@@ -162,9 +161,9 @@ class CrashReport:
sentry_sdk.capture_exception()
else:
sentry_sdk.capture_exception()
- log.info("Crash report sent with event ID: {}".format(sentry_sdk.last_event_id()))
+ log.info(f"Crash report sent with event ID: {sentry_sdk.last_event_id()}")
except Exception as e:
- log.warning("Can't send crash report to Sentry: {}".format(e))
+ log.warning(f"Can't send crash report to Sentry: {e}")
@classmethod
def instance(cls):
diff --git a/gns3server/logger.py b/gns3server/logger.py
index ac0108c9..9f45b94e 100644
--- a/gns3server/logger.py
+++ b/gns3server/logger.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -62,7 +61,7 @@ class ColouredFormatter(logging.Formatter):
if record.name.startswith("uvicorn"):
message = message.replace(f"{record.name}:{record.lineno}", "uvicorn")
- message = '{colour}{message}{reset}'.format(colour=colour, message=message, reset=self.RESET)
+ message = f'{colour}{message}{self.RESET}'
return message
diff --git a/gns3server/main.py b/gns3server/main.py
index 14b8d855..464be247 100644
--- a/gns3server/main.py
+++ b/gns3server/main.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/__init__.py b/gns3server/schemas/__init__.py
index ba0809f0..c68f942a 100644
--- a/gns3server/schemas/__init__.py
+++ b/gns3server/schemas/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/atm_switch_nodes.py b/gns3server/schemas/atm_switch_nodes.py
index d594c5f9..6f9e55b1 100644
--- a/gns3server/schemas/atm_switch_nodes.py
+++ b/gns3server/schemas/atm_switch_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/base.py b/gns3server/schemas/base.py
index f398dca9..90d536a6 100644
--- a/gns3server/schemas/base.py
+++ b/gns3server/schemas/base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/capabilities.py b/gns3server/schemas/capabilities.py
index 6197d93e..e54ab05d 100644
--- a/gns3server/schemas/capabilities.py
+++ b/gns3server/schemas/capabilities.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/cloud_nodes.py b/gns3server/schemas/cloud_nodes.py
index bb6c64ea..22b7402e 100644
--- a/gns3server/schemas/cloud_nodes.py
+++ b/gns3server/schemas/cloud_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/cloud_templates.py b/gns3server/schemas/cloud_templates.py
index 5d654e9c..6ae199f1 100644
--- a/gns3server/schemas/cloud_templates.py
+++ b/gns3server/schemas/cloud_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/common.py b/gns3server/schemas/common.py
index b876a79b..85ae01d3 100644
--- a/gns3server/schemas/common.py
+++ b/gns3server/schemas/common.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/computes.py b/gns3server/schemas/computes.py
index 544d5132..ab28960e 100644
--- a/gns3server/schemas/computes.py
+++ b/gns3server/schemas/computes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -78,9 +77,9 @@ class ComputeCreate(ComputeBase):
# due to random user generated by 1.4 it's common to have a very long user
if len(user) > 14:
user = user[:11] + "..."
- return "{}://{}@{}:{}".format(protocol, user, host, port)
+ return f"{protocol}://{user}@{host}:{port}"
else:
- return "{}://{}:{}".format(protocol, host, port)
+ return f"{protocol}://{host}:{port}"
class ComputeUpdate(ComputeBase):
diff --git a/gns3server/schemas/config.py b/gns3server/schemas/config.py
index 98484b9e..7e6787c6 100644
--- a/gns3server/schemas/config.py
+++ b/gns3server/schemas/config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/docker_nodes.py b/gns3server/schemas/docker_nodes.py
index 39a9a570..6d728c93 100644
--- a/gns3server/schemas/docker_nodes.py
+++ b/gns3server/schemas/docker_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/docker_templates.py b/gns3server/schemas/docker_templates.py
index 1fdfae70..3cd91e25 100644
--- a/gns3server/schemas/docker_templates.py
+++ b/gns3server/schemas/docker_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/drawings.py b/gns3server/schemas/drawings.py
index dfb191db..f0672727 100644
--- a/gns3server/schemas/drawings.py
+++ b/gns3server/schemas/drawings.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/dynamips_nodes.py b/gns3server/schemas/dynamips_nodes.py
index 9e4f3bfe..aa9c0734 100644
--- a/gns3server/schemas/dynamips_nodes.py
+++ b/gns3server/schemas/dynamips_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/dynamips_templates.py b/gns3server/schemas/dynamips_templates.py
index 5416a0e8..255fd9db 100644
--- a/gns3server/schemas/dynamips_templates.py
+++ b/gns3server/schemas/dynamips_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/ethernet_hub_nodes.py b/gns3server/schemas/ethernet_hub_nodes.py
index 63bc01f5..e559a6d4 100644
--- a/gns3server/schemas/ethernet_hub_nodes.py
+++ b/gns3server/schemas/ethernet_hub_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/ethernet_hub_templates.py b/gns3server/schemas/ethernet_hub_templates.py
index 0ca7542c..396f5dcc 100644
--- a/gns3server/schemas/ethernet_hub_templates.py
+++ b/gns3server/schemas/ethernet_hub_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/ethernet_switch_nodes.py b/gns3server/schemas/ethernet_switch_nodes.py
index a313b138..9b1050ec 100644
--- a/gns3server/schemas/ethernet_switch_nodes.py
+++ b/gns3server/schemas/ethernet_switch_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/ethernet_switch_templates.py b/gns3server/schemas/ethernet_switch_templates.py
index 8baec036..e434fc00 100644
--- a/gns3server/schemas/ethernet_switch_templates.py
+++ b/gns3server/schemas/ethernet_switch_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/frame_relay_switch_nodes.py b/gns3server/schemas/frame_relay_switch_nodes.py
index 3619464d..fb6e580c 100644
--- a/gns3server/schemas/frame_relay_switch_nodes.py
+++ b/gns3server/schemas/frame_relay_switch_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/gns3vm.py b/gns3server/schemas/gns3vm.py
index 5adcc101..9ea8e9aa 100644
--- a/gns3server/schemas/gns3vm.py
+++ b/gns3server/schemas/gns3vm.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/iou_license.py b/gns3server/schemas/iou_license.py
index e650b8ad..72c80100 100644
--- a/gns3server/schemas/iou_license.py
+++ b/gns3server/schemas/iou_license.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/iou_nodes.py b/gns3server/schemas/iou_nodes.py
index c7e886e8..f237fdd9 100644
--- a/gns3server/schemas/iou_nodes.py
+++ b/gns3server/schemas/iou_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/iou_templates.py b/gns3server/schemas/iou_templates.py
index 504f2e62..bee305d3 100644
--- a/gns3server/schemas/iou_templates.py
+++ b/gns3server/schemas/iou_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/links.py b/gns3server/schemas/links.py
index ff6b2eed..c5d3b43f 100644
--- a/gns3server/schemas/links.py
+++ b/gns3server/schemas/links.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/nat_nodes.py b/gns3server/schemas/nat_nodes.py
index fed22b1a..fb3f805d 100644
--- a/gns3server/schemas/nat_nodes.py
+++ b/gns3server/schemas/nat_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/nios.py b/gns3server/schemas/nios.py
index 44daef4a..9e05574f 100644
--- a/gns3server/schemas/nios.py
+++ b/gns3server/schemas/nios.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/nodes.py b/gns3server/schemas/nodes.py
index 74a019b7..1c45aee6 100644
--- a/gns3server/schemas/nodes.py
+++ b/gns3server/schemas/nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/projects.py b/gns3server/schemas/projects.py
index 8980a6f0..2c7c846e 100644
--- a/gns3server/schemas/projects.py
+++ b/gns3server/schemas/projects.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/qemu_nodes.py b/gns3server/schemas/qemu_nodes.py
index 3f03d8dd..c63f8748 100644
--- a/gns3server/schemas/qemu_nodes.py
+++ b/gns3server/schemas/qemu_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/qemu_templates.py b/gns3server/schemas/qemu_templates.py
index 52303781..5b3baa25 100644
--- a/gns3server/schemas/qemu_templates.py
+++ b/gns3server/schemas/qemu_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/snapshots.py b/gns3server/schemas/snapshots.py
index 17620cdb..90a7667e 100644
--- a/gns3server/schemas/snapshots.py
+++ b/gns3server/schemas/snapshots.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/templates.py b/gns3server/schemas/templates.py
index cd66377c..f9de0b03 100644
--- a/gns3server/schemas/templates.py
+++ b/gns3server/schemas/templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/tokens.py b/gns3server/schemas/tokens.py
index ea725177..2adc4b2b 100644
--- a/gns3server/schemas/tokens.py
+++ b/gns3server/schemas/tokens.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/users.py b/gns3server/schemas/users.py
index 674ac08e..445e1d7b 100644
--- a/gns3server/schemas/users.py
+++ b/gns3server/schemas/users.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/version.py b/gns3server/schemas/version.py
index cb92d953..534e78b8 100644
--- a/gns3server/schemas/version.py
+++ b/gns3server/schemas/version.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/virtualbox_nodes.py b/gns3server/schemas/virtualbox_nodes.py
index 56f26565..6a785a8f 100644
--- a/gns3server/schemas/virtualbox_nodes.py
+++ b/gns3server/schemas/virtualbox_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/virtualbox_templates.py b/gns3server/schemas/virtualbox_templates.py
index 12e35fa3..fdb386b8 100644
--- a/gns3server/schemas/virtualbox_templates.py
+++ b/gns3server/schemas/virtualbox_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/vmware_nodes.py b/gns3server/schemas/vmware_nodes.py
index 01dffddb..4a3f2fc9 100644
--- a/gns3server/schemas/vmware_nodes.py
+++ b/gns3server/schemas/vmware_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/vmware_templates.py b/gns3server/schemas/vmware_templates.py
index 67d2aeae..cecb576d 100644
--- a/gns3server/schemas/vmware_templates.py
+++ b/gns3server/schemas/vmware_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/vpcs_nodes.py b/gns3server/schemas/vpcs_nodes.py
index a09d40b3..324be1a7 100644
--- a/gns3server/schemas/vpcs_nodes.py
+++ b/gns3server/schemas/vpcs_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/schemas/vpcs_templates.py b/gns3server/schemas/vpcs_templates.py
index ba16f12d..80cd18f9 100644
--- a/gns3server/schemas/vpcs_templates.py
+++ b/gns3server/schemas/vpcs_templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/server.py b/gns3server/server.py
index 9ed16f9c..2acaf4f6 100644
--- a/gns3server/server.py
+++ b/gns3server/server.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GNS3 Technologies Inc.
#
@@ -71,24 +70,23 @@ class Server:
try:
language, encoding = locale.getlocale()
except ValueError as e:
- log.error("Could not determine the current locale: {}".format(e))
+ log.error(f"Could not determine the current locale: {e}")
if not language and not encoding:
try:
log.warning("Could not find a default locale, switching to C.UTF-8...")
locale.setlocale(locale.LC_ALL, ("C", "UTF-8"))
except locale.Error as e:
- log.error("Could not switch to the C.UTF-8 locale: {}".format(e))
+ log.error(f"Could not switch to the C.UTF-8 locale: {e}")
raise SystemExit
elif encoding != "UTF-8":
- log.warning(
- "Your locale {}.{} encoding is not UTF-8, switching to the UTF-8 version...".format(language, encoding))
+ log.warning(f"Your locale {language}.{encoding} encoding is not UTF-8, switching to the UTF-8 version...")
try:
locale.setlocale(locale.LC_ALL, (language, "UTF-8"))
except locale.Error as e:
- log.error("Could not set an UTF-8 encoding for the {} locale: {}".format(language, e))
+ log.error(f"Could not set an UTF-8 encoding for the {language} locale: {e}")
raise SystemExit
else:
- log.info("Current locale is {}.{}".format(language, encoding))
+ log.info(f"Current locale is {language}.{encoding}")
def _parse_arguments(self, argv):
"""
@@ -97,7 +95,7 @@ class Server:
:params args: Array of command line arguments
"""
- parser = argparse.ArgumentParser(description="GNS3 server version {}".format(__version__))
+ parser = argparse.ArgumentParser(description=f"GNS3 server version {__version__}")
parser.add_argument("-v", "--version", help="show the version", action="version", version=__version__)
parser.add_argument("--host", help="run on the given host/IP address")
parser.add_argument("--port", help="run on the given port", type=int)
@@ -179,10 +177,10 @@ class Server:
try:
if signame == "SIGHUP":
- log.info("Server has got signal {}, reloading...".format(signame))
+ log.info(f"Server has got signal {signame}, reloading...")
asyncio.ensure_future(self.reload_server())
else:
- log.info("Server has got signal {}, exiting...".format(signame))
+ log.info(f"Server has got signal {signame}, exiting...")
os.kill(os.getpid(), signal.SIGTERM)
except asyncio.CancelledError:
pass
@@ -260,12 +258,12 @@ class Server:
self._pid_lock(args.pid)
self._kill_ghosts()
- log.info("GNS3 server version {}".format(__version__))
+ log.info(f"GNS3 server version {__version__}")
current_year = datetime.date.today().year
- log.info("Copyright (c) 2007-{} GNS3 Technologies Inc.".format(current_year))
+ log.info(f"Copyright (c) 2007-{current_year} GNS3 Technologies Inc.")
for config_file in Config.instance().get_config_files():
- log.info("Config file {} loaded".format(config_file))
+ log.info(f"Config file '{config_file}' loaded")
self._set_config_defaults_from_command_line(args)
config = Config.instance().settings
@@ -274,7 +272,7 @@ class Server:
log.warning("Local mode is enabled. Beware, clients will have full control on your filesystem")
if config.Server.enable_http_auth:
- log.info("HTTP authentication is enabled with username '{}'".format(config.Server.user))
+ log.info(f"HTTP authentication is enabled with username '{config.Server.user}'")
# we only support Python 3 version >= 3.6
if sys.version_info < (3, 6, 0):
@@ -302,7 +300,7 @@ class Server:
self._signal_handling()
try:
- log.info("Starting server on {}:{}".format(host, port))
+ log.info(f"Starting server on {host}:{port}")
# only show uvicorn access logs in debug mode
access_log = False
diff --git a/gns3server/services/__init__.py b/gns3server/services/__init__.py
index 4273f2d6..9e828880 100644
--- a/gns3server/services/__init__.py
+++ b/gns3server/services/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/services/authentication.py b/gns3server/services/authentication.py
index efb69741..5bc9aed7 100644
--- a/gns3server/services/authentication.py
+++ b/gns3server/services/authentication.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/services/computes.py b/gns3server/services/computes.py
index d11b90b5..63e8e6a4 100644
--- a/gns3server/services/computes.py
+++ b/gns3server/services/computes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GNS3 Technologies Inc.
#
diff --git a/gns3server/services/templates.py b/gns3server/services/templates.py
index 08444a3e..f48d58b0 100644
--- a/gns3server/services/templates.py
+++ b/gns3server/services/templates.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GNS3 Technologies Inc.
#
diff --git a/gns3server/utils/__init__.py b/gns3server/utils/__init__.py
index 7aabbcd0..c571582d 100644
--- a/gns3server/utils/__init__.py
+++ b/gns3server/utils/__init__.py
@@ -60,7 +60,7 @@ def parse_version(version):
"""
release_type_found = False
- version_infos = re.split('(\.|[a-z]+)', version)
+ version_infos = re.split(r'(\.|[a-z]+)', version)
version = []
for info in version_infos:
if info == '.' or len(info) == 0:
diff --git a/gns3server/utils/application_id.py b/gns3server/utils/application_id.py
index b09bcb2b..39042626 100644
--- a/gns3server/utils/application_id.py
+++ b/gns3server/utils/application_id.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 GNS3 Technologies Inc.
#
@@ -38,7 +37,7 @@ def get_next_application_id(projects, computes):
if project.status == "opened":
nodes.extend(list(project.nodes.values()))
- used = set([n.properties["application_id"] for n in nodes if n.node_type == "iou" and n.compute.id in computes])
+ used = {n.properties["application_id"] for n in nodes if n.node_type == "iou" and n.compute.id in computes}
pool = set(range(1, 512))
try:
application_id = (pool - used).pop()
diff --git a/gns3server/utils/asyncio/__init__.py b/gns3server/utils/asyncio/__init__.py
index f0f6a626..7abb30ea 100644
--- a/gns3server/utils/asyncio/__init__.py
+++ b/gns3server/utils/asyncio/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/utils/asyncio/aiozipstream.py b/gns3server/utils/asyncio/aiozipstream.py
index cbff3aff..9071896d 100644
--- a/gns3server/utils/asyncio/aiozipstream.py
+++ b/gns3server/utils/asyncio/aiozipstream.py
@@ -56,7 +56,7 @@ def _get_compressor(compress_type):
return None
-class PointerIO(object):
+class PointerIO:
def __init__(self, mode='wb'):
if mode not in ('wb', ):
diff --git a/gns3server/utils/asyncio/embed_shell.py b/gns3server/utils/asyncio/embed_shell.py
index ca822ff1..b93b4f87 100644
--- a/gns3server/utils/asyncio/embed_shell.py
+++ b/gns3server/utils/asyncio/embed_shell.py
@@ -121,7 +121,7 @@ class EmbedShell:
found = True
break
if not found:
- res = ('Command not found {}\n'.format(cmd[0]) + (await self.help()))
+ res = (f'Command not found {cmd[0]}\n' + (await self.help()))
return res
async def run(self):
@@ -196,7 +196,7 @@ class UnstoppableEventLoop(EventLoop):
class ShellConnection(TelnetConnection):
def __init__(self, reader, writer, shell, window_size_changed_callback, loop):
- super(ShellConnection, self).__init__(reader, writer, window_size_changed_callback)
+ super().__init__(reader, writer, window_size_changed_callback)
self._shell = shell
self._loop = loop
self._cli = None
diff --git a/gns3server/utils/asyncio/input_stream.py b/gns3server/utils/asyncio/input_stream.py
index d5999a3f..3bb9f2ea 100644
--- a/gns3server/utils/asyncio/input_stream.py
+++ b/gns3server/utils/asyncio/input_stream.py
@@ -4,12 +4,10 @@ Parser for VT100 input stream.
# Copied from prompt_toolkit/terminal/vt100_input.py due to dependency on termios (which is not available on Windows)
-from __future__ import unicode_literals
import re
import six
-from six.moves import range
from prompt_toolkit.keys import Keys
from prompt_toolkit.key_binding.input_processor import KeyPress
@@ -41,7 +39,7 @@ _cpr_response_prefix_re = re.compile('^' + re.escape('\x1b[') + r'[\d;]*\Z')
_mouse_event_prefix_re = re.compile('^' + re.escape('\x1b[') + r'([\d;]*|M.{0,2})\Z')
-class _Flush(object):
+class _Flush:
""" Helper object to indicate flush operation to the parser. """
pass
@@ -217,7 +215,7 @@ class _IsPrefixOfLongerMatchCache(dict):
_IS_PREFIX_OF_LONGER_MATCH_CACHE = _IsPrefixOfLongerMatchCache()
-class InputStream(object):
+class InputStream:
"""
Parser for VT100 input stream.
@@ -345,7 +343,7 @@ class InputStream(object):
:param data: Input string (unicode).
"""
- assert isinstance(data, six.text_type)
+ assert isinstance(data, str)
if _DEBUG_RENDERER_INPUT:
self.LOG.write(repr(data).encode('utf-8') + b'\n')
diff --git a/gns3server/utils/asyncio/raw_command_server.py b/gns3server/utils/asyncio/raw_command_server.py
index a0533dd9..2b563b49 100644
--- a/gns3server/utils/asyncio/raw_command_server.py
+++ b/gns3server/utils/asyncio/raw_command_server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
diff --git a/gns3server/utils/asyncio/serial.py b/gns3server/utils/asyncio/serial.py
index 765fda29..426084ff 100644
--- a/gns3server/utils/asyncio/serial.py
+++ b/gns3server/utils/asyncio/serial.py
@@ -102,7 +102,7 @@ async def _asyncio_open_serial_windows(path):
try:
await wait_for_named_pipe_creation(path)
except asyncio.TimeoutError:
- raise NodeError('Pipe file "{}" is missing'.format(path))
+ raise NodeError(f'Pipe file "{path}" is missing')
return WindowsPipe(path)
@@ -117,13 +117,13 @@ async def _asyncio_open_serial_unix(path):
# wait for VM to create the pipe file.
await wait_for_file_creation(path)
except asyncio.TimeoutError:
- raise NodeError('Pipe file "{}" is missing'.format(path))
+ raise NodeError(f'Pipe file "{path}" is missing')
output = SerialReaderWriterProtocol()
try:
await asyncio.get_event_loop().create_unix_connection(lambda: output, path)
except ConnectionRefusedError:
- raise NodeError('Can\'t open pipe file "{}"'.format(path))
+ raise NodeError(f'Can\'t open pipe file "{path}"')
return output
diff --git a/gns3server/utils/asyncio/telnet_server.py b/gns3server/utils/asyncio/telnet_server.py
index 7c9e7397..f2ac421f 100644
--- a/gns3server/utils/asyncio/telnet_server.py
+++ b/gns3server/utils/asyncio/telnet_server.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -58,7 +57,7 @@ LINEMO = 34 # Line Mode
READ_SIZE = 1024
-class TelnetConnection(object):
+class TelnetConnection:
"""Default implementation of telnet connection which may but may not be used."""
def __init__(self, reader, writer, window_size_changed_callback=None):
self.is_closing = False
@@ -310,7 +309,7 @@ class AsyncioTelnetServer:
command, payload = data[0], data[1:]
if command == NAWS:
if len(payload) == 4:
- columns, rows = struct.unpack(str('!HH'), bytes(payload))
+ columns, rows = struct.unpack('!HH', bytes(payload))
await connection.window_size_changed(columns, rows)
else:
log.warning('Wrong number of NAWS bytes')
@@ -358,7 +357,7 @@ class AsyncioTelnetServer:
pass
else:
log.debug("Unhandled telnet command: "
- "{0:#x} {1:#x}".format(*iac_cmd))
+ "{:#x} {:#x}".format(*iac_cmd))
elif iac_cmd[1] == SB: # starts negotiation commands
negotiation = []
for pos in range(2, self.MAX_NEGOTIATION_READ):
@@ -383,28 +382,28 @@ class AsyncioTelnetServer:
if iac_cmd[1] == DO:
if iac_cmd[2] not in [ECHO, SGA, BINARY]:
network_writer.write(bytes([IAC, WONT, iac_cmd[2]]))
- log.debug("Telnet WON'T {:#x}".format(iac_cmd[2]))
+ log.debug(f"Telnet WON'T {iac_cmd[2]:#x}")
else:
if iac_cmd[2] == SGA:
if self._binary:
network_writer.write(bytes([IAC, WILL, iac_cmd[2]]))
else:
network_writer.write(bytes([IAC, WONT, iac_cmd[2]]))
- log.debug("Telnet WON'T {:#x}".format(iac_cmd[2]))
+ log.debug(f"Telnet WON'T {iac_cmd[2]:#x}")
elif iac_cmd[1] == DONT:
log.debug("Unhandled DONT telnet command: "
- "{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
+ "{:#x} {:#x} {:#x}".format(*iac_cmd))
elif iac_cmd[1] == WILL:
if iac_cmd[2] not in [BINARY, NAWS]:
log.debug("Unhandled WILL telnet command: "
- "{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
+ "{:#x} {:#x} {:#x}".format(*iac_cmd))
elif iac_cmd[1] == WONT:
log.debug("Unhandled WONT telnet command: "
- "{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
+ "{:#x} {:#x} {:#x}".format(*iac_cmd))
else:
log.debug("Unhandled telnet command: "
- "{0:#x} {1:#x} {2:#x}".format(*iac_cmd))
+ "{:#x} {:#x} {:#x}".format(*iac_cmd))
# Remove the entire TELNET command from the buffer
buf = buf.replace(iac_cmd, b'', 1)
diff --git a/gns3server/utils/cpu_percent.py b/gns3server/utils/cpu_percent.py
index 441b7efb..d8375517 100644
--- a/gns3server/utils/cpu_percent.py
+++ b/gns3server/utils/cpu_percent.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
diff --git a/gns3server/utils/get_resource.py b/gns3server/utils/get_resource.py
index b988a200..9201edf0 100644
--- a/gns3server/utils/get_resource.py
+++ b/gns3server/utils/get_resource.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
diff --git a/gns3server/utils/images.py b/gns3server/utils/images.py
index 9f7d84c7..2eddf2c7 100644
--- a/gns3server/utils/images.py
+++ b/gns3server/utils/images.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -83,7 +82,7 @@ def list_images(type):
"md5sum": md5sum(os.path.join(root, filename)),
"filesize": os.stat(os.path.join(root, filename)).st_size})
except OSError as e:
- log.warning("Can't add image {}: {}".format(path, str(e)))
+ log.warning(f"Can't add image {path}: {str(e)}")
return images
@@ -170,7 +169,7 @@ def md5sum(path, stopped_event=None):
with open(path, 'rb') as f:
while True:
if stopped_event is not None and stopped_event.is_set():
- log.error("MD5 sum calculation of `{}` has stopped due to cancellation".format(path))
+ log.error(f"MD5 sum calculation of `{path}` has stopped due to cancellation")
return
buf = f.read(128)
if not buf:
@@ -182,7 +181,7 @@ def md5sum(path, stopped_event=None):
return None
try:
- with open('{}.md5sum'.format(path), 'w+') as f:
+ with open(f'{path}.md5sum', 'w+') as f:
f.write(digest)
except OSError as e:
log.error("Can't write digest of %s: %s", path, str(e))
@@ -195,6 +194,6 @@ def remove_checksum(path):
Remove the checksum of an image from cache if exists
"""
- path = '{}.md5sum'.format(path)
+ path = f'{path}.md5sum'
if os.path.exists(path):
os.remove(path)
diff --git a/gns3server/utils/interfaces.py b/gns3server/utils/interfaces.py
index 742a7c1c..fdfee5a6 100644
--- a/gns3server/utils/interfaces.py
+++ b/gns3server/utils/interfaces.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -46,10 +45,10 @@ def _get_windows_interfaces_from_registry():
hkeycard = winreg.OpenKey(hkey, network_card_id)
guid, _ = winreg.QueryValueEx(hkeycard, "ServiceName")
netcard, _ = winreg.QueryValueEx(hkeycard, "Description")
- connection = r"SYSTEM\CurrentControlSet\Control\Network\{4D36E972-E325-11CE-BFC1-08002BE10318}" + "\{}\Connection".format(guid)
+ connection = r"SYSTEM\CurrentControlSet\Control\Network\{4D36E972-E325-11CE-BFC1-08002BE10318}" + fr"\{guid}\Connection"
hkeycon = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, connection)
name, _ = winreg.QueryValueEx(hkeycon, "Name")
- interface = r"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\{}".format(guid)
+ interface = fr"SYSTEM\CurrentControlSet\Services\Tcpip\Parameters\Interfaces\{guid}"
hkeyinterface = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, interface)
is_dhcp_enabled, _ = winreg.QueryValueEx(hkeyinterface, "EnableDHCP")
if is_dhcp_enabled:
@@ -74,7 +73,7 @@ def _get_windows_interfaces_from_registry():
winreg.CloseKey(hkeycard)
winreg.CloseKey(hkey)
except OSError as e:
- log.error("could not read registry information: {}".format(e))
+ log.error(f"could not read registry information: {e}")
return interfaces
@@ -92,7 +91,7 @@ def get_windows_interfaces():
interfaces = []
try:
locator = win32com.client.Dispatch("WbemScripting.SWbemLocator")
- service = locator.ConnectServer(".", "root\cimv2")
+ service = locator.ConnectServer(".", r"root\cimv2")
network_configs = service.InstancesOf("Win32_NetworkAdapterConfiguration")
# more info on Win32_NetworkAdapter: http://msdn.microsoft.com/en-us/library/aa394216%28v=vs.85%29.aspx
for adapter in service.InstancesOf("Win32_NetworkAdapter"):
@@ -188,7 +187,7 @@ def interfaces():
net_if_addrs = psutil.net_if_addrs()
for interface in sorted(net_if_addrs.keys()):
if allowed_interfaces and interface not in allowed_interfaces and not interface.startswith("gns3tap"):
- log.warning("Interface '{}' is not allowed to be used on this server".format(interface))
+ log.warning(f"Interface '{interface}' is not allowed to be used on this server")
continue
ip_address = ""
mac_address = ""
@@ -221,7 +220,7 @@ def interfaces():
message = "pywin32 module is not installed, please install it on the server to get the available interface names"
raise ComputeError(message)
except Exception as e:
- log.error("uncaught exception {type}".format(type=type(e)), exc_info=1)
+ log.error(f"uncaught exception {type(e)}", exc_info=1)
raise ComputeError(f"uncaught exception: {e}")
if service_installed is False:
diff --git a/gns3server/utils/notification_queue.py b/gns3server/utils/notification_queue.py
index ba00f462..1afcda84 100644
--- a/gns3server/utils/notification_queue.py
+++ b/gns3server/utils/notification_queue.py
@@ -64,7 +64,7 @@ class NotificationQueue(asyncio.Queue):
msg["memory_usage_percent"] = psutil.virtual_memory().percent
msg["disk_usage_percent"] = psutil.disk_usage(get_default_project_directory()).percent
except OSError as e:
- log.warning("Could not get CPU and memory usage from psutil: {}".format(e))
+ log.warning(f"Could not get CPU and memory usage from psutil: {e}")
return msg
async def get_json(self, timeout):
diff --git a/gns3server/utils/picture.py b/gns3server/utils/picture.py
index 2bdb956b..4eb549b0 100644
--- a/gns3server/utils/picture.py
+++ b/gns3server/utils/picture.py
@@ -121,7 +121,7 @@ def get_size(data, default_width=0, default_height=0):
else:
height = _svg_convert_size(height_attr)
except (AttributeError, IndexError) as e:
- raise ValueError("Invalid SVG file: {}".format(e))
+ raise ValueError(f"Invalid SVG file: {e}")
return width, height, filetype
diff --git a/gns3server/utils/qt.py b/gns3server/utils/qt.py
index 9c1b88f8..8d5c89dd 100644
--- a/gns3server/utils/qt.py
+++ b/gns3server/utils/qt.py
@@ -27,7 +27,7 @@ def qt_font_to_style(font, color):
if font is None:
font = "TypeWriter,10,-1,5,75,0,0,0,0,0"
font_info = font.split(",")
- style = "font-family: {};font-size: {};".format(font_info[0], font_info[1])
+ style = f"font-family: {font_info[0]};font-size: {font_info[1]};"
if font_info[4] == "75":
style += "font-weight: bold;"
if font_info[5] == "1":
@@ -37,8 +37,8 @@ def qt_font_to_style(font, color):
color = "000000"
if len(color) == 9:
style += "fill: #" + color[-6:] + ";"
- style += "fill-opacity: {};".format(round(1.0 / 255 * int(color[:3][-2:], base=16), 2))
+ style += f"fill-opacity: {round(1.0 / 255 * int(color[:3][-2:], base=16), 2)};"
else:
style += "fill: #" + color[-6:] + ";"
- style += "fill-opacity: {};".format(1.0)
+ style += f"fill-opacity: {1.0};"
return style
diff --git a/gns3server/utils/vmnet.py b/gns3server/utils/vmnet.py
index 3748dbba..0f96b325 100644
--- a/gns3server/utils/vmnet.py
+++ b/gns3server/utils/vmnet.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -44,7 +43,7 @@ def parse_networking_file():
pairs = dict()
allocated_subnets = []
try:
- with open(VMWARE_NETWORKING_FILE, "r", encoding="utf-8") as f:
+ with open(VMWARE_NETWORKING_FILE, encoding="utf-8") as f:
version = f.readline()
for line in f.read().splitlines():
try:
@@ -55,9 +54,9 @@ def parse_networking_file():
if key.endswith("HOSTONLY_SUBNET"):
allocated_subnets.append(value)
except ValueError:
- raise SystemExit("Error while parsing {}".format(VMWARE_NETWORKING_FILE))
+ raise SystemExit(f"Error while parsing {VMWARE_NETWORKING_FILE}")
except OSError as e:
- raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
+ raise SystemExit(f"Cannot open {VMWARE_NETWORKING_FILE}: {e}")
return version, pairs, allocated_subnets
@@ -71,9 +70,9 @@ def write_networking_file(version, pairs):
with open(VMWARE_NETWORKING_FILE, "w", encoding="utf-8") as f:
f.write(version)
for key, value in vmnets.items():
- f.write("answer {} {}\n".format(key, value))
+ f.write(f"answer {key} {value}\n")
except OSError as e:
- raise SystemExit("Cannot open {}: {}".format(VMWARE_NETWORKING_FILE, e))
+ raise SystemExit(f"Cannot open {VMWARE_NETWORKING_FILE}: {e}")
# restart VMware networking service
if sys.platform.startswith("darwin"):
@@ -98,7 +97,7 @@ def parse_vmnet_range(start, end):
if len(values) != 2:
raise argparse.ArgumentTypeError("vmnet range must consist of 2 numbers")
if not start <= values[0] or not values[1] <= end:
- raise argparse.ArgumentTypeError("vmnet range must be between {} and {}".format(start, end))
+ raise argparse.ArgumentTypeError(f"vmnet range must be between {start} and {end}")
setattr(args, self.dest, values)
return Range
@@ -176,14 +175,14 @@ def vmnet_windows(args, vmnet_range_start, vmnet_range_end):
for vmnet_number in range(1, 20):
if vmnet_number in (1, 8):
continue
- print("Removing vmnet{}...".format(vmnet_number))
- os.system('"{}" -- remove adapter vmnet{}'.format(vnetlib_path, vmnet_number))
+ print(f"Removing vmnet{vmnet_number}...")
+ os.system(f'"{vnetlib_path}" -- remove adapter vmnet{vmnet_number}')
else:
for vmnet_number in range(vmnet_range_start, vmnet_range_end + 1):
if vmnet_number in (1, 8):
continue
- print("Adding vmnet{}...".format(vmnet_number))
- os.system('"{}" -- add adapter vmnet{}'.format(vnetlib_path, vmnet_number))
+ print(f"Adding vmnet{vmnet_number}...")
+ os.system(f'"{vnetlib_path}" -- add adapter vmnet{vmnet_number}')
os.system("net stop npf")
os.system("net start npf")
os.system("net stop npcap")
@@ -203,9 +202,9 @@ def vmnet_unix(args, vmnet_range_start, vmnet_range_end):
version, pairs, allocated_subnets = parse_networking_file()
if args.list and not sys.platform.startswith("win"):
for vmnet_number in range(1, 256):
- vmnet_name = "VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)
+ vmnet_name = f"VNET_{vmnet_number}_VIRTUAL_ADAPTER"
if vmnet_name in pairs:
- print("vmnet{}".format(vmnet_number))
+ print(f"vmnet{vmnet_number}")
return
if args.clean:
@@ -216,7 +215,7 @@ def vmnet_unix(args, vmnet_range_start, vmnet_range_end):
del pairs[key]
else:
for vmnet_number in range(vmnet_range_start, vmnet_range_end + 1):
- vmnet_name = "VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)
+ vmnet_name = f"VNET_{vmnet_number}_VIRTUAL_ADAPTER"
if vmnet_name in pairs:
continue
allocated_subnet = None
@@ -228,13 +227,13 @@ def vmnet_unix(args, vmnet_range_start, vmnet_range_end):
break
if allocated_subnet is None:
- print("Couldn't allocate a subnet for vmnet{}".format(vmnet_number))
+ print(f"Couldn't allocate a subnet for vmnet{vmnet_number}")
continue
- print("Adding vmnet{}...".format(vmnet_number))
- pairs["VNET_{}_HOSTONLY_NETMASK".format(vmnet_number)] = "255.255.255.0"
- pairs["VNET_{}_HOSTONLY_SUBNET".format(vmnet_number)] = allocated_subnet
- pairs["VNET_{}_VIRTUAL_ADAPTER".format(vmnet_number)] = "yes"
+ print(f"Adding vmnet{vmnet_number}...")
+ pairs[f"VNET_{vmnet_number}_HOSTONLY_NETMASK"] = "255.255.255.0"
+ pairs[f"VNET_{vmnet_number}_HOSTONLY_SUBNET"] = allocated_subnet
+ pairs[f"VNET_{vmnet_number}_VIRTUAL_ADAPTER"] = "yes"
write_networking_file(version, pairs)
@@ -246,7 +245,7 @@ def main():
parser = argparse.ArgumentParser(description='%(prog)s add/remove vmnet interfaces')
parser.add_argument('-r', "--range", nargs='+', action=parse_vmnet_range(1, 255),
- type=int, help="vmnet range to add (default is {} {})".format(DEFAULT_RANGE[0], DEFAULT_RANGE[1]))
+ type=int, help=f"vmnet range to add (default is {DEFAULT_RANGE[0]} {DEFAULT_RANGE[1]})")
parser.add_argument("-C", "--clean", action="store_true", help="remove all vmnets excepting vmnet1 and vmnet8")
parser.add_argument("-l", "--list", action="store_true", help="list all existing vmnets (UNIX only)")
diff --git a/gns3server/utils/windows_loopback.py b/gns3server/utils/windows_loopback.py
index 282a2295..66239ff2 100644
--- a/gns3server/utils/windows_loopback.py
+++ b/gns3server/utils/windows_loopback.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -37,11 +36,11 @@ def parse_add_loopback():
def __call__(self, parser, args, values, option_string=None):
try:
- ipaddress.IPv4Interface("{}/{}".format(values[1], values[2]))
+ ipaddress.IPv4Interface(f"{values[1]}/{values[2]}")
except ipaddress.AddressValueError as e:
- raise argparse.ArgumentTypeError("Invalid IP address: {}".format(e))
+ raise argparse.ArgumentTypeError(f"Invalid IP address: {e}")
except ipaddress.NetmaskValueError as e:
- raise argparse.ArgumentTypeError("Invalid subnet mask: {}".format(e))
+ raise argparse.ArgumentTypeError(f"Invalid subnet mask: {e}")
setattr(args, self.dest, values)
return Add
@@ -52,7 +51,7 @@ def add_loopback(devcon_path, name, ip_address, netmask):
previous_adapters = wmi.WMI().Win32_NetworkAdapter()
for adapter in previous_adapters:
if "Loopback" in adapter.Description and adapter.NetConnectionID == name:
- raise SystemExit('Windows loopback adapter named "{}" already exists'.format(name))
+ raise SystemExit(f'Windows loopback adapter named "{name}" already exists')
# install a new Windows loopback adapter
os.system('"{}" install {}\\inf\\netloop.inf *MSLOOP'.format(devcon_path, os.path.expandvars("%WINDIR%")))
@@ -60,11 +59,11 @@ def add_loopback(devcon_path, name, ip_address, netmask):
# configure the new Windows loopback adapter
for adapter in wmi.WMI().Win32_NetworkAdapter():
if "Loopback" in adapter.Description and adapter not in previous_adapters:
- print('Renaming loopback adapter "{}" to "{}"'.format(adapter.NetConnectionID, name))
+ print(f'Renaming loopback adapter "{adapter.NetConnectionID}" to "{name}"')
adapter.NetConnectionID = name
for network_config in wmi.WMI().Win32_NetworkAdapterConfiguration(IPEnabled=True):
if network_config.InterfaceIndex == adapter.InterfaceIndex:
- print('Configuring loopback adapter "{}" with {} {}'.format(name, ip_address, netmask))
+ print(f'Configuring loopback adapter "{name}" with {ip_address} {netmask}')
retcode = network_config.EnableStatic(IPAddress=[ip_address], SubnetMask=[netmask])[0]
if retcode == 1:
print("A reboot is required")
@@ -88,12 +87,12 @@ def remove_loopback(devcon_path, name):
for adapter in wmi.WMI().Win32_NetworkAdapter():
if "Loopback" in adapter.Description and adapter.NetConnectionID == name:
# remove a Windows loopback adapter
- print('Removing loopback adapter "{}"'.format(name))
- os.system('"{}" remove @{}'.format(devcon_path, adapter.PNPDeviceID))
+ print(f'Removing loopback adapter "{name}"')
+ os.system(f'"{devcon_path}" remove @{adapter.PNPDeviceID}')
deleted = True
if not deleted:
- raise SystemExit('Could not find adapter "{}"'.format(name))
+ raise SystemExit(f'Could not find adapter "{name}"')
# update winpcap/npcap services
os.system("net stop npf")
diff --git a/gns3server/version.py b/gns3server/version.py
index 7e7af450..ebe058ef 100644
--- a/gns3server/version.py
+++ b/gns3server/version.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -32,6 +31,6 @@ if "dev" in __version__:
import subprocess
if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", ".git")):
r = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"]).decode().strip()
- __version__ = "{}-{}".format(__version__, r)
+ __version__ = f"{__version__}-{r}"
except Exception as e:
print(e)