Mirror of https://github.com/GNS3/gns3-server (synced 2025-03-01 08:52:00 +00:00)
Use pyupgrade with --py36-plus param.
commit f928738bd5
parent a9eb035b7d
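For context, pyupgrade is a codemod that rewrites Python source files in place to use the syntax of the targeted interpreter version. With --py36-plus it removes the now-unnecessary "# -*- coding: utf-8 -*-" cookies, drops the redundant "(object)" base class, and converts str.format() calls into f-strings, which is exactly the pattern repeated throughout the diff below. The following is a minimal before/after sketch of those rewrites; the module name and the describe() method are made up for illustration, while the "class Adapter(object):" to "class Adapter:" change mirrors a real hunk in this commit.

    # example_adapter.py - illustrative sketch only, not a file from gns3-server
    #
    # Before (Python 2 era idioms):
    #     # -*- coding: utf-8 -*-
    #     class Adapter(object):
    #         def describe(self, name, port):
    #             return "adapter {} on port {}".format(name, port)
    #
    # After running "pyupgrade --py36-plus example_adapter.py": the coding cookie
    # is gone, the class statement is bare, and the message is an f-string.

    class Adapter:
        def describe(self, name, port):
            # str.format() rewritten as an f-string interpolation
            return f"adapter {name} on port {port}"

    if __name__ == "__main__":
        print(Adapter().describe("eth0", 8000))

A typical invocation over a whole working tree would be something like pyupgrade --py36-plus $(git ls-files '*.py'); the exact command used for this commit is not recorded in the message, so treat that invocation as an assumption.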
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -226,7 +225,7 @@ async def start_capture(adapter_number: int,
try:
pcap_file_path.encode('ascii')
except UnicodeEncodeError:
raise DynamipsError('The capture file path "{}" must only contain ASCII (English) characters'.format(pcap_file_path))
raise DynamipsError(f"The capture file path '{pcap_file_path}' must only contain ASCII (English) characters")

await node.start_capture(adapter_number, port_number, pcap_file_path, node_capture_data.data_link_type)
return {"pcap_file_path": pcap_file_path}
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -46,7 +45,7 @@ async def notification_ws(websocket: WebSocket):
except (ConnectionClosed, WebSocketDisconnect):
log.info(f"Client {websocket.client.host}:{websocket.client.port} has disconnected from compute WebSocket")
except WebSocketException as e:
log.warning("Error while sending to controller event to WebSocket client: '{}'".format(e))
log.warning(f"Error while sending to controller event to WebSocket client: {e}")
finally:
await websocket.close()
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -61,7 +60,7 @@ async def shutdown():
try:
future.result()
except Exception as e:
log.error("Could not close project {}".format(e), exc_info=1)
log.error(f"Could not close project: {e}", exc_info=1)
continue

# then shutdown the server itself
@@ -94,7 +93,7 @@ def check_version(version: schemas.Version):

print(version.version)
if version.version != __version__:
raise ControllerError("Client version {} is not the same as server version {}".format(version.version, __version__))
raise ControllerError(f"Client version {version.version} is not the same as server version {__version__}")
return {"version": __version__}

@@ -135,7 +134,7 @@ async def statistics():
r = await compute.get("/statistics")
compute_statistics.append({"compute_id": compute.id, "compute_name": compute.name, "statistics": r.json})
except ControllerError as e:
log.error("Could not retrieve statistics on compute {}: {}".format(compute.name, e))
log.error(f"Could not retrieve statistics on compute {compute.name}: {e}")
return compute_statistics

# @Route.post(
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -58,7 +57,7 @@ class NodeConcurrency(APIRoute):
project_id = request.path_params.get("project_id")

if node_id and "pcap" not in request.url.path and not request.url.path.endswith("console/ws"):
lock_key = "{}:{}".format(project_id, node_id)
lock_key = f"{project_id}:{node_id}"
node_locks.setdefault(lock_key, {"lock": asyncio.Lock(), "concurrency": 0})
node_locks[lock_key]["concurrency"] += 1

@@ -323,11 +322,9 @@ async def get_file(file_path: str, node: Node = Depends(dep_node)):
raise ControllerForbiddenError("It is forbidden to get a file outside the project directory")

node_type = node.node_type
path = "/project-files/{}/{}/{}".format(node_type, node.id, path)
path = f"/project-files/{node_type}/{node.id}/{path}"

res = await node.compute.http_query("GET", "/projects/{project_id}/files{path}".format(project_id=node.project.id, path=path),
timeout=None,
raw=True)
res = await node.compute.http_query("GET", f"/projects/{node.project.id}/files{path}", timeout=None, raw=True)
return Response(res.body, media_type="application/octet-stream")

@@ -345,14 +342,11 @@ async def post_file(file_path: str, request: Request, node: Node = Depends(dep_n
raise ControllerForbiddenError("Cannot write outside the node directory")

node_type = node.node_type
path = "/project-files/{}/{}/{}".format(node_type, node.id, path)
path = f"/project-files/{node_type}/{node.id}/{path}"

data = await request.body() #FIXME: are we handling timeout or large files correctly?

await node.compute.http_query("POST", "/projects/{project_id}/files{path}".format(project_id=node.project.id, path=path),
data=data,
timeout=None,
raw=True)
await node.compute.http_query("POST", f"/projects/{node.project.id}/files{path}", data=data, timeout=None, raw=True)

@router.websocket("/{node_id}/console/ws")
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -41,7 +40,7 @@ async def http_notification():
with Controller.instance().notification.controller_queue() as queue:
while True:
msg = await queue.get_json(5)
yield ("{}\n".format(msg)).encode("utf-8")
yield (f"{msg}\n").encode("utf-8")

return StreamingResponse(event_stream(), media_type="application/json")

@@ -62,6 +61,6 @@ async def notification_ws(websocket: WebSocket):
except (ConnectionClosed, WebSocketDisconnect):
log.info(f"Client {websocket.client.host}:{websocket.client.port} has disconnected from controller WebSocket")
except WebSocketException as e:
log.warning("Error while sending to controller event to WebSocket client: '{}'".format(e))
log.warning(f"Error while sending to controller event to WebSocket client: {e}")
finally:
await websocket.close()
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 GNS3 Technologies Inc.
#
@@ -180,7 +179,7 @@ async def load_project(path: str = Body(..., embed=True)):
controller = Controller.instance()
dot_gns3_file = path
if Config.instance().settings.Server.local is False:
log.error("Cannot load '{}' because the server has not been started with the '--local' parameter".format(dot_gns3_file))
log.error(f"Cannot load '{dot_gns3_file}' because the server has not been started with the '--local' parameter")
raise ControllerForbiddenError("Cannot load project when server is not local")
project = await controller.load_project(dot_gns3_file,)
return project.__json__()
@@ -195,7 +194,7 @@ async def notification(project_id: UUID):
controller = Controller.instance()
project = controller.get_project(str(project_id))

log.info("New client has connected to the notification stream for project ID '{}' (HTTP steam method)".format(project.id))
log.info(f"New client has connected to the notification stream for project ID '{project.id}' (HTTP steam method)")

async def event_stream():

@@ -203,15 +202,15 @@ async def notification(project_id: UUID):
with controller.notification.project_queue(project.id) as queue:
while True:
msg = await queue.get_json(5)
yield ("{}\n".format(msg)).encode("utf-8")
yield (f"{msg}\n").encode("utf-8")
finally:
log.info("Client has disconnected from notification for project ID '{}' (HTTP stream method)".format(project.id))
log.info(f"Client has disconnected from notification for project ID '{project.id}' (HTTP stream method)")
if project.auto_close:
# To avoid trouble with client connecting disconnecting we sleep few seconds before checking
# if someone else is not connected
await asyncio.sleep(5)
if not controller.notification.project_has_listeners(project.id):
log.info("Project '{}' is automatically closing due to no client listening".format(project.id))
log.info(f"Project '{project.id}' is automatically closing due to no client listening")
await project.close()

return StreamingResponse(event_stream(), media_type="application/json")
@@ -227,16 +226,16 @@ async def notification_ws(project_id: UUID, websocket: WebSocket):
project = controller.get_project(str(project_id))
await websocket.accept()

log.info("New client has connected to the notification stream for project ID '{}' (WebSocket method)".format(project.id))
log.info(f"New client has connected to the notification stream for project ID '{project.id}' (WebSocket method)")
try:
with controller.notification.project_queue(project.id) as queue:
while True:
notification = await queue.get_json(5)
await websocket.send_text(notification)
except (ConnectionClosed, WebSocketDisconnect):
log.info("Client has disconnected from notification stream for project ID '{}' (WebSocket method)".format(project.id))
log.info(f"Client has disconnected from notification stream for project ID '{project.id}' (WebSocket method)")
except WebSocketException as e:
log.warning("Error while sending to project event to WebSocket client: '{}'".format(e))
log.warning(f"Error while sending to project event to WebSocket client: {e}")
finally:
await websocket.close()
if project.auto_close:
@@ -244,7 +243,7 @@ async def notification_ws(project_id: UUID, websocket: WebSocket):
# if someone else is not connected
await asyncio.sleep(5)
if not controller.notification.project_has_listeners(project.id):
log.info("Project '{}' is automatically closing due to no client listening".format(project.id))
log.info(f"Project '{project.id}' is automatically closing due to no client listening")
await project.close()

@@ -285,14 +284,14 @@ async def export_project(project: Project = Depends(dep_project),
async for chunk in zstream:
yield chunk

log.info("Project '{}' exported in {:.4f} seconds".format(project.name, time.time() - begin))
log.info(f"Project '{project.name}' exported in {time.time() - begin:.4f} seconds")

# Will be raise if you have no space left or permission issue on your temporary directory
# RuntimeError: something was wrong during the zip process
except (ValueError, OSError, RuntimeError) as e:
raise ConnectionError("Cannot export project: {}".format(e))
raise ConnectionError(f"Cannot export project: {e}")

headers = {"CONTENT-DISPOSITION": 'attachment; filename="{}.gns3project"'.format(project.name)}
headers = {"CONTENT-DISPOSITION": f'attachment; filename="{project.name}.gns3project"'}
return StreamingResponse(streamer(), media_type="application/gns3project", headers=headers)

@@ -325,9 +324,9 @@ async def import_project(project_id: UUID, request: Request, path: Optional[Path
with open(temp_project_path, "rb") as f:
project = await import_controller_project(controller, str(project_id), f, location=path, name=name)

log.info("Project '{}' imported in {:.4f} seconds".format(project.name, time.time() - begin))
log.info(f"Project '{project.name}' imported in {time.time() - begin:.4f} seconds")
except OSError as e:
raise ControllerError("Could not import the project: {}".format(e))
raise ControllerError(f"Could not import the project: {e}")
return project.__json__()
@@ -54,7 +54,7 @@ async def get_symbol(symbol_id: str):
symbol = controller.symbols.get_path(symbol_id)
return FileResponse(symbol)
except (KeyError, OSError) as e:
return ControllerNotFoundError("Could not get symbol file: {}".format(e))
return ControllerNotFoundError(f"Could not get symbol file: {e}")

@router.get("/{symbol_id:path}/dimensions",
@@ -70,7 +70,7 @@ async def get_symbol_dimensions(symbol_id: str):
symbol_dimensions = {'width': width, 'height': height}
return symbol_dimensions
except (KeyError, OSError, ValueError) as e:
return ControllerNotFoundError("Could not get symbol file: {}".format(e))
return ControllerNotFoundError(f"Could not get symbol file: {e}")

@router.post("/{symbol_id:path}/raw",
@@ -87,7 +87,7 @@ async def upload_symbol(symbol_id: str, request: Request):
with open(path, "wb") as f:
f.write(await request.body())
except (UnicodeEncodeError, OSError) as e:
raise ControllerError("Could not write symbol file '{}': {}".format(path, e))
raise ControllerError(f"Could not write symbol file '{path}': {e}")

# Reset the symbol list
controller.symbols.list()
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 GNS3 Technologies Inc.
#

@@ -153,5 +153,5 @@ async def add_extra_headers(request: Request, call_next):
response = await call_next(request)
process_time = time.time() - start_time
response.headers["X-Process-Time"] = str(process_time)
response.headers["X-GNS3-Server-Version"] = "{}".format(__version__)
response.headers["X-GNS3-Server-Version"] = f"{__version__}"
return response

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#
@@ -16,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.


class Adapter(object):
class Adapter:

"""
Base class for adapters.

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -144,12 +143,12 @@ class BaseManager:
try:
future.result()
except (Exception, GeneratorExit) as e:
log.error("Could not close node {}".format(e), exc_info=1)
log.error(f"Could not close node: {e}", exc_info=1)
continue

if hasattr(BaseManager, "_instance"):
BaseManager._instance = None
log.debug("Module {} unloaded".format(self.module_name))
log.debug(f"Module {self.module_name} unloaded")

def get_node(self, node_id, project_id=None):
"""
@@ -168,15 +167,15 @@ class BaseManager:
try:
UUID(node_id, version=4)
except ValueError:
raise ComputeError("Node ID {} is not a valid UUID".format(node_id))
raise ComputeError(f"Node ID {node_id} is not a valid UUID")

if node_id not in self._nodes:
raise ComputeNotFoundError("Node ID {} doesn't exist".format(node_id))
raise ComputeNotFoundError(f"Node ID {node_id} doesn't exist")

node = self._nodes[node_id]
if project_id:
if node.project.id != project.id:
raise ComputeNotFoundError("Project ID {} doesn't belong to node {}".format(project_id, node.name))
raise ComputeNotFoundError("Project ID {project_id} doesn't belong to node {node.name}")

return node

@@ -226,7 +225,7 @@ class BaseManager:
shutil.rmtree(destination_dir)
shutil.copytree(source_node.working_dir, destination_dir, symlinks=True, ignore_dangling_symlinks=True)
except OSError as e:
raise ComputeError("Cannot duplicate node data: {}".format(e))
raise ComputeError(f"Cannot duplicate node data: {e}")

# We force a refresh of the name. This forces the rewrite
# of some configuration files
@@ -326,7 +325,7 @@ class BaseManager:
if struct.unpack("<IIIII", caps)[1] & 1 << 13:
return True
except (AttributeError, OSError) as e:
log.error("could not determine if CAP_NET_RAW capability is set for {}: {}".format(executable, e))
log.error(f"Could not determine if CAP_NET_RAW capability is set for {executable}: {e}")

return False

@@ -347,13 +346,13 @@ class BaseManager:
try:
info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
if not info:
raise ComputeError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
raise ComputeError(f"getaddrinfo returned an empty list on {rhost}:{rport}")
for res in info:
af, socktype, proto, _, sa = res
with socket.socket(af, socktype, proto) as sock:
sock.connect(sa)
except OSError as e:
raise ComputeError("Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
raise ComputeError(f"Could not create an UDP connection to {rhost}:{rport}: {e}")
nio = NIOUDP(lport, rhost, rport)
nio.filters = nio_settings.get("filters", {})
nio.suspend = nio_settings.get("suspend", False)
@@ -368,7 +367,7 @@ class BaseManager:
elif nio_settings["type"] in ("nio_generic_ethernet", "nio_ethernet"):
ethernet_device = nio_settings["ethernet_device"]
if not is_interface_up(ethernet_device):
raise ComputeError("Ethernet interface {} does not exist or is down".format(ethernet_device))
raise ComputeError(f"Ethernet interface {ethernet_device} does not exist or is down")
nio = NIOEthernet(ethernet_device)
assert nio is not None
return nio
@@ -400,9 +399,9 @@ class BaseManager:
continue
yield data
except FileNotFoundError:
raise ComputeNotFoundError("File '{}' not found".format(path))
raise ComputeNotFoundError(f"File '{path}' not found")
except PermissionError:
raise ComputeForbiddenError("File '{}' cannot be accessed".format(path))
raise ComputeForbiddenError(f"File '{path}' cannot be accessed")
def get_abs_image_path(self, path, extra_dir=None):
"""
@@ -426,17 +425,17 @@ class BaseManager:
# Windows path should not be send to a unix server
if not sys.platform.startswith("win"):
if re.match(r"^[A-Z]:", path) is not None:
raise NodeError("{} is not allowed on this remote server. Please only use a file from '{}'".format(path, img_directory))
raise NodeError(f"'{path}' is not allowed on this remote server. Please only use a file from '{img_directory}'")

if not os.path.isabs(path):
for directory in valid_directory_prefices:
log.debug("Searching for image '{}' in '{}'".format(orig_path, directory))
log.debug(f"Searching for image '{orig_path}' in '{directory}'")
path = self._recursive_search_file_in_directory(directory, orig_path)
if path:
return force_unix_path(path)

# Not found we try the default directory
log.debug("Searching for image '{}' in default directory".format(orig_path))
log.debug(f"Searching for image '{orig_path}' in default directory")
s = os.path.split(orig_path)
path = force_unix_path(os.path.join(img_directory, *s))
if os.path.exists(path):
@@ -445,7 +444,7 @@ class BaseManager:

# For local server we allow using absolute path outside image directory
if Config.instance().settings.Server.local is True:
log.debug("Searching for '{}'".format(orig_path))
log.debug(f"Searching for '{orig_path}'")
path = force_unix_path(path)
if os.path.exists(path):
return path
@@ -454,12 +453,12 @@ class BaseManager:
# Check to see if path is an absolute path to a valid directory
path = force_unix_path(path)
for directory in valid_directory_prefices:
log.debug("Searching for image '{}' in '{}'".format(orig_path, directory))
log.debug(f"Searching for image '{orig_path}' in '{directory}'")
if os.path.commonprefix([directory, path]) == directory:
if os.path.exists(path):
return path
raise ImageMissingError(orig_path)
raise NodeError("{} is not allowed on this remote server. Please only use a file from '{}'".format(path, img_directory))
raise NodeError(f"'{path}' is not allowed on this remote server. Please only use a file from '{img_directory}'")

def _recursive_search_file_in_directory(self, directory, searched_file):
"""
@@ -518,7 +517,7 @@ class BaseManager:
try:
return list_images(self._NODE_TYPE)
except OSError as e:
raise ComputeError("Can not list images {}".format(e))
raise ComputeError(f"Can not list images {e}")

def get_images_directory(self):
"""
@@ -534,8 +533,8 @@ class BaseManager:
directory = self.get_images_directory()
path = os.path.abspath(os.path.join(directory, *os.path.split(filename)))
if os.path.commonprefix([directory, path]) != directory:
raise ComputeForbiddenError("Could not write image: {}, {} is forbidden".format(filename, path))
log.info("Writing image file to '{}'".format(path))
raise ComputeForbiddenError(f"Could not write image: {filename}, '{path}' is forbidden")
log.info(f"Writing image file to '{path}'")
try:
remove_checksum(path)
# We store the file under his final name only when the upload is finished
@@ -548,7 +547,7 @@ class BaseManager:
shutil.move(tmp_path, path)
await cancellable_wait_run_in_executor(md5sum, path)
except OSError as e:
raise ComputeError("Could not write image: {} because {}".format(filename, e))
raise ComputeError(f"Could not write image '{filename}': {e}")

def reset(self):
"""
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -290,7 +289,7 @@ class BaseNode:
try:
self._temporary_directory = tempfile.mkdtemp()
except OSError as e:
raise NodeError("Can't create temporary directory: {}".format(e))
raise NodeError(f"Can't create temporary directory: {e}")
return self._temporary_directory

def create(self):
@@ -315,7 +314,7 @@ class BaseNode:
try:
await wait_run_in_executor(shutil.rmtree, directory, onerror=set_rw)
except OSError as e:
raise ComputeError("Could not delete the node working directory: {}".format(e))
raise ComputeError(f"Could not delete the node working directory: {e}")

def start(self):
"""
@@ -416,13 +415,13 @@ class BaseNode:

if self._wrap_console and self._console_type == "telnet":
await self._wrap_telnet_proxy(self._internal_console_port, self.console)
log.info("New Telnet proxy server for console started (internal port = {}, external port = {})".format(self._internal_console_port,
self.console))
log.info(f"New Telnet proxy server for console started "
f"(internal port = {self._internal_console_port}, external port = {self.console})")

if self._wrap_aux and self._aux_type == "telnet":
await self._wrap_telnet_proxy(self._internal_aux_port, self.aux)
log.info("New Telnet proxy server for auxiliary console started (internal port = {}, external port = {})".format(self._internal_aux_port,
self.aux))
log.info(f"New Telnet proxy server for auxiliary console started "
f"(internal port = {self._internal_aux_port}, external port = {self.aux})")

async def stop_wrap_console(self):
"""
@@ -450,16 +449,16 @@ class BaseNode:
"""

if self.status != "started":
raise NodeError("Node {} is not started".format(self.name))
raise NodeError(f"Node {self.name} is not started")

if self._console_type != "telnet":
raise NodeError("Node {} console type is not telnet".format(self.name))
raise NodeError(f"Node {self.name} console type is not telnet")

try:
(telnet_reader, telnet_writer) = await asyncio.open_connection(self._manager.port_manager.console_host,
self.console)
except ConnectionError as e:
raise NodeError("Cannot connect to node {} telnet server: {}".format(self.name, e))
raise NodeError(f"Cannot connect to node {self.name} telnet server: {e}")

log.info("Connected to Telnet server")

@@ -518,7 +517,7 @@ class BaseNode:
return

if self._aux_type == "vnc" and aux is not None and aux < 5900:
raise NodeError("VNC auxiliary console require a port superior or equal to 5900, current port is {}".format(aux))
raise NodeError(f"VNC auxiliary console require a port superior or equal to 5900, current port is {aux}")

if self._aux:
self._manager.port_manager.release_tcp_port(self._aux, self._project)
@@ -556,7 +555,7 @@ class BaseNode:
return

if self._console_type == "vnc" and console is not None and console < 5900:
raise NodeError("VNC console require a port superior or equal to 5900, current port is {}".format(console))
raise NodeError(f"VNC console require a port superior or equal to 5900, current port is {console}")

if self._console:
self._manager.port_manager.release_tcp_port(self._console, self._project)
@@ -697,11 +696,11 @@ class BaseNode:
if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running():
await self._start_ubridge(self._ubridge_require_privileged_access)
if not self._ubridge_hypervisor or not self._ubridge_hypervisor.is_running():
raise NodeError("Cannot send command '{}': uBridge is not running".format(command))
raise NodeError(f"Cannot send command '{command}': uBridge is not running")
try:
await self._ubridge_hypervisor.send(command)
except UbridgeError as e:
raise UbridgeError("Error while sending command '{}': {}: {}".format(command, e, self._ubridge_hypervisor.read_stdout()))
raise UbridgeError(f"Error while sending command '{command}': {e}: {self._ubridge_hypervisor.read_stdout()}")

@locking
async def _start_ubridge(self, require_privileged_access=False):
@@ -722,10 +721,10 @@ class BaseNode:
server_host = self._manager.config.settings.Server.host
if not self.ubridge:
self._ubridge_hypervisor = Hypervisor(self._project, self.ubridge_path, self.working_dir, server_host)
log.info("Starting new uBridge hypervisor {}:{}".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
log.info(f"Starting new uBridge hypervisor {self._ubridge_hypervisor.host}:{self._ubridge_hypervisor.port}")
await self._ubridge_hypervisor.start()
if self._ubridge_hypervisor:
log.info("Hypervisor {}:{} has successfully started".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
log.info(f"Hypervisor {self._ubridge_hypervisor.host}:{self._ubridge_hypervisor.port} has successfully started")
await self._ubridge_hypervisor.connect()
# save if privileged are required in case uBridge needs to be restarted in self._ubridge_send()
self._ubridge_require_privileged_access = require_privileged_access
@@ -736,7 +735,7 @@ class BaseNode:
"""

if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
log.info("Stopping uBridge hypervisor {}:{}".format(self._ubridge_hypervisor.host, self._ubridge_hypervisor.port))
log.info(f"Stopping uBridge hypervisor {self._ubridge_hypervisor.host}:{self._ubridge_hypervisor.port}")
await self._ubridge_hypervisor.stop()
self._ubridge_hypervisor = None
@@ -749,26 +748,32 @@ class BaseNode:
:param destination_nio: destination NIO instance
"""

await self._ubridge_send("bridge create {name}".format(name=bridge_name))
await self._ubridge_send(f"bridge create {bridge_name}")

if not isinstance(destination_nio, NIOUDP):
raise NodeError("Destination NIO is not UDP")

await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
lport=source_nio.lport,
rhost=source_nio.rhost,
rport=source_nio.rport))
await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(
name=bridge_name,
lport=source_nio.lport,
rhost=source_nio.rhost,
rport=source_nio.rport)
)

await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
lport=destination_nio.lport,
rhost=destination_nio.rhost,
rport=destination_nio.rport))
await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(
name=bridge_name,
lport=destination_nio.lport,
rhost=destination_nio.rhost,
rport=destination_nio.rport)
)

if destination_nio.capturing:
await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name,
pcap_file=destination_nio.pcap_output_file))
await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(
name=bridge_name,
pcap_file=destination_nio.pcap_output_file)
)

await self._ubridge_send('bridge start {name}'.format(name=bridge_name))
await self._ubridge_send(f"bridge start {bridge_name}")
await self._ubridge_apply_filters(bridge_name, destination_nio.filters)

async def update_ubridge_udp_connection(self, bridge_name, source_nio, destination_nio):
@@ -781,7 +786,7 @@ class BaseNode:
"""

if self.ubridge:
await self._ubridge_send("bridge delete {name}".format(name=name))
await self._ubridge_send(f"bridge delete {name}")

async def _ubridge_apply_filters(self, bridge_name, filters):
"""
@@ -793,13 +798,13 @@ class BaseNode:

await self._ubridge_send('bridge reset_packet_filters ' + bridge_name)
for packet_filter in self._build_filter_list(filters):
cmd = 'bridge add_packet_filter {} {}'.format(bridge_name, packet_filter)
cmd = f"bridge add_packet_filter {bridge_name} {packet_filter}"
try:
await self._ubridge_send(cmd)
except UbridgeError as e:
match = re.search(r"Cannot compile filter '(.*)': syntax error", str(e))
if match:
message = "Warning: ignoring BPF packet filter '{}' due to syntax error".format(self.name, match.group(1))
message = f"Warning: ignoring BPF packet filter '{self.name}' due to syntax error: {match.group(1)}"
log.warning(message)
self.project.emit("log.warning", {"message": message})
else:
@@ -838,7 +843,10 @@ class BaseNode:

if sys.platform.startswith("linux") and block_host_traffic is False:
# on Linux we use RAW sockets by default excepting if host traffic must be blocked
await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface))
await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(
name=bridge_name,
interface=ethernet_interface)
)
elif sys.platform.startswith("win"):
# on Windows we use Winpcap/Npcap
windows_interfaces = interfaces()
@@ -853,27 +861,38 @@ class BaseNode:
npf_id = interface["id"]
source_mac = interface["mac_address"]
if npf_id:
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name,
interface=npf_id))
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(
name=bridge_name,
interface=npf_id)
)
else:
raise NodeError("Could not find NPF id for interface {}".format(ethernet_interface))
raise NodeError(f"Could not find NPF id for interface {ethernet_interface}")

if block_host_traffic:
if source_mac:
await self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac))
log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac))
await self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(
name=bridge_name,
mac=source_mac)
)
log.info(f"PCAP filter applied on '{ethernet_interface}' for source MAC {source_mac}")
else:
log.warning("Could not block host network traffic on {} (no MAC address found)".format(ethernet_interface))
log.warning(f"Could not block host network traffic on {ethernet_interface} (no MAC address found)")
else:
# on other platforms we just rely on the pcap library
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=ethernet_interface))
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(
name=bridge_name,
interface=ethernet_interface)
)
source_mac = None
for interface in interfaces():
if interface["name"] == ethernet_interface:
source_mac = interface["mac_address"]
if source_mac:
await self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(name=bridge_name, mac=source_mac))
log.info('PCAP filter applied on "{interface}" for source MAC {mac}'.format(interface=ethernet_interface, mac=source_mac))
await self._ubridge_send('bridge set_pcap_filter {name} "not ether src {mac}"'.format(
name=bridge_name,
mac=source_mac)
)
log.info(f"PCAP filter applied on '{ethernet_interface}' for source MAC {source_mac}")

def _create_local_udp_tunnel(self):
"""
@@ -889,11 +908,13 @@ class BaseNode:
destination_nio_settings = {'lport': rport, 'rhost': '127.0.0.1', 'rport': lport, 'type': 'nio_udp'}
source_nio = self.manager.create_nio(source_nio_settings)
destination_nio = self.manager.create_nio(destination_nio_settings)
log.info("{module}: '{name}' [{id}]:local UDP tunnel created between port {port1} and {port2}".format(module=self.manager.module_name,
name=self.name,
id=self.id,
port1=lport,
port2=rport))
log.info("{module}: '{name}' [{id}]:local UDP tunnel created between port {port1} and {port2}".format(
module=self.manager.module_name,
name=self.name,
id=self.id,
port1=lport,
port2=rport)
)
return source_nio, destination_nio

@property
@@ -916,11 +937,13 @@ class BaseNode:
available_ram = int(psutil.virtual_memory().available / (1024 * 1024))
percentage_left = psutil.virtual_memory().percent
if requested_ram > available_ram:
message = '"{}" requires {}MB of RAM to run but there is only {}MB - {}% of RAM left on "{}"'.format(self.name,
requested_ram,
available_ram,
percentage_left,
platform.node())
message = '"{}" requires {}MB of RAM to run but there is only {}MB - {}% of RAM left on "{}"'.format(
self.name,
requested_ram,
available_ram,
percentage_left,
platform.node()
)
self.project.emit("log.warning", {"message": message})

def _get_custom_adapter_settings(self, adapter_number):
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -40,6 +39,6 @@ class BuiltinNodeFactory:
def __new__(cls, name, node_id, project, manager, node_type, **kwargs):

if node_type not in BUILTIN_NODES:
raise NodeError("Unknown node type: {}".format(node_type))
raise NodeError(f"Unknown node type: {node_type}")

return BUILTIN_NODES[node_type](name, node_id, project, manager, **kwargs)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -213,7 +212,7 @@ class Cloud(BaseNode):
"""

await self.start()
log.info('Cloud "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
log.info(f'Cloud "{self._name}" [{self._id}] has been created')

async def start(self):
"""
@@ -246,7 +245,7 @@ class Cloud(BaseNode):
self.manager.port_manager.release_udp_port(nio.lport, self._project)

await self._stop_ubridge()
log.info('Cloud "{name}" [{id}] has been closed'.format(name=self._name, id=self._id))
log.info(f'Cloud "{self._name}" [{self._id}] has been closed')

async def _is_wifi_adapter_osx(self, adapter_name):
"""
@@ -256,7 +255,7 @@ class Cloud(BaseNode):
try:
output = await gns3server.utils.asyncio.subprocess_check_output("networksetup", "-listallhardwareports")
except (OSError, subprocess.SubprocessError) as e:
log.warning("Could not execute networksetup: {}".format(e))
log.warning(f"Could not execute networksetup: {e}")
return False

is_wifi = False
@@ -285,17 +284,21 @@ class Cloud(BaseNode):
break

if not port_info:
raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name,
port_number=port_number))
raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(
name=self.name,
port_number=port_number)
)

bridge_name = "{}-{}".format(self._id, port_number)
await self._ubridge_send("bridge create {name}".format(name=bridge_name))
bridge_name = f"{self._id}-{port_number}"
await self._ubridge_send(f"bridge create {bridge_name}")
if not isinstance(nio, NIOUDP):
raise NodeError("Source NIO is not UDP")
await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
lport=nio.lport,
rhost=nio.rhost,
rport=nio.rport))
await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(
name=bridge_name,
lport=nio.lport,
rhost=nio.rhost,
rport=nio.rport)
)

await self._ubridge_apply_filters(bridge_name, nio.filters)
if port_info["type"] in ("ethernet", "tap"):
@@ -310,7 +313,7 @@ class Cloud(BaseNode):
if port_info["type"] == "ethernet":
network_interfaces = [interface["name"] for interface in self._interfaces()]
if not port_info["interface"] in network_interfaces:
raise NodeError("Interface '{}' could not be found on this system, please update '{}'".format(port_info["interface"], self.name))
raise NodeError(f"Interface '{port_info['interface']}' could not be found on this system, please update '{self.name}'")

if sys.platform.startswith("linux"):
await self._add_linux_ethernet(port_info, bridge_name)
@@ -320,19 +323,26 @@ class Cloud(BaseNode):
await self._add_windows_ethernet(port_info, bridge_name)

elif port_info["type"] == "tap":
await self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))
await self._ubridge_send('bridge add_nio_tap {name} "{interface}"'.format(
name=bridge_name,
interface=port_info["interface"])
)

elif port_info["type"] == "udp":
await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(name=bridge_name,
lport=port_info["lport"],
rhost=port_info["rhost"],
rport=port_info["rport"]))
await self._ubridge_send('bridge add_nio_udp {name} {lport} {rhost} {rport}'.format(
name=bridge_name,
lport=port_info["lport"],
rhost=port_info["rhost"],
rport=port_info["rport"])
)

if nio.capturing:
await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(name=bridge_name,
pcap_file=nio.pcap_output_file))
await self._ubridge_send('bridge start_capture {name} "{pcap_file}"'.format(
name=bridge_name,
pcap_file=nio.pcap_output_file)
)

await self._ubridge_send('bridge start {name}'.format(name=bridge_name))
await self._ubridge_send(f"bridge start {bridge_name}")
async def _add_linux_ethernet(self, port_info, bridge_name):
"""
@@ -352,10 +362,19 @@ class Cloud(BaseNode):
break
i += 1

await self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap))
await self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface))
await self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(
name=bridge_name,
interface=tap)
)
await self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(
tap=tap,
interface=interface)
)
else:
await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface))
await self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(
name=bridge_name,
interface=interface)
)

async def _add_osx_ethernet(self, port_info, bridge_name):
"""
@@ -367,12 +386,17 @@ class Cloud(BaseNode):
raise NodeError("Connecting to a Wireless adapter is not supported on Mac OS")
if port_info["interface"].startswith("vmnet"):
# Use a special NIO to connect to VMware vmnet interfaces on OSX (libpcap doesn't support them)
await self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(name=bridge_name,
interface=port_info["interface"]))
await self._ubridge_send('bridge add_nio_fusion_vmnet {name} "{interface}"'.format(
name=bridge_name,
interface=port_info["interface"])
)
return
if not gns3server.utils.interfaces.has_netmask(port_info["interface"]):
raise NodeError("Interface {} has no netmask, interface down?".format(port_info["interface"]))
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))
raise NodeError(f"Interface {port_info['interface']} has no netmask, interface down?")
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(
name=bridge_name,
interface=port_info["interface"])
)

async def _add_windows_ethernet(self, port_info, bridge_name):
"""
@@ -380,8 +404,11 @@ class Cloud(BaseNode):
"""

if not gns3server.utils.interfaces.has_netmask(port_info["interface"]):
raise NodeError("Interface {} has no netmask, interface down?".format(port_info["interface"]))
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(name=bridge_name, interface=port_info["interface"]))
raise NodeError(f"Interface {port_info['interface']} has no netmask, interface down?")
await self._ubridge_send('bridge add_nio_ethernet {name} "{interface}"'.format(
name=bridge_name,
interface=port_info["interface"])
)

async def add_nio(self, nio, port_number):
"""
@@ -392,7 +419,7 @@ class Cloud(BaseNode):
"""

if port_number in self._nios:
raise NodeError("Port {} isn't free".format(port_number))
raise NodeError(f"Port {port_number} isn't free")

log.info('Cloud "{name}" [{id}]: NIO {nio} bound to port {port}'.format(name=self._name,
id=self._id,
@@ -416,7 +443,7 @@ class Cloud(BaseNode):
:param port_number: port to allocate for the NIO
"""

bridge_name = "{}-{}".format(self._id, port_number)
bridge_name = f"{self._id}-{port_number}"
if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
await self._ubridge_apply_filters(bridge_name, nio.filters)

@@ -427,8 +454,8 @@ class Cloud(BaseNode):
:param port_number: adapter number
"""

bridge_name = "{}-{}".format(self._id, port_number)
await self._ubridge_send("bridge delete {name}".format(name=bridge_name))
bridge_name = f"{self._id}-{port_number}"
await self._ubridge_send(f"bridge delete {bridge_name}")
async def remove_nio(self, port_number):
"""
@@ -440,17 +467,19 @@ class Cloud(BaseNode):
"""

if port_number not in self._nios:
raise NodeError("Port {} is not allocated".format(port_number))
raise NodeError(f"Port {port_number} is not allocated")

await self.stop_capture(port_number)
nio = self._nios[port_number]
if isinstance(nio, NIOUDP):
self.manager.port_manager.release_udp_port(nio.lport, self._project)

log.info('Cloud "{name}" [{id}]: NIO {nio} removed from port {port}'.format(name=self._name,
id=self._id,
nio=nio,
port=port_number))
log.info('Cloud "{name}" [{id}]: NIO {nio} removed from port {port}'.format(
name=self._name,
id=self._id,
nio=nio,
port=port_number)
)

del self._nios[port_number]
if self._ubridge_hypervisor and self._ubridge_hypervisor.is_running():
@@ -468,11 +497,13 @@ class Cloud(BaseNode):
"""

if not [port["port_number"] for port in self._ports_mapping if port_number == port["port_number"]]:
raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(name=self.name,
port_number=port_number))
raise NodeError("Port {port_number} doesn't exist on cloud '{name}'".format(
name=self.name,
port_number=port_number)
)

if port_number not in self._nios:
raise NodeError("Port {} is not connected".format(port_number))
raise NodeError(f"Port {port_number} is not connected")

nio = self._nios[port_number]

@@ -489,14 +520,18 @@ class Cloud(BaseNode):

nio = self.get_nio(port_number)
if nio.capturing:
raise NodeError("Packet capture is already activated on port {port_number}".format(port_number=port_number))
raise NodeError(f"Packet capture is already activated on port {port_number}")
nio.start_packet_capture(output_file)
bridge_name = "{}-{}".format(self._id, port_number)
await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=bridge_name,
output_file=output_file))
log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(name=self.name,
id=self.id,
port_number=port_number))
bridge_name = f"{self._id}-{port_number}"
await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(
name=bridge_name,
output_file=output_file)
)
log.info("Cloud '{name}' [{id}]: starting packet capture on port {port_number}".format(
name=self.name,
id=self.id,
port_number=port_number)
)

async def stop_capture(self, port_number):
"""
@@ -509,9 +544,11 @@ class Cloud(BaseNode):
if not nio.capturing:
return
nio.stop_packet_capture()
bridge_name = "{}-{}".format(self._id, port_number)
await self._ubridge_send("bridge stop_capture {name}".format(name=bridge_name))
bridge_name = f"{self._id}-{port_number}"
await self._ubridge_send(f"bridge stop_capture {bridge_name}")

log.info("Cloud'{name}' [{id}]: stopping packet capture on port {port_number}".format(name=self.name,
id=self.id,
port_number=port_number))
log.info("Cloud'{name}' [{id}]: stopping packet capture on port {port_number}".format(
name=self.name,
id=self.id,
port_number=port_number)
)
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -51,7 +50,7 @@ class EthernetHub(BaseNode):
"""

super().create()
log.info('Ethernet hub "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
log.info(f'Ethernet hub "{self._name}" [{self._id}] has been created')

async def delete(self):
"""

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
@@ -51,7 +50,7 @@ class EthernetSwitch(BaseNode):
"""

super().create()
log.info('Ethernet switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
log.info(f'Ethernet switch "{self._name}" [{self._id}] has been created')

async def delete(self):
"""
@@ -40,7 +40,7 @@ class Nat(Cloud):
if not nat_interface:
nat_interface = "virbr0"
if nat_interface not in [interface["name"] for interface in gns3server.utils.interfaces.interfaces()]:
raise NodeError("NAT interface {} is missing, please install libvirt".format(nat_interface))
raise NodeError(f"NAT interface {nat_interface} is missing, please install libvirt")
interface = nat_interface
else:
nat_interface = Config.instance().settings.Server.default_nat_interface
@@ -49,10 +49,11 @@ class Nat(Cloud):
interfaces = list(filter(lambda x: nat_interface in x.lower(),
[interface["name"] for interface in gns3server.utils.interfaces.interfaces()]))
if not len(interfaces):
raise NodeError("NAT interface {} is missing. You need to install VMware or use the NAT node on GNS3 VM".format(nat_interface))
raise NodeError(f"NAT interface {nat_interface} is missing. "
f"You need to install VMware or use the NAT node on GNS3 VM")
interface = interfaces[0] # take the first available interface containing the vmnet8 name

log.info("NAT node '{}' configured to use NAT interface '{}'".format(name, interface))
log.info(f"NAT node '{name}' configured to use NAT interface '{interface}'")
ports = [
{
"name": "nat0",
@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@@ -68,9 +67,8 @@ class Docker(BaseManager):
docker_version = parse_version(version['ApiVersion'])

if docker_version < parse_version(DOCKER_MINIMUM_API_VERSION):
raise DockerError(
"Docker version is {}. GNS3 requires a minimum version of {}".format(version["Version"],
DOCKER_MINIMUM_VERSION))
raise DockerError(f"Docker version is {version['Version']}. "
f"GNS3 requires a minimum version of {DOCKER_MINIMUM_VERSION}")

preferred_api_version = parse_version(DOCKER_PREFERRED_API_VERSION)
if docker_version >= preferred_api_version:
@@ -150,7 +148,7 @@ class Docker(BaseManager):
headers={"content-type": "application/json", },
timeout=timeout)
except aiohttp.ClientError as e:
raise DockerError("Docker has returned an error: {}".format(str(e)))
raise DockerError(f"Docker has returned an error: {e}")
except (asyncio.TimeoutError):
raise DockerError("Docker timeout " + method + " " + path)
if response.status >= 300:
@@ -159,13 +157,13 @@ class Docker(BaseManager):
body = json.loads(body.decode("utf-8"))["message"]
except ValueError:
pass
log.debug("Query Docker %s %s params=%s data=%s Response: %s", method, path, params, data, body)
log.debug(f"Query Docker {method} {path} params={params} data={data} Response: {body}")
if response.status == 304:
raise DockerHttp304Error("Docker has returned an error: {} {}".format(response.status, body))
raise DockerHttp304Error(f"Docker has returned an error: {response.status} {body}")
elif response.status == 404:
raise DockerHttp404Error("Docker has returned an error: {} {}".format(response.status, body))
raise DockerHttp404Error(f"Docker has returned an error: {response.status} {body}")
else:
raise DockerError("Docker has returned an error: {} {}".format(response.status, body))
raise DockerError(f"Docker has returned an error: {response.status} {body}")
return response

async def websocket_query(self, path, params={}):
@@ -191,27 +189,28 @@ class Docker(BaseManager):
"""

try:
await self.query("GET", "images/{}/json".format(image))
await self.query("GET", f"images/{image}/json")
return # We already have the image skip the download
except DockerHttp404Error:
pass

if progress_callback:
progress_callback("Pulling '{}' from docker hub".format(image))
progress_callback(f"Pulling '{image}' from docker hub")
try:
response = await self.http_query("POST", "images/create", params={"fromImage": image}, timeout=None)
except DockerError as e:
raise DockerError("Could not pull the '{}' image from Docker Hub, please check your Internet connection (original error: {})".format(image, e))
raise DockerError(f"Could not pull the '{image}' image from Docker Hub, "
f"please check your Internet connection (original error: {e})")
# The pull api will stream status via an HTTP JSON stream
content = ""
while True:
try:
chunk = await response.content.read(CHUNK_SIZE)
except aiohttp.ServerDisconnectedError:
log.error("Disconnected from server while pulling Docker image '{}' from docker hub".format(image))
log.error(f"Disconnected from server while pulling Docker image '{image}' from docker hub")
break
except asyncio.TimeoutError:
log.error("Timeout while pulling Docker image '{}' from docker hub".format(image))
log.error(f"Timeout while pulling Docker image '{image}' from docker hub")
break
if not chunk:
break
@@ -228,7 +227,7 @@ class Docker(BaseManager):
pass
response.close()
if progress_callback:
progress_callback("Success pulling image {}".format(image))
progress_callback(f"Success pulling image {image}")

async def list_images(self):
"""
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@ -78,7 +77,7 @@ class DockerVM(BaseNode):

# force the latest image if no version is specified
if ":" not in image:
image = "{}:latest".format(image)
image = f"{image}:latest"
self._image = image
self._start_command = start_command
self._environment = environment
@ -110,9 +109,11 @@ class DockerVM(BaseNode):
else:
self.adapters = adapters

log.debug("{module}: {name} [{image}] initialized.".format(module=self.manager.module_name,
name=self.name,
image=self._image))
log.debug("{module}: {name} [{image}] initialized.".format(
module=self.manager.module_name,
name=self.name,
image=self._image)
)

def __json__(self):
return {
@ -148,7 +149,7 @@ class DockerVM(BaseNode):
if not os.path.exists("/tmp/.X11-unix/"):
return display
while True:
if not os.path.exists("/tmp/.X11-unix/X{}".format(display)):
if not os.path.exists(f"/tmp/.X11-unix/X{display}"):
return display
display += 1

@ -242,7 +243,7 @@ class DockerVM(BaseNode):
"""

try:
result = await self.manager.query("GET", "containers/{}/json".format(self._cid))
result = await self.manager.query("GET", f"containers/{self._cid}/json")
except DockerError:
return "exited"

@ -257,7 +258,7 @@ class DockerVM(BaseNode):
:returns: Dictionary information about the container image
"""

result = await self.manager.query("GET", "images/{}/json".format(self._image))
result = await self.manager.query("GET", f"images/{self._image}/json")
return result

def _mount_binds(self, image_info):
@ -267,20 +268,20 @@ class DockerVM(BaseNode):

resources = get_resource("compute/docker/resources")
if not os.path.exists(resources):
raise DockerError("{} is missing can't start Docker containers".format(resources))
binds = ["{}:/gns3:ro".format(resources)]
raise DockerError(f"{resources} is missing, can't start Docker container")
binds = [f"{resources}:/gns3:ro"]

# We mount our own etc/network
try:
self._create_network_config()
except OSError as e:
raise DockerError("Could not create network config in the container: {}".format(e))
raise DockerError(f"Could not create network config in the container: {e}")
volumes = ["/etc/network"]

volumes.extend((image_info.get("Config", {}).get("Volumes") or {}).keys())
for volume in self._extra_volumes:
if not volume.strip() or volume[0] != "/" or volume.find("..") >= 0:
raise DockerError("Persistent volume '{}' has invalid format. It must start with a '/' and not contain '..'.".format(volume))
raise DockerError(f"Persistent volume '{volume}' has invalid format. It must start with a '/' and not contain '..'.")
volumes.extend(self._extra_volumes)

self._volumes = []
@ -297,7 +298,7 @@ class DockerVM(BaseNode):
for volume in self._volumes:
source = os.path.join(self.working_dir, os.path.relpath(volume, "/"))
os.makedirs(source, exist_ok=True)
binds.append("{}:/gns3volumes{}".format(source, volume))
binds.append(f"{source}:/gns3volumes{volume}")

return binds

@ -343,16 +344,17 @@ class DockerVM(BaseNode):
try:
image_infos = await self._get_image_information()
except DockerHttp404Error:
log.info("Image '{}' is missing, pulling it from Docker hub...".format(self._image))
log.info(f"Image '{self._image}' is missing, pulling it from Docker hub...")
await self.pull_image(self._image)
image_infos = await self._get_image_information()

if image_infos is None:
raise DockerError("Cannot get information for image '{}', please try again.".format(self._image))
raise DockerError(f"Cannot get information for image '{self._image}', please try again.")

available_cpus = psutil.cpu_count(logical=True)
if self._cpus > available_cpus:
raise DockerError("You have allocated too many CPUs for the Docker container (max available is {} CPUs)".format(available_cpus))
raise DockerError(f"You have allocated too many CPUs for the Docker container "
f"(max available is {available_cpus} CPUs)")

params = {
"Hostname": self._name,
@ -381,7 +383,7 @@ class DockerVM(BaseNode):
try:
params["Cmd"] = shlex.split(self._start_command)
except ValueError as e:
raise DockerError("Invalid start command '{}': {}".format(self._start_command, e))
raise DockerError(f"Invalid start command '{self._start_command}': {e}")
if len(params["Cmd"]) == 0:
params["Cmd"] = image_infos.get("Config", {"Cmd": []}).get("Cmd")
if params["Cmd"] is None:
@ -391,7 +393,7 @@ class DockerVM(BaseNode):
params["Entrypoint"].insert(0, "/gns3/init.sh") # FIXME /gns3/init.sh is not found?

# Give the information to the container on how many interface should be inside
params["Env"].append("GNS3_MAX_ETHERNET=eth{}".format(self.adapters - 1))
params["Env"].append(f"GNS3_MAX_ETHERNET=eth{self.adapters - 1}")
# Give the information to the container the list of volume path mounted
params["Env"].append("GNS3_VOLUMES={}".format(":".join(self._volumes)))

@ -412,7 +414,7 @@ class DockerVM(BaseNode):
for e in self._environment.strip().split("\n"):
e = e.strip()
if e.split("=")[0] == "":
self.project.emit("log.warning", {"message": "{} has invalid environment variable: {}".format(self.name, e)})
self.project.emit("log.warning", {"message": f"{self.name} has invalid environment variable: {e}"})
continue
if not e.startswith("GNS3_"):
formatted = self._format_env(variables, e)
@ -421,17 +423,17 @@ class DockerVM(BaseNode):
if self._console_type == "vnc":
await self._start_vnc()
params["Env"].append("QT_GRAPHICSSYSTEM=native") # To fix a Qt issue: https://github.com/GNS3/gns3-server/issues/556
params["Env"].append("DISPLAY=:{}".format(self._display))
params["Env"].append(f"DISPLAY=:{self._display}")
params["HostConfig"]["Binds"].append("/tmp/.X11-unix/:/tmp/.X11-unix/")

if self._extra_hosts:
extra_hosts = self._format_extra_hosts(self._extra_hosts)
if extra_hosts:
params["Env"].append("GNS3_EXTRA_HOSTS={}".format(extra_hosts))
params["Env"].append(f"GNS3_EXTRA_HOSTS={extra_hosts}")

result = await self.manager.query("POST", "containers/create", data=params)
self._cid = result['Id']
log.info("Docker container '{name}' [{id}] created".format(name=self._name, id=self._id))
log.info(f"Docker container '{self._name}' [{self._id}] created")
return True

def _format_env(self, variables, env):
@ -450,8 +452,8 @@ class DockerVM(BaseNode):
if hostname and ip:
hosts.append((hostname, ip))
except ValueError:
raise DockerError("Can't apply `ExtraHosts`, wrong format: {}".format(extra_hosts))
return "\n".join(["{}\t{}".format(h[1], h[0]) for h in hosts])
raise DockerError(f"Can't apply `ExtraHosts`, wrong format: {extra_hosts}")
return "\n".join([f"{h[1]}\t{h[0]}" for h in hosts])

async def update(self):
"""
@ -479,8 +481,10 @@ class DockerVM(BaseNode):
try:
state = await self._get_container_state()
except DockerHttp404Error:
raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds.".format(name=self.name,
cid=self._cid))
raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds.".format(
name=self.name,
cid=self._cid)
)
if state == "paused":
await self.unpause()
elif state == "running":
@ -494,7 +498,7 @@ class DockerVM(BaseNode):

await self._clean_servers()

await self.manager.query("POST", "containers/{}/start".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/start")
self._namespace = await self._get_namespace()

await self._start_ubridge(require_privileged_access=True)
@ -524,10 +528,12 @@ class DockerVM(BaseNode):

self._permissions_fixed = False
self.status = "started"
log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(name=self._name,
image=self._image,
console=self.console,
console_type=self.console_type))
log.info("Docker container '{name}' [{image}] started listen for {console_type} on {console}".format(
name=self._name,
image=self._image,
console=self.console,
console_type=self.console_type)
)

async def _start_aux(self):
"""
@ -543,12 +549,12 @@ class DockerVM(BaseNode):
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)
except OSError as e:
raise DockerError("Could not start auxiliary console process: {}".format(e))
raise DockerError(f"Could not start auxiliary console process: {e}")
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True)
try:
self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux)))
self._telnet_servers.append(await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.aux))
except OSError as e:
raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.aux, e))
raise DockerError(f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.aux}: {e}")
log.debug(f"Docker container '{self.name}' started listen for auxiliary telnet on {self.aux}")

async def _fix_permissions(self):
@ -558,10 +564,10 @@ class DockerVM(BaseNode):
"""

state = await self._get_container_state()
log.info("Docker container '{name}' fix ownership, state = {state}".format(name=self._name, state=state))
log.info(f"Docker container '{self._name}' fix ownership, state = {state}")
if state == "stopped" or state == "exited":
# We need to restart it to fix permissions
await self.manager.query("POST", "containers/{}/start".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/start")

for volume in self._volumes:
log.debug("Docker container '{name}' [{image}] fix ownership on {path}".format(
@ -584,7 +590,7 @@ class DockerVM(BaseNode):
.format(uid=os.getuid(), gid=os.getgid(), path=volume),
)
except OSError as e:
raise DockerError("Could not fix permissions for {}: {}".format(volume, e))
raise DockerError(f"Could not fix permissions for {volume}: {e}")
await process.wait()
self._permissions_fixed = True

@ -608,13 +614,13 @@ class DockerVM(BaseNode):
"-rfbport", str(self.console),
"-AlwaysShared",
"-SecurityTypes", "None",
":{}".format(self._display),
f":{self._display}",
stdout=fd, stderr=subprocess.STDOUT)
else:
if restart is False:
self._xvfb_process = await asyncio.create_subprocess_exec("Xvfb",
"-nolisten",
"tcp", ":{}".format(self._display),
"tcp", f":{self._display}",
"-screen", "0",
self._console_resolution + "x16")

@ -625,7 +631,7 @@ class DockerVM(BaseNode):
"-nopw",
"-shared",
"-geometry", self._console_resolution,
"-display", "WAIT:{}".format(self._display),
"-display", f"WAIT:{self._display}",
"-rfbport", str(self.console),
"-rfbportv6", str(self.console),
"-noncache",
@ -642,17 +648,17 @@ class DockerVM(BaseNode):
if not (tigervnc_path or shutil.which("Xvfb") and shutil.which("x11vnc")):
raise DockerError("Please install TigerVNC server (recommended) or Xvfb + x11vnc before using VNC support")
await self._start_vnc_process()
x11_socket = os.path.join("/tmp/.X11-unix/", "X{}".format(self._display))
x11_socket = os.path.join("/tmp/.X11-unix/", f"X{self._display}")
try:
await wait_for_file_creation(x11_socket)
except asyncio.TimeoutError:
raise DockerError('x11 socket file "{}" does not exist'.format(x11_socket))
raise DockerError(f'x11 socket file "{x11_socket}" does not exist')

if not hasattr(sys, "_called_from_test") or not sys._called_from_test:
# Start vncconfig for tigervnc clipboard support, connection available only after socket creation.
tigervncconfig_path = shutil.which("vncconfig")
if tigervnc_path and tigervncconfig_path:
self._vncconfig_process = await asyncio.create_subprocess_exec(tigervncconfig_path, "-display", ":{}".format(self._display), "-nowin")
self._vncconfig_process = await asyncio.create_subprocess_exec(tigervncconfig_path, "-display", f":{self._display}", "-nowin")

# sometimes the VNC process can crash
monitor_process(self._vnc_process, self._vnc_callback)
@ -665,7 +671,7 @@ class DockerVM(BaseNode):
"""

if returncode != 0 and self._closing is False:
self.project.emit("log.error", {"message": "The vnc process has stopped with return code {} for node '{}'. Please restart this node.".format(returncode, self.name)})
self.project.emit("log.error", {"message": f"The vnc process has stopped with return code {returncode} for node '{self.name}'. Please restart this node."})
self._vnc_process = None

async def _start_http(self):
@ -679,15 +685,15 @@ class DockerVM(BaseNode):
# We replace host and port in the server answer otherwise some link could be broken
server = AsyncioRawCommandServer(command, replaces=[
(
'://127.0.0.1'.encode(), # {{HOST}} mean client host
'://{{HOST}}'.encode(),
b'://127.0.0.1', # {{HOST}} mean client host
b'://{{HOST}}',
),
(
':{}'.format(self._console_http_port).encode(),
':{}'.format(self.console).encode(),
f':{self._console_http_port}'.encode(),
f':{self.console}'.encode(),
)
])
self._telnet_servers.append((await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)))
self._telnet_servers.append(await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console))

async def _window_size_changed_callback(self, columns, rows):
"""
@ -699,7 +705,7 @@ class DockerVM(BaseNode):
"""

# resize the container TTY.
await self._manager.query("POST", "containers/{}/resize?h={}&w={}".format(self._cid, rows, columns))
await self._manager.query("POST", f"containers/{self._cid}/resize?h={rows}&w={columns}")


async def _start_console(self):
@ -724,11 +730,11 @@ class DockerVM(BaseNode):
input_stream = InputStream()
telnet = AsyncioTelnetServer(reader=output_stream, writer=input_stream, echo=True, naws=True, window_size_changed_callback=self._window_size_changed_callback)
try:
self._telnet_servers.append((await asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console)))
self._telnet_servers.append(await asyncio.start_server(telnet.run, self._manager.port_manager.console_host, self.console))
except OSError as e:
raise DockerError("Could not start Telnet server on socket {}:{}: {}".format(self._manager.port_manager.console_host, self.console, e))
raise DockerError(f"Could not start Telnet server on socket {self._manager.port_manager.console_host}:{self.console}: {e}")

self._console_websocket = await self.manager.websocket_query("containers/{}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1".format(self._cid))
self._console_websocket = await self.manager.websocket_query(f"containers/{self._cid}/attach/ws?stream=1&stdin=1&stdout=1&stderr=1")
input_stream.ws = self._console_websocket

output_stream.feed_data(self.name.encode() + b" console is now available... Press RETURN to get started.\r\n")
@ -750,7 +756,7 @@ class DockerVM(BaseNode):
elif msg.type == aiohttp.WSMsgType.BINARY:
out.feed_data(msg.data)
elif msg.type == aiohttp.WSMsgType.ERROR:
log.critical("Docker WebSocket Error: {}".format(ws.exception()))
log.critical(f"Docker WebSocket Error: {ws.exception()}")
else:
out.feed_eof()
await ws.close()
@ -785,7 +791,7 @@ class DockerVM(BaseNode):
Restart this Docker container.
"""

await self.manager.query("POST", "containers/{}/restart".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/restart")
log.info("Docker container '{name}' [{image}] restarted".format(
name=self._name, image=self._image))

@ -825,14 +831,14 @@ class DockerVM(BaseNode):
if state != "stopped" or state != "exited":
# t=5 number of seconds to wait before killing the container
try:
await self.manager.query("POST", "containers/{}/stop".format(self._cid), params={"t": 5})
log.info("Docker container '{name}' [{image}] stopped".format(name=self._name, image=self._image))
await self.manager.query("POST", f"containers/{self._cid}/stop", params={"t": 5})
log.info(f"Docker container '{self._name}' [{self._image}] stopped")
except DockerHttp304Error:
# Container is already stopped
pass
# Ignore runtime error because when closing the server
except RuntimeError as e:
log.debug("Docker runtime error when closing: {}".format(str(e)))
log.debug(f"Docker runtime error when closing: {str(e)}")
return
self.status = "stopped"

@ -841,18 +847,18 @@ class DockerVM(BaseNode):
Pauses this Docker container.
"""

await self.manager.query("POST", "containers/{}/pause".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/pause")
self.status = "suspended"
log.info("Docker container '{name}' [{image}] paused".format(name=self._name, image=self._image))
log.info(f"Docker container '{self._name}' [{self._image}] paused")

async def unpause(self):
"""
Unpauses this Docker container.
"""

await self.manager.query("POST", "containers/{}/unpause".format(self._cid))
await self.manager.query("POST", f"containers/{self._cid}/unpause")
self.status = "started"
log.info("Docker container '{name}' [{image}] unpaused".format(name=self._name, image=self._image))
log.info(f"Docker container '{self._name}' [{self._image}] unpaused")

async def close(self):
"""
@ -892,17 +898,17 @@ class DockerVM(BaseNode):
pass

if self._display:
display = "/tmp/.X11-unix/X{}".format(self._display)
display = f"/tmp/.X11-unix/X{self._display}"
try:
if os.path.exists(display):
os.remove(display)
except OSError as e:
log.warning("Could not remove display {}: {}".format(display, e))
log.warning(f"Could not remove display {display}: {e}")

# v – 1/True/true or 0/False/false, Remove the volumes associated to the container. Default false.
# force - 1/True/true or 0/False/false, Kill then remove the container. Default false.
try:
await self.manager.query("DELETE", "containers/{}".format(self._cid), params={"force": 1, "v": 1})
await self.manager.query("DELETE", f"containers/{self._cid}", params={"force": 1, "v": 1})
except DockerError:
pass
log.info("Docker container '{name}' [{image}] removed".format(
@ -916,7 +922,7 @@ class DockerVM(BaseNode):
self.manager.port_manager.release_udp_port(nio.lport, self._project)
# Ignore runtime error because when closing the server
except (DockerHttp404Error, RuntimeError) as e:
log.debug("Docker error when closing: {}".format(str(e)))
log.debug(f"Docker error when closing: {str(e)}")
return

async def _add_ubridge_connection(self, nio, adapter_number):
@ -934,14 +940,14 @@ class DockerVM(BaseNode):
adapter_number=adapter_number))

for index in range(4096):
if "tap-gns3-e{}".format(index) not in psutil.net_if_addrs():
adapter.host_ifc = "tap-gns3-e{}".format(str(index))
if f"tap-gns3-e{index}" not in psutil.net_if_addrs():
adapter.host_ifc = f"tap-gns3-e{str(index)}"
break
if adapter.host_ifc is None:
raise DockerError("Adapter {adapter_number} couldn't allocate interface on Docker container '{name}'. Too many Docker interfaces already exists".format(name=self.name,
adapter_number=adapter_number))
bridge_name = 'bridge{}'.format(adapter_number)
await self._ubridge_send('bridge create {}'.format(bridge_name))
bridge_name = f'bridge{adapter_number}'
await self._ubridge_send(f'bridge create {bridge_name}')
self._bridges.add(bridge_name)
await self._ubridge_send('bridge add_nio_tap bridge{adapter_number} {hostif}'.format(adapter_number=adapter_number,
hostif=adapter.host_ifc))
@ -958,12 +964,12 @@ class DockerVM(BaseNode):

async def _get_namespace(self):

result = await self.manager.query("GET", "containers/{}/json".format(self._cid))
result = await self.manager.query("GET", f"containers/{self._cid}/json")
return int(result['State']['Pid'])

async def _connect_nio(self, adapter_number, nio):

bridge_name = 'bridge{}'.format(adapter_number)
bridge_name = f'bridge{adapter_number}'
await self._ubridge_send('bridge add_nio_udp {bridge_name} {lport} {rhost} {rport}'.format(bridge_name=bridge_name,
lport=nio.lport,
rhost=nio.rhost,
@ -972,7 +978,7 @@ class DockerVM(BaseNode):
if nio.capturing:
await self._ubridge_send('bridge start_capture {bridge_name} "{pcap_file}"'.format(bridge_name=bridge_name,
pcap_file=nio.pcap_output_file))
await self._ubridge_send('bridge start {bridge_name}'.format(bridge_name=bridge_name))
await self._ubridge_send(f'bridge start {bridge_name}')
await self._ubridge_apply_filters(bridge_name, nio.filters)

async def adapter_add_nio_binding(self, adapter_number, nio):
@ -1007,7 +1013,7 @@ class DockerVM(BaseNode):
"""

if self.ubridge:
bridge_name = 'bridge{}'.format(adapter_number)
bridge_name = f'bridge{adapter_number}'
if bridge_name in self._bridges:
await self._ubridge_apply_filters(bridge_name, nio.filters)

@ -1029,8 +1035,8 @@ class DockerVM(BaseNode):
await self.stop_capture(adapter_number)
if self.ubridge:
nio = adapter.get_nio(0)
bridge_name = 'bridge{}'.format(adapter_number)
await self._ubridge_send("bridge stop {}".format(bridge_name))
bridge_name = f'bridge{adapter_number}'
await self._ubridge_send(f"bridge stop {bridge_name}")
await self._ubridge_send('bridge remove_nio_udp bridge{adapter} {lport} {rhost} {rport}'.format(adapter=adapter_number,
lport=nio.lport,
rhost=nio.rhost,
@ -1061,7 +1067,7 @@ class DockerVM(BaseNode):
nio = adapter.get_nio(0)

if not nio:
raise DockerError("Adapter {} is not connected".format(adapter_number))
raise DockerError(f"Adapter {adapter_number} is not connected")

return nio
@ -1112,10 +1118,10 @@ class DockerVM(BaseNode):
:param output_file: PCAP destination file for the capture
"""

adapter = "bridge{}".format(adapter_number)
adapter = f"bridge{adapter_number}"
if not self.ubridge:
raise DockerError("Cannot start the packet capture: uBridge is not running")
await self._ubridge_send('bridge start_capture {name} "{output_file}"'.format(name=adapter, output_file=output_file))
await self._ubridge_send(f'bridge start_capture {adapter} "{output_file}"')

async def _stop_ubridge_capture(self, adapter_number):
"""
@ -1124,10 +1130,10 @@ class DockerVM(BaseNode):
:param adapter_number: adapter number
"""

adapter = "bridge{}".format(adapter_number)
adapter = f"bridge{adapter_number}"
if not self.ubridge:
raise DockerError("Cannot stop the packet capture: uBridge is not running")
await self._ubridge_send("bridge stop_capture {name}".format(name=adapter))
await self._ubridge_send(f"bridge stop_capture {adapter}")

async def start_capture(self, adapter_number, output_file):
"""
@ -1139,7 +1145,7 @@ class DockerVM(BaseNode):

nio = self.get_nio(adapter_number)
if nio.capturing:
raise DockerError("Packet capture is already activated on adapter {adapter_number}".format(adapter_number=adapter_number))
raise DockerError(f"Packet capture is already activated on adapter {adapter_number}")

nio.start_packet_capture(output_file)
if self.status == "started" and self.ubridge:
@ -1174,7 +1180,7 @@ class DockerVM(BaseNode):
:returns: string
"""

result = await self.manager.query("GET", "containers/{}/logs".format(self._cid), params={"stderr": 1, "stdout": 1})
result = await self.manager.query("GET", f"containers/{self._cid}/logs", params={"stderr": 1, "stdout": 1})
return result

async def delete(self):
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@ -150,7 +149,7 @@ class Dynamips(BaseManager):
"""
self._dynamips_ids.setdefault(project_id, set())
if dynamips_id in self._dynamips_ids[project_id]:
raise DynamipsError("Dynamips identifier {} is already used by another router".format(dynamips_id))
raise DynamipsError(f"Dynamips identifier {dynamips_id} is already used by another router")
self._dynamips_ids[project_id].add(dynamips_id)

def release_dynamips_id(self, project_id, dynamips_id):
@ -178,7 +177,7 @@ class Dynamips(BaseManager):
try:
future.result()
except (Exception, GeneratorExit) as e:
log.error("Could not stop device hypervisor {}".format(e), exc_info=1)
log.error(f"Could not stop device hypervisor {e}", exc_info=1)
continue

async def project_closing(self, project):
@ -201,7 +200,7 @@ class Dynamips(BaseManager):
try:
future.result()
except (Exception, GeneratorExit) as e:
log.error("Could not delete device {}".format(e), exc_info=1)
log.error(f"Could not delete device {e}", exc_info=1)

async def project_closed(self, project):
"""
@ -222,12 +221,12 @@ class Dynamips(BaseManager):
files += glob.glob(os.path.join(glob.escape(project_dir), "*", "c[0-9][0-9][0-9][0-9]_i[0-9]*_log.txt"))
for file in files:
try:
log.debug("Deleting file {}".format(file))
log.debug(f"Deleting file {file}")
if file in self._ghost_files:
self._ghost_files.remove(file)
await wait_run_in_executor(os.remove, file)
except OSError as e:
log.warning("Could not delete file {}: {}".format(file, e))
log.warning(f"Could not delete file {file}: {e}")
continue

# Release the dynamips ids if we want to reload the same project
@ -255,9 +254,9 @@ class Dynamips(BaseManager):
if not dynamips_path:
raise DynamipsError("Could not find Dynamips")
if not os.path.isfile(dynamips_path):
raise DynamipsError("Dynamips {} is not accessible".format(dynamips_path))
raise DynamipsError(f"Dynamips {dynamips_path} is not accessible")
if not os.access(dynamips_path, os.X_OK):
raise DynamipsError("Dynamips {} is not executable".format(dynamips_path))
raise DynamipsError(f"Dynamips {dynamips_path} is not executable")

self._dynamips_path = dynamips_path
return dynamips_path
@ -284,7 +283,7 @@ class Dynamips(BaseManager):
try:
info = socket.getaddrinfo(server_host, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
if not info:
raise DynamipsError("getaddrinfo returns an empty list on {}".format(server_host))
raise DynamipsError(f"getaddrinfo returns an empty list on {server_host}")
for res in info:
af, socktype, proto, _, sa = res
# let the OS find an unused port for the Dynamips hypervisor
@ -293,17 +292,17 @@ class Dynamips(BaseManager):
port = sock.getsockname()[1]
break
except OSError as e:
raise DynamipsError("Could not find free port for the Dynamips hypervisor: {}".format(e))
raise DynamipsError(f"Could not find free port for the Dynamips hypervisor: {e}")

port_manager = PortManager.instance()
hypervisor = Hypervisor(self._dynamips_path, working_dir, server_host, port, port_manager.console_host)

log.info("Creating new hypervisor {}:{} with working directory {}".format(hypervisor.host, hypervisor.port, working_dir))
log.info(f"Creating new hypervisor {hypervisor.host}:{hypervisor.port} with working directory {working_dir}")
await hypervisor.start()
log.info("Hypervisor {}:{} has successfully started".format(hypervisor.host, hypervisor.port))
log.info(f"Hypervisor {hypervisor.host}:{hypervisor.port} has successfully started")
await hypervisor.connect()
if parse_version(hypervisor.version) < parse_version('0.2.11'):
raise DynamipsError("Dynamips version must be >= 0.2.11, detected version is {}".format(hypervisor.version))
raise DynamipsError(f"Dynamips version must be >= 0.2.11, detected version is {hypervisor.version}")

return hypervisor

@ -315,7 +314,7 @@ class Dynamips(BaseManager):
try:
await self._set_ghost_ios(vm)
except GeneratorExit:
log.warning("Could not create ghost IOS image {} (GeneratorExit)".format(vm.name))
log.warning(f"Could not create ghost IOS image {vm.name} (GeneratorExit)")

async def create_nio(self, node, nio_settings):
"""
@ -335,13 +334,13 @@ class Dynamips(BaseManager):
try:
info = socket.getaddrinfo(rhost, rport, socket.AF_UNSPEC, socket.SOCK_DGRAM, 0, socket.AI_PASSIVE)
if not info:
raise DynamipsError("getaddrinfo returns an empty list on {}:{}".format(rhost, rport))
raise DynamipsError(f"getaddrinfo returns an empty list on {rhost}:{rport}")
for res in info:
af, socktype, proto, _, sa = res
with socket.socket(af, socktype, proto) as sock:
sock.connect(sa)
except OSError as e:
raise DynamipsError("Could not create an UDP connection to {}:{}: {}".format(rhost, rport, e))
raise DynamipsError(f"Could not create an UDP connection to {rhost}:{rport}: {e}")
nio = NIOUDP(node, lport, rhost, rport)
nio.filters = nio_settings.get("filters", {})
nio.suspend = nio_settings.get("suspend", False)
@ -355,11 +354,11 @@ class Dynamips(BaseManager):
if interface["name"] == ethernet_device:
npf_interface = interface["id"]
if not npf_interface:
raise DynamipsError("Could not find interface {} on this host".format(ethernet_device))
raise DynamipsError(f"Could not find interface {ethernet_device} on this host")
else:
ethernet_device = npf_interface
if not is_interface_up(ethernet_device):
raise DynamipsError("Ethernet interface {} is down".format(ethernet_device))
raise DynamipsError(f"Ethernet interface {ethernet_device} is down")
nio = NIOGenericEthernet(node.hypervisor, ethernet_device)
elif nio_settings["type"] == "nio_linux_ethernet":
if sys.platform.startswith("win"):
@ -371,7 +370,7 @@ class Dynamips(BaseManager):
nio = NIOTAP(node.hypervisor, tap_device)
if not is_interface_up(tap_device):
# test after the TAP interface has been created (if it doesn't exist yet)
raise DynamipsError("TAP interface {} is down".format(tap_device))
raise DynamipsError(f"TAP interface {tap_device} is down")
elif nio_settings["type"] == "nio_unix":
local_file = nio_settings["local_file"]
remote_file = nio_settings["remote_file"]
@ -425,7 +424,7 @@ class Dynamips(BaseManager):
finally:
await ghost.clean_delete()
except DynamipsError as e:
log.warning("Could not create ghost instance: {}".format(e))
log.warning(f"Could not create ghost instance: {e}")

if vm.ghost_file != ghost_file and os.path.isfile(ghost_file_path):
# set the ghost file to the router
@ -442,8 +441,8 @@ class Dynamips(BaseManager):

for name, value in settings.items():
if hasattr(vm, name) and getattr(vm, name) != value:
if hasattr(vm, "set_{}".format(name)):
setter = getattr(vm, "set_{}".format(name))
if hasattr(vm, f"set_{name}"):
setter = getattr(vm, f"set_{name}")
await setter(value)
elif name.startswith("slot") and value in ADAPTER_MATRIX:
slot_id = int(name[-1])
@ -455,14 +454,14 @@ class Dynamips(BaseManager):
if not isinstance(vm.slots[slot_id], type(adapter)):
await vm.slot_add_binding(slot_id, adapter)
except IndexError:
raise DynamipsError("Slot {} doesn't exist on this router".format(slot_id))
raise DynamipsError(f"Slot {slot_id} doesn't exist on this router")
elif name.startswith("slot") and (value is None or value == ""):
slot_id = int(name[-1])
try:
if vm.slots[slot_id]:
await vm.slot_remove_binding(slot_id)
except IndexError:
raise DynamipsError("Slot {} doesn't exist on this router".format(slot_id))
raise DynamipsError(f"Slot {slot_id} doesn't exist on this router")
elif name.startswith("wic") and value in WIC_MATRIX:
wic_slot_id = int(name[-1])
wic_name = value
@ -473,14 +472,14 @@ class Dynamips(BaseManager):
if not isinstance(vm.slots[0].wics[wic_slot_id], type(wic)):
await vm.install_wic(wic_slot_id, wic)
except IndexError:
raise DynamipsError("WIC slot {} doesn't exist on this router".format(wic_slot_id))
raise DynamipsError(f"WIC slot {wic_slot_id} doesn't exist on this router")
elif name.startswith("wic") and (value is None or value == ""):
wic_slot_id = int(name[-1])
try:
if vm.slots[0].wics and vm.slots[0].wics[wic_slot_id]:
await vm.uninstall_wic(wic_slot_id)
except IndexError:
raise DynamipsError("WIC slot {} doesn't exist on this router".format(wic_slot_id))
raise DynamipsError(f"WIC slot {wic_slot_id} doesn't exist on this router")

mmap_support = self.config.settings.Dynamips.mmap_support
if mmap_support is False:
@ -523,12 +522,12 @@ class Dynamips(BaseManager):
:returns: relative path to the created config file
"""

log.info("Creating config file {}".format(path))
log.info(f"Creating config file {path}")
config_dir = os.path.dirname(path)
try:
os.makedirs(config_dir, exist_ok=True)
except OSError as e:
raise DynamipsError("Could not create Dynamips configs directory: {}".format(e))
raise DynamipsError(f"Could not create Dynamips configs directory: {e}")

if content is None or len(content) == 0:
content = "!\n"
@ -542,7 +541,7 @@ class Dynamips(BaseManager):
content = content.replace('%h', vm.name)
f.write(content.encode("utf-8"))
except OSError as e:
raise DynamipsError("Could not create config file '{}': {}".format(path, e))
raise DynamipsError(f"Could not create config file '{path}': {e}")

return os.path.join("configs", os.path.basename(path))

@ -574,10 +573,10 @@ class Dynamips(BaseManager):
if not match:
continue
await vm.set_idlepc(idlepc.split()[0])
log.debug("Auto Idle-PC: trying idle-PC value {}".format(vm.idlepc))
log.debug(f"Auto Idle-PC: trying idle-PC value {vm.idlepc}")
start_time = time.time()
initial_cpu_usage = await vm.get_cpu_usage()
log.debug("Auto Idle-PC: initial CPU usage is {}%".format(initial_cpu_usage))
log.debug(f"Auto Idle-PC: initial CPU usage is {initial_cpu_usage}%")
await asyncio.sleep(3) # wait 3 seconds to probe the cpu again
elapsed_time = time.time() - start_time
cpu_usage = await vm.get_cpu_usage()
@ -585,10 +584,10 @@ class Dynamips(BaseManager):
cpu_usage = abs(cpu_elapsed_usage * 100.0 / elapsed_time)
if cpu_usage > 100:
cpu_usage = 100
log.debug("Auto Idle-PC: CPU usage is {}% after {:.2} seconds".format(cpu_usage, elapsed_time))
log.debug(f"Auto Idle-PC: CPU usage is {cpu_usage}% after {elapsed_time:.2} seconds")
if cpu_usage < 70:
validated_idlepc = vm.idlepc
log.debug("Auto Idle-PC: idle-PC value {} has been validated".format(validated_idlepc))
log.debug(f"Auto Idle-PC: idle-PC value {validated_idlepc} has been validated")
break

if validated_idlepc is None:
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -16,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.


class Adapter(object):
class Adapter:

"""
Base class for adapters.

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -16,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.


class WIC_1ENET(object):
class WIC_1ENET:

"""
WIC-1ENET Ethernet
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -16,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.


class WIC_1T(object):
class WIC_1T:

"""
WIC-1T Serial

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -16,7 +15,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.


class WIC_2T(object):
class WIC_2T:

"""
WIC-2T Serial

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@ -57,11 +56,11 @@ class DynamipsFactory:

if node_type == "dynamips":
if platform not in PLATFORMS:
raise DynamipsError("Unknown router platform: {}".format(platform))
raise DynamipsError(f"Unknown router platform: {platform}")

return PLATFORMS[platform](name, node_id, project, manager, dynamips_id, **kwargs)
else:
if node_type not in DEVICES:
raise DynamipsError("Unknown device type: {}".format(node_type))
raise DynamipsError(f"Unknown device type: {node_type}")

return DEVICES[node_type](name, node_id, project, manager, **kwargs)
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@ -87,9 +86,9 @@ class DynamipsHypervisor:
break

if not connection_success:
raise DynamipsError("Couldn't connect to hypervisor on {}:{} :{}".format(host, self._port, last_exception))
raise DynamipsError(f"Couldn't connect to hypervisor on {host}:{self._port} :{last_exception}")
else:
log.info("Connected to Dynamips hypervisor on {}:{} after {:.4f} seconds".format(host, self._port, time.time() - begin))
log.info(f"Connected to Dynamips hypervisor on {host}:{self._port} after {time.time() - begin:.4f} seconds")

try:
version = await self.send("hypervisor version")
@ -134,7 +133,7 @@ class DynamipsHypervisor:
await self._writer.drain()
self._writer.close()
except OSError as e:
log.debug("Stopping hypervisor {}:{} {}".format(self._host, self._port, e))
log.debug(f"Stopping hypervisor {self._host}:{self._port} {e}")
self._reader = self._writer = None

async def reset(self):
@ -152,9 +151,9 @@ class DynamipsHypervisor:
"""

# encase working_dir in quotes to protect spaces in the path
await self.send('hypervisor working_dir "{}"'.format(working_dir))
await self.send(f'hypervisor working_dir "{working_dir}"')
self._working_dir = working_dir
log.debug("Working directory set to {}".format(self._working_dir))
log.debug(f"Working directory set to {self._working_dir}")

@property
def working_dir(self):
@ -244,7 +243,7 @@ class DynamipsHypervisor:

try:
command = command.strip() + '\n'
log.debug("sending {}".format(command))
log.debug(f"sending {command}")
self._writer.write(command.encode())
await self._writer.drain()
except OSError as e:
@ -269,7 +268,7 @@ class DynamipsHypervisor:
# Sometimes WinError 64 (ERROR_NETNAME_DELETED) is returned here on Windows.
# These happen if connection reset is received before IOCP could complete
# a previous operation. Ignore and try again....
log.warning("Connection reset received while reading Dynamips response: {}".format(e))
log.warning(f"Connection reset received while reading Dynamips response: {e}")
continue
if not chunk:
if retries > max_retries:
@ -300,7 +299,7 @@ class DynamipsHypervisor:

# Does it contain an error code?
if self.error_re.search(data[-1]):
raise DynamipsError("Dynamips error when running command '{}': {}".format(command, data[-1][4:]))
raise DynamipsError(f"Dynamips error when running command '{command}': {data[-1][4:]}")

# Or does the last line begin with '100-'? Then we are done!
if data[-1][:4] == '100-':
@ -314,5 +313,5 @@ class DynamipsHypervisor:
if self.success_re.search(data[index]):
data[index] = data[index][4:]

log.debug("returned result {}".format(data))
log.debug(f"returned result {data}")
return data
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#
@ -124,20 +123,20 @@ class Hypervisor(DynamipsHypervisor):
if os.path.isdir(system_root):
env["PATH"] = system_root + ';' + env["PATH"]
try:
log.info("Starting Dynamips: {}".format(self._command))
self._stdout_file = os.path.join(self.working_dir, "dynamips_i{}_stdout.txt".format(self._id))
log.info("Dynamips process logging to {}".format(self._stdout_file))
log.info(f"Starting Dynamips: {self._command}")
self._stdout_file = os.path.join(self.working_dir, f"dynamips_i{self._id}_stdout.txt")
log.info(f"Dynamips process logging to {self._stdout_file}")
with open(self._stdout_file, "w", encoding="utf-8") as fd:
self._process = await asyncio.create_subprocess_exec(*self._command,
stdout=fd,
stderr=subprocess.STDOUT,
cwd=self._working_dir,
env=env)
log.info("Dynamips process started PID={}".format(self._process.pid))
log.info(f"Dynamips process started PID={self._process.pid}")
self._started = True
except (OSError, subprocess.SubprocessError) as e:
log.error("Could not start Dynamips: {}".format(e))
raise DynamipsError("Could not start Dynamips: {}".format(e))
log.error(f"Could not start Dynamips: {e}")
raise DynamipsError(f"Could not start Dynamips: {e}")

async def stop(self):
"""
@ -145,7 +144,7 @@ class Hypervisor(DynamipsHypervisor):
"""

if self.is_running():
log.info("Stopping Dynamips process PID={}".format(self._process.pid))
log.info(f"Stopping Dynamips process PID={self._process.pid}")
await DynamipsHypervisor.stop(self)
# give some time for the hypervisor to properly stop.
# time to delete UNIX NIOs for instance.
@ -154,19 +153,19 @@ class Hypervisor(DynamipsHypervisor):
await wait_for_process_termination(self._process, timeout=3)
except asyncio.TimeoutError:
if self._process.returncode is None:
log.warning("Dynamips process {} is still running... killing it".format(self._process.pid))
log.warning(f"Dynamips process {self._process.pid} is still running... killing it")
try:
self._process.kill()
except OSError as e:
log.error("Cannot stop the Dynamips process: {}".format(e))
log.error(f"Cannot stop the Dynamips process: {e}")
if self._process.returncode is None:
log.warning('Dynamips hypervisor with PID={} is still running'.format(self._process.pid))
log.warning(f'Dynamips hypervisor with PID={self._process.pid} is still running')

if self._stdout_file and os.access(self._stdout_file, os.W_OK):
try:
os.remove(self._stdout_file)
except OSError as e:
log.warning("could not delete temporary Dynamips log file: {}".format(e))
log.warning(f"could not delete temporary Dynamips log file: {e}")
self._started = False

def read_stdout(self):
@ -181,7 +180,7 @@ class Hypervisor(DynamipsHypervisor):
with open(self._stdout_file, "rb") as file:
output = file.read().decode("utf-8", errors="replace")
except OSError as e:
log.warning("could not read {}: {}".format(self._stdout_file, e))
log.warning(f"could not read {self._stdout_file}: {e}")
return output

def is_running(self):
@ -203,12 +202,12 @@ class Hypervisor(DynamipsHypervisor):

command = [self._path]
command.extend(["-N1"]) # use instance IDs for filenames
command.extend(["-l", "dynamips_i{}_log.txt".format(self._id)]) # log file
command.extend(["-l", f"dynamips_i{self._id}_log.txt"]) # log file
# Dynamips cannot listen for hypervisor commands and for console connections on
# 2 different IP addresses.
# See https://github.com/GNS3/dynamips/issues/62
if self._console_host != "0.0.0.0" and self._console_host != "::":
command.extend(["-H", "{}:{}".format(self._host, self._port)])
command.extend(["-H", f"{self._host}:{self._port}"])
else:
command.extend(["-H", str(self._port)])
return command
@ -1,4 +1,3 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# Copyright (C) 2013 GNS3 Technologies Inc.
|
||||
#
|
||||
@ -69,8 +68,8 @@ class NIO:
|
||||
if self._input_filter or self._output_filter:
|
||||
await self.unbind_filter("both")
|
||||
self._capturing = False
|
await self._hypervisor.send("nio delete {}".format(self._name))
log.info("NIO {name} has been deleted".format(name=self._name))
await self._hypervisor.send(f"nio delete {self._name}")
log.info(f"NIO {self._name} has been deleted")

async def rename(self, new_name):
"""
@ -79,9 +78,9 @@ class NIO:
:param new_name: new NIO name
"""

await self._hypervisor.send("nio rename {name} {new_name}".format(name=self._name, new_name=new_name))
await self._hypervisor.send(f"nio rename {self._name} {new_name}")

log.info("NIO {name} renamed to {new_name}".format(name=self._name, new_name=new_name))
log.info(f"NIO {self._name} renamed to {new_name}")
self._name = new_name

async def debug(self, debug):
@ -91,7 +90,7 @@ class NIO:
:param debug: debug value (0 = disable, enable = 1)
"""

await self._hypervisor.send("nio set_debug {name} {debug}".format(name=self._name, debug=debug))
await self._hypervisor.send(f"nio set_debug {self._name} {debug}")

async def start_packet_capture(self, pcap_output_file, pcap_data_link_type="DLT_EN10MB"):
"""
@ -102,7 +101,7 @@
"""

await self.bind_filter("both", "capture")
await self.setup_filter("both", '{} "{}"'.format(pcap_data_link_type, pcap_output_file))
await self.setup_filter("both", f'{pcap_data_link_type} "{pcap_output_file}"')
self._capturing = True
self._pcap_output_file = pcap_output_file
self._pcap_data_link_type = pcap_data_link_type
@ -128,7 +127,7 @@ class NIO:
"""

if direction not in self._dynamips_direction:
raise DynamipsError("Unknown direction {} to bind filter {}:".format(direction, filter_name))
raise DynamipsError(f"Unknown direction {direction} to bind filter {filter_name}:")
dynamips_direction = self._dynamips_direction[direction]

await self._hypervisor.send("nio bind_filter {name} {direction} {filter}".format(name=self._name,
@ -151,7 +150,7 @@ class NIO:
"""

if direction not in self._dynamips_direction:
raise DynamipsError("Unknown direction {} to unbind filter:".format(direction))
raise DynamipsError(f"Unknown direction {direction} to unbind filter:")
dynamips_direction = self._dynamips_direction[direction]

await self._hypervisor.send("nio unbind_filter {name} {direction}".format(name=self._name,
@ -185,7 +184,7 @@ class NIO:
"""

if direction not in self._dynamips_direction:
raise DynamipsError("Unknown direction {} to setup filter:".format(direction))
raise DynamipsError(f"Unknown direction {direction} to setup filter:")
dynamips_direction = self._dynamips_direction[direction]

await self._hypervisor.send("nio setup_filter {name} {direction} {options}".format(name=self._name,
@ -227,7 +226,7 @@ class NIO:
:returns: NIO statistics (string with packets in, packets out, bytes in, bytes out)
"""

stats = await self._hypervisor.send("nio get_stats {}".format(self._name))
stats = await self._hypervisor.send(f"nio get_stats {self._name}")
return stats[0]

async def reset_stats(self):
@ -235,7 +234,7 @@ class NIO:
Resets statistics for this NIO.
"""

await self._hypervisor.send("nio reset_stats {}".format(self._name))
await self._hypervisor.send(f"nio reset_stats {self._name}")

@property
def bandwidth(self):
@ -254,7 +253,7 @@ class NIO:
:param bandwidth: bandwidth integer value (in Kb/s)
"""

await self._hypervisor.send("nio set_bandwidth {name} {bandwidth}".format(name=self._name, bandwidth=bandwidth))
await self._hypervisor.send(f"nio set_bandwidth {self._name} {bandwidth}")
self._bandwidth = bandwidth

@property

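Every hunk above applies the same mechanical rewrite: a str.format() call whose arguments are plain expressions becomes an f-string interpolating those expressions directly, so the text sent to the hypervisor is unchanged. A minimal sketch of the equivalence, using a hypothetical nio_name value rather than the class's real attributes:

# Python 3.6+: both forms build exactly the same string; only the syntax differs
nio_name = "nio_udp_1"
old_style = "nio delete {}".format(nio_name)
new_style = f"nio delete {nio_name}"
assert old_style == new_style == "nio delete nio_udp_1"
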
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -39,7 +38,7 @@ class NIOGenericEthernet(NIO):
def __init__(self, hypervisor, ethernet_device):

# create an unique name
name = 'generic_ethernet-{}'.format(uuid.uuid4())
name = f'generic_ethernet-{uuid.uuid4()}'
self._ethernet_device = ethernet_device
super().__init__(name, hypervisor)

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -38,7 +37,7 @@ class NIOLinuxEthernet(NIO):

def __init__(self, hypervisor, ethernet_device):
# create an unique name
name = 'linux_ethernet-{}'.format(uuid.uuid4())
name = f'linux_ethernet-{uuid.uuid4()}'
self._ethernet_device = ethernet_device
super().__init__(name, hypervisor)

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -38,13 +37,13 @@ class NIONull(NIO):
def __init__(self, hypervisor):

# create an unique name
name = 'null-{}'.format(uuid.uuid4())
name = f'null-{uuid.uuid4()}'
super().__init__(name, hypervisor)

async def create(self):

await self._hypervisor.send("nio create_null {}".format(self._name))
log.info("NIO NULL {name} created.".format(name=self._name))
await self._hypervisor.send(f"nio create_null {self._name}")
log.info(f"NIO NULL {self._name} created.")

def __json__(self):

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -39,14 +38,14 @@ class NIOTAP(NIO):
def __init__(self, hypervisor, tap_device):

# create an unique name
name = 'tap-{}'.format(uuid.uuid4())
name = f'tap-{uuid.uuid4()}'
self._tap_device = tap_device
super().__init__(name, hypervisor)

async def create(self):

await self._hypervisor.send("nio create_tap {name} {tap}".format(name=self._name, tap=self._tap_device))
log.info("NIO TAP {name} created with device {device}".format(name=self._name, device=self._tap_device))
await self._hypervisor.send(f"nio create_tap {self._name} {self._tap_device}")
log.info(f"NIO TAP {self._name} created with device {self._tap_device}")

@property
def tap_device(self):

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -44,7 +43,7 @@ class NIOUDP(NIO):
def __init__(self, node, lport, rhost, rport):

# create an unique name
name = 'udp-{}'.format(uuid.uuid4())
name = f'udp-{uuid.uuid4()}'
self._lport = lport
self._rhost = rhost
self._rport = rport
@ -65,7 +64,7 @@ class NIOUDP(NIO):
return
self._local_tunnel_lport = self._node.manager.port_manager.get_free_udp_port(self._node.project)
self._local_tunnel_rport = self._node.manager.port_manager.get_free_udp_port(self._node.project)
self._bridge_name = 'DYNAMIPS-{}-{}'.format(self._local_tunnel_lport, self._local_tunnel_rport)
self._bridge_name = f'DYNAMIPS-{self._local_tunnel_lport}-{self._local_tunnel_rport}'
await self._hypervisor.send("nio create_udp {name} {lport} {rhost} {rport}".format(name=self._name,
lport=self._local_tunnel_lport,
rhost='127.0.0.1',

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -40,7 +39,7 @@ class NIOUNIX(NIO):
def __init__(self, hypervisor, local_file, remote_file):

# create an unique name
name = 'unix-{}'.format(uuid.uuid4())
name = f'unix-{uuid.uuid4()}'
self._local_file = local_file
self._remote_file = remote_file
super().__init__(name, hypervisor)

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -40,7 +39,7 @@ class NIOVDE(NIO):
def __init__(self, hypervisor, control_file, local_file):

# create an unique name
name = 'vde-{}'.format(uuid.uuid4())
name = f'vde-{uuid.uuid4()}'
self._control_file = control_file
self._local_file = local_file
super().__init__(name, hypervisor)

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -71,8 +70,8 @@ class ATMSwitch(Device):
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir)

await self._hypervisor.send('atmsw create "{}"'.format(self._name))
log.info('ATM switch "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
await self._hypervisor.send(f'atmsw create "{self._name}"')
log.info(f'ATM switch "{self._name}" [{self._id}] has been created')
self._hypervisor.devices.append(self)

async def set_name(self, new_name):
@ -82,7 +81,7 @@ class ATMSwitch(Device):
:param new_name: New name for this switch
"""

await self._hypervisor.send('atmsw rename "{name}" "{new_name}"'.format(name=self._name, new_name=new_name))
await self._hypervisor.send(f'atmsw rename "{self._name}" "{new_name}"')
log.info('ATM switch "{name}" [{id}]: renamed to "{new_name}"'.format(name=self._name,
id=self._id,
new_name=new_name))
@ -125,10 +124,10 @@ class ATMSwitch(Device):

if self._hypervisor:
try:
await self._hypervisor.send('atmsw delete "{}"'.format(self._name))
log.info('ATM switch "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id))
await self._hypervisor.send(f'atmsw delete "{self._name}"')
log.info(f'ATM switch "{self._name}" [{self._id}] has been deleted')
except DynamipsError:
log.debug("Could not properly delete ATM switch {}".format(self._name))
log.debug(f"Could not properly delete ATM switch {self._name}")
if self._hypervisor and self in self._hypervisor.devices:
self._hypervisor.devices.remove(self)
if self._hypervisor and not self._hypervisor.devices:
@ -162,7 +161,7 @@ class ATMSwitch(Device):
"""

if port_number in self._nios:
raise DynamipsError("Port {} isn't free".format(port_number))
raise DynamipsError(f"Port {port_number} isn't free")

log.info('ATM switch "{name}" [id={id}]: NIO {nio} bound to port {port}'.format(name=self._name,
id=self._id,
@ -180,7 +179,7 @@ class ATMSwitch(Device):
"""

if port_number not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port_number))
raise DynamipsError(f"Port {port_number} is not allocated")

await self.stop_capture(port_number)
# remove VCs mapped with the port
@ -235,12 +234,12 @@ class ATMSwitch(Device):
"""

if port_number not in self._nios:
raise DynamipsError("Port {} is not allocated".format(port_number))
raise DynamipsError(f"Port {port_number} is not allocated")

nio = self._nios[port_number]

if not nio:
raise DynamipsError("Port {} is not connected".format(port_number))
raise DynamipsError(f"Port {port_number} is not connected")

return nio

@ -451,7 +450,7 @@ class ATMSwitch(Device):
data_link_type = data_link_type[4:]

if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
raise DynamipsError("Port {} has already a filter applied".format(port_number))
raise DynamipsError(f"Port {port_number} has already a filter applied")

await nio.start_packet_capture(output_file, data_link_type)
log.info('ATM switch "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name,

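The ATM switch hunks above quote the device name inside the command, and the rewritten f-strings keep that nesting: single quotes outside, double quotes inside, since before Python 3.12 an f-string cannot reuse its own quote character within the braces. A small sketch with a hypothetical switch_name value:

# the quoted-name form renders identically before and after the rewrite
switch_name = "ATM1"
old_style = 'atmsw create "{}"'.format(switch_name)
new_style = f'atmsw create "{switch_name}"'
assert old_style == new_style == 'atmsw create "ATM1"'
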
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -47,7 +46,7 @@ class Bridge(Device):
module_workdir = self.project.module_working_directory(self.manager.module_name.lower())
self._hypervisor = await self.manager.start_new_hypervisor(working_dir=module_workdir)

await self._hypervisor.send('nio_bridge create "{}"'.format(self._name))
await self._hypervisor.send(f'nio_bridge create "{self._name}"')
self._hypervisor.devices.append(self)

async def set_name(self, new_name):
@ -80,7 +79,7 @@ class Bridge(Device):
if self._hypervisor and self in self._hypervisor.devices:
self._hypervisor.devices.remove(self)
if self._hypervisor and not self._hypervisor.devices:
await self._hypervisor.send('nio_bridge delete "{}"'.format(self._name))
await self._hypervisor.send(f'nio_bridge delete "{self._name}"')

async def add_nio(self, nio):
"""
@ -89,7 +88,7 @@ class Bridge(Device):
:param nio: NIO instance to add
"""

await self._hypervisor.send('nio_bridge add_nio "{name}" {nio}'.format(name=self._name, nio=nio))
await self._hypervisor.send(f'nio_bridge add_nio "{self._name}" {nio}')
self._nios.append(nio)

async def remove_nio(self, nio):
@ -99,7 +98,7 @@ class Bridge(Device):
:param nio: NIO instance to remove
"""
if self._hypervisor:
await self._hypervisor.send('nio_bridge remove_nio "{name}" {nio}'.format(name=self._name, nio=nio))
await self._hypervisor.send(f'nio_bridge remove_nio "{self._name}" {nio}')
self._nios.remove(nio)

@property

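The keyword form of .format() used in the bridge commands maps the same way: each {name} placeholder is replaced by the expression bound to that keyword. A short sketch with hypothetical bridge_name and nio_id values:

# keyword placeholders become direct interpolations of the bound expressions
bridge_name = "BR0"
nio_id = "nio_tap_3"
old_style = 'nio_bridge add_nio "{name}" {nio}'.format(name=bridge_name, nio=nio_id)
new_style = f'nio_bridge add_nio "{bridge_name}" {nio_id}'
assert old_style == new_style == 'nio_bridge add_nio "BR0" nio_tap_3'
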
@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -112,7 +111,7 @@ class C1700(Router):
1720, 1721, 1750, 1751 or 1760
"""

await self._hypervisor.send('c1700 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))
await self._hypervisor.send(f'c1700 set_chassis "{self._name}" {chassis}')

log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name,
id=self._id,
@ -138,7 +137,7 @@ class C1700(Router):
:param iomem: I/O memory size
"""

await self._hypervisor.send('c1700 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
await self._hypervisor.send(f'c1700 set_iomem "{self._name}" {iomem}')

log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -122,7 +121,7 @@ class C2600(Router):
2620XM, 2621XM, 2650XM or 2651XM
"""

await self._hypervisor.send('c2600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))
await self._hypervisor.send(f'c2600 set_chassis "{self._name}" {chassis}')

log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name,
id=self._id,
@ -147,7 +146,7 @@ class C2600(Router):
:param iomem: I/O memory size
"""

await self._hypervisor.send('c2600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
await self._hypervisor.send(f'c2600 set_iomem "{self._name}" {iomem}')

log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -88,7 +87,7 @@ class C2691(Router):
:param iomem: I/O memory size
"""

await self._hypervisor.send('c2691 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
await self._hypervisor.send(f'c2691 set_iomem "{self._name}" {iomem}')

log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -106,7 +105,7 @@ class C3600(Router):
:param: chassis string: 3620, 3640 or 3660
"""

await self._hypervisor.send('c3600 set_chassis "{name}" {chassis}'.format(name=self._name, chassis=chassis))
await self._hypervisor.send(f'c3600 set_chassis "{self._name}" {chassis}')

log.info('Router "{name}" [{id}]: chassis set to {chassis}'.format(name=self._name,
id=self._id,
@ -132,7 +131,7 @@ class C3600(Router):
:param iomem: I/O memory size
"""

await self._hypervisor.send('c3600 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
await self._hypervisor.send(f'c3600 set_iomem "{self._name}" {iomem}')

log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -88,7 +87,7 @@ class C3725(Router):
:param iomem: I/O memory size
"""

await self._hypervisor.send('c3725 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
await self._hypervisor.send(f'c3725 set_iomem "{self._name}" {iomem}')

log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -88,7 +87,7 @@ class C3745(Router):
:param iomem: I/O memory size
"""

await self._hypervisor.send('c3745 set_iomem "{name}" {size}'.format(name=self._name, size=iomem))
await self._hypervisor.send(f'c3745 set_iomem "{self._name}" {iomem}')

log.info('Router "{name}" [{id}]: I/O memory updated from {old_iomem}% to {new_iomem}%'.format(name=self._name,
id=self._id,

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -123,7 +122,7 @@ class C7200(Router):
if (await self.is_running()):
raise DynamipsError("Cannot change NPE on running router")

await self._hypervisor.send('c7200 set_npe "{name}" {npe}'.format(name=self._name, npe=npe))
await self._hypervisor.send(f'c7200 set_npe "{self._name}" {npe}')

log.info('Router "{name}" [{id}]: NPE updated from {old_npe} to {new_npe}'.format(name=self._name,
id=self._id,
@ -148,7 +147,7 @@ class C7200(Router):
:returns: midplane model string (e.g. "vxr" or "std")
"""

await self._hypervisor.send('c7200 set_midplane "{name}" {midplane}'.format(name=self._name, midplane=midplane))
await self._hypervisor.send(f'c7200 set_midplane "{self._name}" {midplane}')

log.info('Router "{name}" [{id}]: midplane updated from {old_midplane} to {new_midplane}'.format(name=self._name,
id=self._id,

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 GNS3 Technologies Inc.
#

@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 GNS3 Technologies Inc.
#
@ -50,7 +49,7 @@ class EthernetHub(Bridge):
self._ports = []
for port_number in range(0, 8):
self._ports.append({"port_number": port_number,
"name": "Ethernet{}".format(port_number)})
"name": f"Ethernet{port_number}"})
else:
self._ports = ports

@ -86,7 +85,7 @@ class EthernetHub(Bridge):

port_number = 0
for port in ports:
port["name"] = "Ethernet{}".format(port_number)
port["name"] = f"Ethernet{port_number}"
port["port_number"] = port_number
port_number += 1

@ -95,7 +94,7 @@ class EthernetHub(Bridge):
async def create(self):

await Bridge.create(self)
log.info('Ethernet hub "{name}" [{id}] has been created'.format(name=self._name, id=self._id))
log.info(f'Ethernet hub "{self._name}" [{self._id}] has been created')

@property
def mappings(self):
@ -121,9 +120,9 @@ class EthernetHub(Bridge):

try:
await Bridge.delete(self)
log.info('Ethernet hub "{name}" [{id}] has been deleted'.format(name=self._name, id=self._id))
log.info(f'Ethernet hub "{self._name}" [{self._id}] has been deleted')
except DynamipsError:
log.debug("Could not properly delete Ethernet hub {}".format(self._name))
log.debug(f"Could not properly delete Ethernet hub {self._name}")
if self._hypervisor and not self._hypervisor.devices:
await self.hypervisor.stop()
self._hypervisor = None
@ -138,10 +137,10 @@ class EthernetHub(Bridge):
"""

if port_number not in [port["port_number"] for port in self._ports]:
raise DynamipsError("Port {} doesn't exist".format(port_number))
raise DynamipsError(f"Port {port_number} doesn't exist")

if port_number in self._mappings:
raise DynamipsError("Port {} isn't free".format(port_number))
raise DynamipsError(f"Port {port_number} isn't free")

await Bridge.add_nio(self, nio)

@ -161,7 +160,7 @@ class EthernetHub(Bridge):
"""

if port_number not in self._mappings:
raise DynamipsError("Port {} is not allocated".format(port_number))
raise DynamipsError(f"Port {port_number} is not allocated")

await self.stop_capture(port_number)
nio = self._mappings[port_number]
@ -187,12 +186,12 @@ class EthernetHub(Bridge):
"""

if port_number not in self._mappings:
raise DynamipsError("Port {} is not allocated".format(port_number))
raise DynamipsError(f"Port {port_number} is not allocated")

nio = self._mappings[port_number]

if not nio:
raise DynamipsError("Port {} is not connected".format(port_number))
raise DynamipsError(f"Port {port_number} is not connected")

return nio

@ -211,7 +210,7 @@ class EthernetHub(Bridge):
data_link_type = data_link_type[4:]

if nio.input_filter[0] is not None and nio.output_filter[0] is not None:
raise DynamipsError("Port {} has already a filter applied".format(port_number))
raise DynamipsError(f"Port {port_number} has already a filter applied")

await nio.start_packet_capture(output_file, data_link_type)
log.info('Ethernet hub "{name}" [{id}]: starting packet capture on port {port}'.format(name=self._name,

Some files were not shown because too many files have changed in this diff.