#!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import re
import os
import json
import uuid
import copy
import shutil
import time
import asyncio
import aiohttp
import aiofiles
import tempfile
import zipfile

from uuid import UUID, uuid4

from .node import Node
from .compute import ComputeError
from .snapshot import Snapshot
from .drawing import Drawing
from .topology import project_to_topology, load_topology
from .udp_link import UDPLink
from ..config import Config
from ..utils.path import check_path_allowed, get_default_project_directory
from ..utils.application_id import get_next_application_id
from ..utils.asyncio.pool import Pool
from ..utils.asyncio import locking
from ..utils.asyncio import aiozipstream
from .export_project import export_project
from .import_project import import_project

import logging
log = logging.getLogger(__name__)


def open_required(func):
    """
    Use this decorator to raise an error if the project is not opened
    """

    def wrapper(self, *args, **kwargs):
        if self._status == "closed":
            raise aiohttp.web.HTTPForbidden(text="The project is not opened")
        return func(self, *args, **kwargs)
    return wrapper
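
# Illustrative note (not part of the original code): @open_required is applied to
# Project methods below so that calls against a closed project fail fast, e.g.:
#
#     @open_required
#     def get_node(self, node_id):
#         ...
#
# Calling such a method while self._status == "closed" raises HTTPForbidden.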


class Project:
    """
    A project inside a controller

    :param project_id: force the project identifier (None by default, auto-generates a UUID)
    :param path: path of the project (None uses the standard directory)
    :param status: status of the project (opened / closed)
    """

    def __init__(self, name=None, project_id=None, path=None, controller=None, status="opened",
                 filename=None, auto_start=False, auto_open=False, auto_close=True,
                 scene_height=1000, scene_width=2000, zoom=100, show_layers=False, snap_to_grid=False, show_grid=False,
                 grid_size=75, drawing_grid_size=25, show_interface_labels=False, variables=None, supplier=None):

        self._controller = controller
        assert name is not None
        self._name = name
        self._auto_start = auto_start
        self._auto_close = auto_close
        self._auto_open = auto_open
        self._status = status
        self._scene_height = scene_height
        self._scene_width = scene_width
        self._zoom = zoom
        self._show_layers = show_layers
        self._snap_to_grid = snap_to_grid
        self._show_grid = show_grid
        self._grid_size = grid_size
        self._drawing_grid_size = drawing_grid_size
        self._show_interface_labels = show_interface_labels
        self._variables = variables
        self._supplier = supplier
        self._loading = False
        self._closing = False

        # Disallow overwriting an existing project
        if project_id is None and path is not None:
            if os.path.exists(path):
                raise aiohttp.web.HTTPForbidden(text="The path {} already exists.".format(path))

        if project_id is None:
            self._id = str(uuid4())
        else:
            try:
                UUID(project_id, version=4)
            except ValueError:
                raise aiohttp.web.HTTPBadRequest(text="{} is not a valid UUID".format(project_id))
            self._id = project_id

        if path is None:
            path = os.path.join(get_default_project_directory(), self._id)
        self.path = path

        if filename is not None:
            self._filename = filename
        else:
            self._filename = self.name + ".gns3"

        self.reset()

        # At project creation we write an empty .gns3 file with the metadata
        if not os.path.exists(self._topology_file()):
            assert self._status != "closed"
            self.dump()

        self._iou_id_lock = asyncio.Lock()

        log.debug('Project "{name}" [{id}] loaded'.format(name=self.name, id=self._id))

    def emit_notification(self, action, event):
        """
        Emit a notification to all clients using this project.

        :param action: Action name
        :param event: Event to send
        """

        self.controller.notification.project_emit(action, event, project_id=self.id)

    async def update(self, **kwargs):
        """
        Update the project

        :param kwargs: Project properties
        """

        old_json = self.__json__()

        for prop in kwargs:
            setattr(self, prop, kwargs[prop])

        # We only send a notification if the object has changed
        if old_json != self.__json__():
            self.emit_notification("project.updated", self.__json__())
            self.dump()

        # update on computes
        for compute in list(self._project_created_on_compute):
            await compute.put(
                "/projects/{}".format(self._id), {
                    "variables": self.variables
                }
            )
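
    # Illustrative usage sketch (hypothetical values, not part of the original code):
    #
    #     await project.update(name="lab-v2", zoom=150)
    #
    # Only changed properties trigger a "project.updated" notification and a dump();
    # the project variables are then pushed to every compute the project was created on.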

    def reset(self):
        """
        Called when opening or closing a project. Cleans up internal state.
        """

        self._allocated_node_names = set()
        self._nodes = {}
        self._links = {}
        self._drawings = {}
        self._snapshots = {}
        self._computes = []

        # List the available snapshots
        snapshot_dir = os.path.join(self.path, "snapshots")
        if os.path.exists(snapshot_dir):
            for snap in os.listdir(snapshot_dir):
                if snap.endswith(".gns3project"):
                    snapshot = Snapshot(self, filename=snap)
                    self._snapshots[snapshot.id] = snapshot

        # Create the project on demand on the compute node
        self._project_created_on_compute = set()

    @property
    def scene_height(self):
        return self._scene_height

    @scene_height.setter
    def scene_height(self, val):
        """
        Height of the drawing area
        """
        self._scene_height = val

    @property
    def scene_width(self):
        return self._scene_width

    @scene_width.setter
    def scene_width(self, val):
        """
        Width of the drawing area
        """
        self._scene_width = val

    @property
    def zoom(self):
        """
        Zoom level in percentage

        :return: integer > 0
        """
        return self._zoom

    @zoom.setter
    def zoom(self, zoom):
        """
        Setter for zoom level in percentage
        """
        self._zoom = zoom

    @property
    def show_layers(self):
        """
        Show layers mode

        :return: bool
        """
        return self._show_layers

    @show_layers.setter
    def show_layers(self, show_layers):
        """
        Setter for show layers mode
        """
        self._show_layers = show_layers

    @property
    def snap_to_grid(self):
        """
        Snap to grid mode

        :return: bool
        """
        return self._snap_to_grid

    @snap_to_grid.setter
    def snap_to_grid(self, snap_to_grid):
        """
        Setter for snap to grid mode
        """
        self._snap_to_grid = snap_to_grid

    @property
    def show_grid(self):
        """
        Show grid mode

        :return: bool
        """
        return self._show_grid

    @show_grid.setter
    def show_grid(self, show_grid):
        """
        Setter for show grid mode
        """
        self._show_grid = show_grid

    @property
    def grid_size(self):
        """
        Grid size for nodes

        :return: integer
        """
        return self._grid_size

    @grid_size.setter
    def grid_size(self, grid_size):
        """
        Setter for grid size for nodes
        """
        self._grid_size = grid_size

    @property
    def drawing_grid_size(self):
        """
        Grid size for drawings

        :return: integer
        """
        return self._drawing_grid_size

    @drawing_grid_size.setter
    def drawing_grid_size(self, grid_size):
        """
        Setter for grid size for drawings
        """
        self._drawing_grid_size = grid_size

    @property
    def show_interface_labels(self):
        """
        Show interface labels mode

        :return: bool
        """
        return self._show_interface_labels

    @show_interface_labels.setter
    def show_interface_labels(self, show_interface_labels):
        """
        Setter for show interface labels
        """
        self._show_interface_labels = show_interface_labels

    @property
    def variables(self):
        """
        Variables applied to the project

        :return: list
        """
        return self._variables

    @variables.setter
    def variables(self, variables):
        """
        Setter for variables applied to the project
        """
        self._variables = variables

    @property
    def supplier(self):
        """
        Supplier of the project

        :return: dict
        """
        return self._supplier

    @supplier.setter
    def supplier(self, supplier):
        """
        Setter for supplier of the project
        """
        self._supplier = supplier

    @property
    def auto_start(self):
        """
        Should the project start automatically when opened
        """
        return self._auto_start

    @auto_start.setter
    def auto_start(self, val):
        self._auto_start = val

    @property
    def auto_close(self):
        """
        Should the project be closed automatically when clients
        stop listening for notifications
        """
        return self._auto_close

    @auto_close.setter
    def auto_close(self, val):
        self._auto_close = val

    @property
    def auto_open(self):
        return self._auto_open

    @auto_open.setter
    def auto_open(self, val):
        self._auto_open = val

    @property
    def controller(self):
        return self._controller

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, val):
        self._name = val

    @property
    def id(self):
        return self._id

    @property
    def path(self):
        return self._path

    @property
    def status(self):
        return self._status

    @path.setter
    def path(self, path):
        check_path_allowed(path)
        try:
            os.makedirs(path, exist_ok=True)
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(text="Could not create project directory: {}".format(e))

        if '"' in path:
            raise aiohttp.web.HTTPForbidden(text="You are not allowed to use \" in the project directory path. Not supported by Dynamips.")

        self._path = path

    def _config(self):
        return Config.instance().get_section_config("Server")

    @property
    def captures_directory(self):
        """
        Location of the capture files
        """
        path = os.path.join(self._path, "project-files", "captures")
        os.makedirs(path, exist_ok=True)
        return path

    @property
    def pictures_directory(self):
        """
        Location of the image files
        """
        path = os.path.join(self._path, "project-files", "images")
        os.makedirs(path, exist_ok=True)
        return path

    @property
    def computes(self):
        """
        :return: List of computes used by the project
        """
        if self._status == "closed":
            return self._get_closed_data("computes", "compute_id").values()
        return self._project_created_on_compute

    def remove_allocated_node_name(self, name):
        """
        Removes an allocated node name

        :param name: allocated node name
        """

        if name in self._allocated_node_names:
            self._allocated_node_names.remove(name)

    def update_allocated_node_name(self, base_name):
        """
        Updates a node name or generates a new one if the requested
        name is not available.

        :param base_name: new node base name
        """

        if base_name is None:
            return None

        base_name = re.sub(r"[ ]", "", base_name)
        if base_name in self._allocated_node_names:
            base_name = re.sub(r"[0-9]+$", "{0}", base_name)

        if '{0}' in base_name or '{id}' in base_name:
            # base name is a template, replace {0} or {id} by a unique identifier
            for number in range(1, 1000000):
                try:
                    name = base_name.format(number, id=number, name="Node")
                except KeyError as e:
                    raise aiohttp.web.HTTPConflict(text="{" + e.args[0] + "} is not a valid replacement string in the node name")
                except (ValueError, IndexError) as e:
                    raise aiohttp.web.HTTPConflict(text="{} is not a valid replacement string in the node name".format(base_name))
                if name not in self._allocated_node_names:
                    self._allocated_node_names.add(name)
                    return name
        else:
            if base_name not in self._allocated_node_names:
                self._allocated_node_names.add(base_name)
                return base_name
            # base name is not unique, let's find a unique name by appending a number
            for number in range(1, 1000000):
                name = base_name + str(number)
                if name not in self._allocated_node_names:
                    self._allocated_node_names.add(name)
                    return name
        raise aiohttp.web.HTTPConflict(text="A node name could not be allocated (node limit reached?)")
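
    # Illustrative behaviour sketch (hypothetical names, not part of the original code):
    #
    #     project.update_allocated_node_name("PC{0}")   # -> "PC1" (first free number)
    #     project.update_allocated_node_name("PC{0}")   # -> "PC2"
    #     project.update_allocated_node_name("PC1")     # "PC1" is taken, so the trailing
    #                                                   # digits become "{0}" -> "PC3"
    #     project.update_allocated_node_name("Switch")  # -> "Switch", then "Switch1", ...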

    def update_node_name(self, node, new_name):

        if new_name and node.name != new_name:
            self.remove_allocated_node_name(node.name)
            return self.update_allocated_node_name(new_name)
        return new_name

    @open_required
    async def add_node_from_template(self, template_id, x=0, y=0, name=None, compute_id=None):
        """
        Create a node from a template.
        """
        try:
            template = copy.deepcopy(self.controller.template_manager.templates[template_id].settings)
        except KeyError:
            msg = "Template {} doesn't exist".format(template_id)
            log.error(msg)
            raise aiohttp.web.HTTPNotFound(text=msg)
        template["x"] = x
        template["y"] = y
        node_type = template.pop("template_type")
        if template.pop("builtin", False) is True:
            # compute_id is selected by clients for builtin templates
            compute = self.controller.get_compute(compute_id)
        else:
            compute = self.controller.get_compute(template.pop("compute_id", compute_id))
        template_name = template.pop("name")
        default_name_format = template.pop("default_name_format", "{name}-{0}")
        if name is None:
            name = default_name_format.replace("{name}", template_name)
        node_id = str(uuid.uuid4())
        node = await self.add_node(compute, name, node_id, node_type=node_type, **template)
        return node
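
    # Illustrative usage sketch (template_id is a hypothetical value, not from the original code):
    #
    #     node = await project.add_node_from_template(template_id, x=-100, y=50)
    #
    # The template settings provide the node type and the default name format
    # (for example "{name}-{0}"); the final unique name is then allocated by the
    # project's name-allocation logic above.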

    async def _create_node(self, compute, name, node_id, node_type=None, **kwargs):

        node = Node(self, compute, name, node_id=node_id, node_type=node_type, **kwargs)
        if compute not in self._project_created_on_compute:
            # For a local server we send the project path
            if compute.id == "local":
                data = {
                    "name": self._name,
                    "project_id": self._id,
                    "path": self._path
                }
            else:
                data = {
                    "name": self._name,
                    "project_id": self._id
                }

            if self._variables:
                data["variables"] = self._variables

            await compute.post("/projects", data=data)

            self._project_created_on_compute.add(compute)

        await node.create()
        self._nodes[node.id] = node
        return node

    @open_required
    async def add_node(self, compute, name, node_id, dump=True, node_type=None, **kwargs):
        """
        Create a node or return an existing node

        :param dump: Dump topology to disk
        :param kwargs: See the documentation of node
        """

        if node_id in self._nodes:
            return self._nodes[node_id]

        if compute.id not in self._computes:
            self._computes.append(compute.id)

        if node_type == "iou":
            async with self._iou_id_lock:
                # wait for an IOU node to be completely created before adding a new one;
                # this is important, otherwise we allocate the same application ID (used
                # to generate MAC addresses) when creating multiple IOU nodes at the same time
                if "properties" in kwargs.keys():
                    # allocate a new application id for nodes loaded from the project
                    kwargs.get("properties")["application_id"] = get_next_application_id(self._controller.projects, self._computes)
                elif "application_id" not in kwargs.keys() and not kwargs.get("properties"):
                    # allocate a new application id for nodes added to the project
                    kwargs["application_id"] = get_next_application_id(self._controller.projects, self._computes)
                node = await self._create_node(compute, name, node_id, node_type, **kwargs)
        else:
            node = await self._create_node(compute, name, node_id, node_type, **kwargs)

        self.emit_notification("node.created", node.__json__())
        if dump:
            self.dump()
        return node
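
    # Illustrative usage sketch (hypothetical arguments, not part of the original code):
    #
    #     compute = controller.get_compute("local")
    #     node = await project.add_node(compute, "R1", str(uuid.uuid4()), node_type="dynamips")
    #
    # For IOU nodes, creation is serialized with self._iou_id_lock so that two
    # concurrent creations cannot be handed the same application_id.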

    @locking
    async def __delete_node_links(self, node):
        """
        Delete all links connected to this node.

        The operation uses a lock to avoid cleaning links from
        multiple nodes at the same time.
        """
        for link in list(self._links.values()):
            if node in link.nodes:
                await self.delete_link(link.id, force_delete=True)
2016-10-03 14:35:07 +00:00
@open_required
2018-10-15 10:05:49 +00:00
async def delete_node ( self , node_id ) :
2016-10-03 14:35:07 +00:00
node = self . get_node ( node_id )
2019-04-10 08:43:51 +00:00
if node . locked :
raise aiohttp . web . HTTPConflict ( text = " Node {} cannot be deleted because it is locked " . format ( node . name ) )
2018-10-15 10:05:49 +00:00
await self . __delete_node_links ( node )
2016-06-11 23:31:30 +00:00
self . remove_allocated_node_name ( node . name )
2016-05-17 11:11:43 +00:00
del self . _nodes [ node . id ]
2018-10-15 10:05:49 +00:00
await node . destroy ( )
2020-10-27 09:11:24 +00:00
# refresh the compute IDs list
self . _computes = [ n . compute . id for n in self . nodes . values ( ) ]
2016-06-14 10:04:23 +00:00
self . dump ( )
2019-02-23 14:08:52 +00:00
self . emit_notification ( " node.deleted " , node . __json__ ( ) )
2016-05-17 11:11:43 +00:00

    @open_required
    def get_node(self, node_id):
        """
        Return the node or raise a 404 if the node is unknown
        """
        try:
            return self._nodes[node_id]
        except KeyError:
            raise aiohttp.web.HTTPNotFound(text="Node ID {} doesn't exist".format(node_id))

    def _get_closed_data(self, section, id_key):
        """
        Get the data for a project from the .gns3 file when
        the project is closed.

        :param section: The section name in the .gns3 file
        :param id_key: The key for the element unique id
        """

        try:
            path = self._topology_file()
            with open(path, "r") as f:
                topology = json.load(f)
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(text="Could not load topology: {}".format(e))

        try:
            data = {}
            for elem in topology["topology"][section]:
                data[elem[id_key]] = elem
            return data
        except KeyError:
            raise aiohttp.web.HTTPNotFound(text="Section {} not found in the topology".format(section))
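
    # Sketch of the .gns3 layout this relies on (simplified, inferred from the calls below):
    #
    #     {"topology": {"nodes":    [{"node_id": ...}, ...],
    #                   "links":    [{"link_id": ...}, ...],
    #                   "drawings": [{"drawing_id": ...}, ...],
    #                   "computes": [{"compute_id": ...}, ...]}}
    #
    # e.g. self._get_closed_data("nodes", "node_id") returns the node list re-keyed by node_id.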

    @property
    def nodes(self):
        """
        :returns: Dictionary of the nodes
        """
        if self._status == "closed":
            return self._get_closed_data("nodes", "node_id")
        return self._nodes

    @property
    def drawings(self):
        """
        :returns: Dictionary of the drawings
        """
        if self._status == "closed":
            return self._get_closed_data("drawings", "drawing_id")
        return self._drawings

    @open_required
    async def add_drawing(self, drawing_id=None, dump=True, **kwargs):
        """
        Create a drawing or return an existing drawing

        :param dump: Dump the topology to disk
        :param kwargs: See the documentation of drawing
        """
        if drawing_id not in self._drawings:
            drawing = Drawing(self, drawing_id=drawing_id, **kwargs)
            self._drawings[drawing.id] = drawing
            self.emit_notification("drawing.created", drawing.__json__())
            if dump:
                self.dump()
            return drawing
        return self._drawings[drawing_id]
2016-06-20 16:45:31 +00:00
2016-07-25 12:47:37 +00:00
@open_required
2016-06-23 09:17:23 +00:00
def get_drawing ( self , drawing_id ) :
2016-06-20 16:45:31 +00:00
"""
2016-06-23 09:17:23 +00:00
Return the Drawing or raise a 404 if the drawing is unknown
2016-06-20 16:45:31 +00:00
"""
try :
2016-06-23 09:17:23 +00:00
return self . _drawings [ drawing_id ]
2016-06-20 16:45:31 +00:00
except KeyError :
2016-06-23 09:17:23 +00:00
raise aiohttp . web . HTTPNotFound ( text = " Drawing ID {} doesn ' t exist " . format ( drawing_id ) )

    @open_required
    async def delete_drawing(self, drawing_id):
        drawing = self.get_drawing(drawing_id)
        if drawing.locked:
            raise aiohttp.web.HTTPConflict(text="Drawing ID {} cannot be deleted because it is locked".format(drawing_id))
        del self._drawings[drawing.id]
        self.dump()
        self.emit_notification("drawing.deleted", drawing.__json__())

    @open_required
    async def add_link(self, link_id=None, dump=True):
        """
        Create a link. By default the link is empty

        :param dump: Dump topology to disk
        """
        if link_id and link_id in self._links:
            return self._links[link_id]
        link = UDPLink(self, link_id=link_id)
        self._links[link.id] = link
        if dump:
            self.dump()
        return link

    @open_required
    async def delete_link(self, link_id, force_delete=False):
        link = self.get_link(link_id)
        del self._links[link.id]
        try:
            await link.delete()
        except Exception:
            if force_delete is False:
                raise
        self.dump()
        self.emit_notification("link.deleted", link.__json__())

    @open_required
    def get_link(self, link_id):
        """
        Return the Link or raise a 404 if the link is unknown
        """
        try:
            return self._links[link_id]
        except KeyError:
            raise aiohttp.web.HTTPNotFound(text="Link ID {} doesn't exist".format(link_id))

    @property
    def links(self):
        """
        :returns: Dictionary of the Links
        """
        if self._status == "closed":
            return self._get_closed_data("links", "link_id")
        return self._links

    @property
    def snapshots(self):
        """
        :returns: Dictionary of snapshots
        """
        return self._snapshots

    @open_required
    def get_snapshot(self, snapshot_id):
        """
        Return the snapshot or raise a 404 if the snapshot is unknown
        """
        try:
            return self._snapshots[snapshot_id]
        except KeyError:
            raise aiohttp.web.HTTPNotFound(text="Snapshot ID {} doesn't exist".format(snapshot_id))

    @open_required
    async def snapshot(self, name):
        """
        Snapshot the project

        :param name: Name of the snapshot
        """

        if name in [snap.name for snap in self._snapshots.values()]:
            raise aiohttp.web.HTTPConflict(text="The snapshot name {} already exists".format(name))
        snapshot = Snapshot(self, name=name)
        await snapshot.create()
        self._snapshots[snapshot.id] = snapshot
        return snapshot
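
    # Illustrative usage sketch (hypothetical name, not part of the original code):
    #
    #     snapshot = await project.snapshot("before-upgrade")
    #
    # Snapshot files are .gns3project archives stored under "<project>/snapshots"
    # and are rediscovered by reset() when the project is reopened.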

    @open_required
    async def delete_snapshot(self, snapshot_id):
        snapshot = self.get_snapshot(snapshot_id)
        del self._snapshots[snapshot.id]
        os.remove(snapshot.path)

    @locking
    async def close(self, ignore_notification=False):

        if self._status == "closed" or self._closing:
            return
        if self._loading:
            log.warning("Closing project '{}' ignored because it is being loaded".format(self.name))
            return
        self._closing = True
        await self.stop_all()
        for compute in list(self._project_created_on_compute):
            try:
                await compute.post("/projects/{}/close".format(self._id), dont_connect=True)
            # We don't care if a compute is down at this step
            except (ComputeError, aiohttp.web.HTTPError, aiohttp.ClientResponseError, TimeoutError):
                pass

        self._clean_pictures()
        self._status = "closed"
        if not ignore_notification:
            self.emit_notification("project.closed", self.__json__())
        self.reset()
        self._closing = False

    def _clean_pictures(self):
        """
        Delete unused pictures.
        """

        # The project has been deleted, is loading, or is not opened
        if not os.path.exists(self.path) or self._loading or self._status != "opened":
            return
        try:
            pictures = set(os.listdir(self.pictures_directory))
            for drawing in self._drawings.values():
                try:
                    resource_filename = drawing.resource_filename
                    if resource_filename:
                        pictures.remove(resource_filename)
                except KeyError:
                    pass

            # don't remove the supplier's logo
            if self.supplier:
                try:
                    logo = self.supplier['logo']
                    pictures.remove(logo)
                except KeyError:
                    pass

            for pic_filename in pictures:
                path = os.path.join(self.pictures_directory, pic_filename)
                log.info("Deleting unused picture '{}'".format(path))
                os.remove(path)
        except OSError as e:
            log.warning("Could not delete unused pictures: {}".format(e))

    async def delete(self):

        if self._status != "opened":
            try:
                await self.open()
            except aiohttp.web.HTTPConflict as e:
                # ignore missing images or other conflicts when deleting a project
                log.warning("Conflict while deleting project: {}".format(e.text))
        await self.delete_on_computes()
        await self.close()
        try:
            project_directory = get_default_project_directory()
            if not os.path.commonprefix([project_directory, self.path]) == project_directory:
                raise aiohttp.web.HTTPConflict(text="Project '{}' cannot be deleted because it is not in the default project directory: '{}'".format(self._name, project_directory))
            shutil.rmtree(self.path)
        except OSError as e:
            raise aiohttp.web.HTTPConflict(text="Cannot delete project directory {}: {}".format(self.path, str(e)))

    async def delete_on_computes(self):
        """
        Delete the project on computes but not on the controller
        """
        for compute in list(self._project_created_on_compute):
            if compute.id != "local":
                await compute.delete("/projects/{}".format(self._id))
                self._project_created_on_compute.remove(compute)
2016-07-26 08:32:43 +00:00
2016-04-26 12:34:49 +00:00
@classmethod
def _get_default_project_directory ( cls ) :
"""
Return the default location for the project directory
depending of the operating system
"""
server_config = Config . instance ( ) . get_section_config ( " Server " )
path = os . path . expanduser ( server_config . get ( " projects_path " , " ~/GNS3/projects " ) )
path = os . path . normpath ( path )
try :
os . makedirs ( path , exist_ok = True )
except OSError as e :
raise aiohttp . web . HTTPInternalServerError ( text = " Could not create project directory: {} " . format ( e ) )
return path
2016-06-15 16:22:11 +00:00
def _topology_file ( self ) :
2016-06-16 14:57:54 +00:00
return os . path . join ( self . path , self . _filename )

    @locking
    async def open(self):
        """
        Load topology elements
        """

        if self._closing is True:
            raise aiohttp.web.HTTPConflict(text="Project is closing, please try again in a few seconds...")

        if self._status == "opened":
            return

        self.reset()
        self._loading = True
        self._status = "opened"

        path = self._topology_file()
        if not os.path.exists(path):
            self._loading = False
            return
        try:
            shutil.copy(path, path + ".backup")
        except OSError:
            pass
        try:
            project_data = load_topology(path)

            # load the project metadata
            keys_to_load = [
                "auto_start",
                "auto_close",
                "auto_open",
                "scene_height",
                "scene_width",
                "zoom",
                "show_layers",
                "snap_to_grid",
                "show_grid",
                "grid_size",
                "drawing_grid_size",
                "show_interface_labels"
            ]

            for key in keys_to_load:
                val = project_data.get(key, None)
                if val is not None:
                    setattr(self, key, val)

            topology = project_data["topology"]
            for compute in topology.get("computes", []):
                await self.controller.add_compute(**compute)

            # Get all computes used in the project;
            # this is used to allocate application IDs for IOU nodes.
            for node in topology.get("nodes", []):
                compute_id = node.get("compute_id")
                if compute_id not in self._computes:
                    self._computes.append(compute_id)

            for node in topology.get("nodes", []):
                compute = self.controller.get_compute(node.pop("compute_id"))
                name = node.pop("name")
                node_id = node.pop("node_id", str(uuid.uuid4()))
                await self.add_node(compute, name, node_id, dump=False, **node)
            for link_data in topology.get("links", []):
                if 'link_id' not in link_data.keys():
                    # skip the link
                    continue
                link = await self.add_link(link_id=link_data["link_id"])
                if "filters" in link_data:
                    await link.update_filters(link_data["filters"])
                if "link_style" in link_data:
                    await link.update_link_style(link_data["link_style"])
                for node_link in link_data.get("nodes", []):
                    node = self.get_node(node_link["node_id"])
                    port = node.get_port(node_link["adapter_number"], node_link["port_number"])
                    if port is None:
                        log.warning("Port {}/{} for {} not found".format(node_link["adapter_number"], node_link["port_number"], node.name))
                        continue
                    if port.link is not None:
                        log.warning("Port {}/{} is already connected to link ID {}".format(node_link["adapter_number"], node_link["port_number"], port.link.id))
                        continue
                    await link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False)
                if len(link.nodes) != 2:
                    # a link should have 2 attached nodes, this can happen with corrupted projects
                    await self.delete_link(link.id, force_delete=True)
            for drawing_data in topology.get("drawings", []):
                await self.add_drawing(dump=False, **drawing_data)

            self.dump()
        # We catch all errors to be able to roll back the .gns3 file to its previous state
        except Exception as e:
            for compute in list(self._project_created_on_compute):
                try:
                    await compute.post("/projects/{}/close".format(self._id))
                # We don't care if a compute is down at this step
                except (ComputeError, aiohttp.web.HTTPNotFound, aiohttp.web.HTTPConflict, aiohttp.ServerDisconnectedError):
                    pass
            try:
                if os.path.exists(path + ".backup"):
                    shutil.copy(path + ".backup", path)
            except OSError:
                pass
            self._status = "closed"
            self._loading = False
            if isinstance(e, ComputeError):
                raise aiohttp.web.HTTPConflict(text=str(e))
            else:
                raise e
        try:
            os.remove(path + ".backup")
        except OSError:
            pass

        self._loading = False
        # Should we start the nodes when the project is opened?
        if self._auto_start:
            # Start all nodes in the background without waiting for completion;
            # we ignore errors because we want to let the user open
            # their project and fix it
            asyncio.ensure_future(self.start_all())

    async def wait_loaded(self):
        """
        Wait until the project finishes loading
        """
        while self._loading:
            await asyncio.sleep(0.5)

    async def duplicate(self, name=None, location=None, reset_mac_addresses=True):
        """
        Duplicate a project

        It's the 'save as' feature of the 1.X series. It's implemented on top of the
        export / import features. It generates a .gns3p archive and reimports it.
        It's a little slower, but we only have one implementation to maintain.

        :param name: Name of the new project. A new one will be generated in case of conflicts
        :param location: Parent directory of the new project
        :param reset_mac_addresses: Reset MAC addresses for the new project
        """
        # If the project was not opened, we open it temporarily
        previous_status = self._status
        if self._status == "closed":
            await self.open()

        self.dump()
        assert self._status != "closed"
        try:
            begin = time.time()

            # use the parent directory of the project we are duplicating as a
            # temporary directory to avoid "no space left" issues when '/tmp'
            # is located on another partition.
            if location:
                working_dir = os.path.abspath(os.path.join(location, os.pardir))
            else:
                working_dir = os.path.abspath(os.path.join(self.path, os.pardir))

            with tempfile.TemporaryDirectory(dir=working_dir) as tmpdir:
                # Do not compress the exported project when duplicating
                with aiozipstream.ZipFile(compression=zipfile.ZIP_STORED) as zstream:
                    await export_project(zstream, self, tmpdir, keep_compute_id=True, allow_all_nodes=True, reset_mac_addresses=reset_mac_addresses)

                    # export the project to a temporary location
                    project_path = os.path.join(tmpdir, "project.gns3p")
                    log.info("Exporting project to '{}'".format(project_path))
                    async with aiofiles.open(project_path, 'wb') as f:
                        async for chunk in zstream:
                            await f.write(chunk)

                    # import the temporary project
                    with open(project_path, "rb") as f:
                        project = await import_project(self._controller, str(uuid.uuid4()), f, location=location, name=name, keep_compute_id=True)

            log.info("Project '{}' duplicated in {:.4f} seconds".format(project.name, time.time() - begin))
        except (ValueError, OSError, UnicodeEncodeError) as e:
            raise aiohttp.web.HTTPConflict(text="Cannot duplicate project: {}".format(str(e)))

        if previous_status == "closed":
            await self.close()

        return project
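
    # Illustrative usage sketch (hypothetical name and location, not part of the original code):
    #
    #     copy = await project.duplicate(name="lab-copy", location="/opt/gns3/projects/lab-copy")
    #
    # Internally this is export_project() followed by import_project() on an uncompressed
    # temporary .gns3p archive, so duplication shares the same code path as export/import.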

    def is_running(self):
        """
        Return True if a node is started or paused
        """
        for node in self._nodes.values():
            # Some node types are always running; we ignore them
            if node.status != "stopped" and not node.is_always_running():
                return True
        return False

    def dump(self):
        """
        Dump topology to disk
        """
        try:
            topo = project_to_topology(self)
            path = self._topology_file()
            log.debug("Write %s", path)
            with open(path + ".tmp", "w+", encoding="utf-8") as f:
                json.dump(topo, f, indent=4, sort_keys=True)
            shutil.move(path + ".tmp", path)
        except OSError as e:
            raise aiohttp.web.HTTPInternalServerError(text="Could not write topology: {}".format(e))
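
    # Design note (not part of the original code): dump() writes to "<file>.gns3.tmp"
    # and then moves it over the real file, so an interrupted write is unlikely to
    # leave a truncated .gns3 behind.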

    @open_required
    async def start_all(self):
        """
        Start all nodes
        """
        pool = Pool(concurrency=3)
        for node in self.nodes.values():
            pool.append(node.start)
        await pool.join()

    @open_required
    async def stop_all(self):
        """
        Stop all nodes
        """
        pool = Pool(concurrency=3)
        for node in self.nodes.values():
            pool.append(node.stop)
        await pool.join()

    @open_required
    async def suspend_all(self):
        """
        Suspend all nodes
        """
        pool = Pool(concurrency=3)
        for node in self.nodes.values():
            pool.append(node.suspend)
        await pool.join()

    @open_required
    async def duplicate_node(self, node, x, y, z):
        """
        Duplicate a node

        :param node: Node instance
        :param x: X position
        :param y: Y position
        :param z: Z position
        :returns: New node
        """

        if node.status != "stopped" and not node.is_always_running():
            raise aiohttp.web.HTTPConflict(text="Cannot duplicate node data while the node is running")

        data = copy.deepcopy(node.__json__(topology_dump=True))
        # Some properties, like the internal ID, should not be duplicated
        for unique_property in (
                'node_id',
                'name',
                'mac_addr',
                'mac_address',
                'compute_id',
                'application_id',
                'dynamips_id'):
            data.pop(unique_property, None)
            if 'properties' in data:
                data['properties'].pop(unique_property, None)
        node_type = data.pop('node_type')
        data['x'] = x
        data['y'] = y
        data['z'] = z
        data['locked'] = False  # duplicated node must not be locked
        new_node_uuid = str(uuid.uuid4())
        new_node = await self.add_node(node.compute,
                                       node.name,
                                       new_node_uuid,
                                       node_type=node_type,
                                       **data)
        try:
            await node.post("/duplicate", timeout=None, data={
                "destination_node_id": new_node_uuid
            })
        except aiohttp.web.HTTPNotFound as e:
            await self.delete_node(new_node_uuid)
            raise aiohttp.web.HTTPConflict(text="This node type cannot be duplicated")
        except aiohttp.web.HTTPConflict as e:
            await self.delete_node(new_node_uuid)
            raise e
        return new_node
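
    # Illustrative usage sketch (hypothetical coordinates, not part of the original code):
    #
    #     new_node = await project.duplicate_node(node, x=100, y=100, z=1)
    #
    # Unique properties (node_id, name, MAC address, dynamips_id, ...) are stripped
    # before the copy is created, then the compute is asked to copy the node data
    # via POST /duplicate.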

    def stats(self):

        return {
            "nodes": len(self._nodes),
            "links": len(self._links),
            "drawings": len(self._drawings),
            "snapshots": len(self._snapshots)
        }

    def __json__(self):

        return {
            "name": self._name,
            "project_id": self._id,
            "path": self._path,
            "filename": self._filename,
            "status": self._status,
            "auto_start": self._auto_start,
            "auto_close": self._auto_close,
            "auto_open": self._auto_open,
            "scene_height": self._scene_height,
            "scene_width": self._scene_width,
            "zoom": self._zoom,
            "show_layers": self._show_layers,
            "snap_to_grid": self._snap_to_grid,
            "show_grid": self._show_grid,
            "grid_size": self._grid_size,
            "drawing_grid_size": self._drawing_grid_size,
            "show_interface_labels": self._show_interface_labels,
            "supplier": self._supplier,
            "variables": self._variables
        }

    def __repr__(self):

        return "<gns3server.controller.Project {} {}>".format(self._name, self._id)