diff --git a/.travis.yml b/.travis.yml
index 6ac11c63..e4f308b5 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,6 +16,8 @@ before_deploy:
deploy:
provider: pypi
+ edge:
+ branch: v1.8.45
user: noplay
password:
secure: Fa66zp8ML4oSGwzkUMZi07MIYfO3tbS5gHFUaLN2mk2MBknhCjDYexmFJqT//sC/+xqv6sSJE6rz1EPoy/THbxj8R96ZgIyiUZIbDCbzgdy92d7J/eusrDoNdpApBLke8NqQqtFETb3addMZZNofQ3IDANFD2m2jY+KECU8z8NI=
diff --git a/CHANGELOG b/CHANGELOG
index f9041808..f7e7cc88 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,5 +1,57 @@
# Change Log
+## 2.1.4 12/03/2018
+
+* Add Juniper JunOS space appliance.
+* Sync checkpoint gaia appliance template.
+* Sync appliance templates.
+* Make sure we use an IPv4 address in the remote install script.
+* Delete old pcap file when starting a new packet capture.
+* Fix bug preventing export of portable projects with IOU images.
+* Ignore invalid BPF filters. Ref #1290.
+* Different approach to handle no data returned by uBridge hypervisors. Fixes #1289.
+* Do not raise exception if Dynamips or uBridge hypervisor don't return data and are still running. Fixes #1289
+* Fix Dynamips private config not loaded into nvram when starting a router. Fixes #1313.
+* Make sure we don't try to read when opening a file in binary mode. Fixes #1301.
+* Compatibility with controller, default_symbol and hover_symbol, Fixes: #2444
+* Filter snapshots directory during the snapshot, Fixes: #1297
+* Handle docker env with last empty line, Fixes: #2420
+* Require uBridge version 0.9.14 on Linux
+* Pywin32 instead of pypiwin32, Ref. #1276
+* Fix missing 'locales' package in base image
+* Implement a minimum interval between psutil calls. Fixes #2262
+* Fix error when appliance template is broken (missing fields). Fixes #1287.
+* Fix "Change of linked base VM doesn't work with templates migrated from 2.0"
+* Fix "Unable to override non-custom VMware adapter".
+* Let a project be opened when a port cannot be found (can happen if a project is corrupted).
+* Add an error message when Docker container is not ready to be started. Ref #1281.
+* Update documentation.
+* Sync appliance files.
+* Fix issue when running multiple projects containing IOU nodes on the same server. Ref #1239.
+* Set first byte to 52 when generating a random MAC address for a Qemu VM. Ref #1267.
+* Update link state and save project when a link is suspended or filters are added/removed (without node properties set).
+* More generic dependency for pypiwin32, Ref. #1276
+
+## 2.1.3 19/01/2018
+
+* Update appliance files.
+* Suspend for Docker nodes.
+* Unlock yarl version and multidict
+* Fix same MAC address for duplicated Qemu nodes.
+* Fix same base MAC for duplicated IOS routers. Fixes #1264.
+* Fix "Creating multiple IOU nodes at once assigns the same application id". Fixes #1239.
+* Fix "Transport selection via DSN is deprecated" message. Sync is configured with HTTPTransport.
+* Refresh CPU/RAM info every 1 second. Ref #2262.
+* Rename ethernet switch arp command to mac
+* Fix error while getting appliance list. Fixes #1258.
+* Fix UnboundLocalError: local variable 'node' referenced before assignment. Fixes #1256.
+* Default symbol must be computer.svg
+* Compatibility for old node templates (those with default_symbol and hover_symbol properties).
+* Fix problem when searching for VBoxManage. Fixes #1261.
+* Improve the search for VBoxManage.
+* Fixing race condition when starting the GNS3 VM.
+* Default VPCS name format is now PC-{0}.
+
## 2.1.2 08/01/2018
* Do not show log message if configuration file doesn't exist. Fixes #1206.
diff --git a/Dockerfile b/Dockerfile
index fec7d333..96f84c4b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,7 +5,6 @@ FROM ubuntu:16.04
ENV DEBIAN_FRONTEND noninteractive
# Set the locale
-RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
@@ -13,6 +12,7 @@ ENV LC_ALL en_US.UTF-8
RUN apt-get update && apt-get install -y software-properties-common
RUN add-apt-repository ppa:gns3/ppa
RUN apt-get update && apt-get install -y \
+ locales \
python3-pip \
python3-dev \
qemu-system-x86 \
@@ -21,6 +21,8 @@ RUN apt-get update && apt-get install -y \
libvirt-bin \
x11vnc
+RUN locale-gen en_US.UTF-8
+
# Install uninstall to install dependencies
RUN apt-get install -y vpcs ubridge
diff --git a/docs/curl.rst b/docs/curl.rst
index 02cc2b04..a4acd4d6 100644
--- a/docs/curl.rst
+++ b/docs/curl.rst
@@ -1,30 +1,34 @@
-Sample session using curl
-=========================
+Sample sessions using curl
+==========================
-You need to read the :doc:`glossary`, and :doc:`general` before.
+Read the :doc:`glossary` and :doc:`general` pages first.
-Full endpoints list is available: :doc:`endpoints`
+A list of all endpoints is available in :doc:`endpoints`
.. warning::
- Beware the output of this sample is truncated in order
- to simplify the understanding. Please read the
- documentation for the exact output.
+ Note that the output of the samples may be truncated in
+ order to keep them easy to understand. Please read the
+ documentation for the exact output.
-You can check the server version with a simple curl command:
+Server version
+###############
+
+Check the server version with a simple curl command:
.. code-block:: shell-session
# curl "http://localhost:3080/v2/version"
{
- "version": "2.0.0dev1"
+ "local": false,
+ "version": "2.1.4"
}
List computes
##############
-We will list the computes node where we can run our nodes:
+List all the compute servers:
.. code-block:: shell-session
@@ -34,20 +38,20 @@ We will list the computes node where we can run our nodes:
"compute_id": "local",
"connected": true,
"host": "127.0.0.1",
- "name": "Local",
+ "name": "local",
"port": 3080,
"protocol": "http",
"user": "admin"
}
]
-In this sample we have only one compute where we can run our nodes. This compute as a special id: local. This
-mean it's the local server embed in the GNS3 controller.
+In this example there is only one compute server on which nodes can be run.
+This compute has a special id: local. It is the local server which is embedded in the GNS3 controller.
-Create project
-###############
+Create a project
+#################
-The next step is to create a project.
+The next step is to create a project:
.. code-block:: shell-session
@@ -60,7 +64,7 @@ The next step is to create a project.
Create nodes
#############
-With this project id we can now create two VPCS Node.
+Using the project id, it is now possible to create two VPCS nodes:
.. code-block:: shell-session
@@ -87,15 +91,14 @@ With this project id we can now create two VPCS Node.
"node_id": "83892a4d-aea0-4350-8b3e-d0af3713da74",
"node_type": "vpcs",
"project_id": "b8c070f7-f34c-4b7b-ba6f-be3d26ed073f",
+ "properties": {},
"status": "stopped"
}
-The properties dictionnary contains all setting specific to a node type (dynamips, docker, vpcs...)
-
Link nodes
###########
-Now we need to link the two VPCS by connecting their port 0 together.
+The two VPCS nodes can be linked together using their port number 0 (VPCS has only one network adapter with one port):
.. code-block:: shell-session
@@ -123,7 +126,7 @@ Now we need to link the two VPCS by connecting their port 0 together.
Start nodes
###########
-Now we can start the two nodes.
+Start the two nodes:
.. code-block:: shell-session
@@ -133,8 +136,8 @@ Now we can start the two nodes.
Connect to nodes
#################
-Everything should be started now. You can connect via telnet to the different Node.
-The port is the field console in the create Node request.
+Use a Telnet client to connect to the nodes once they have been started.
+The port number can be found in the output when the nodes have been created above.
.. code-block:: shell-session
@@ -200,7 +203,7 @@ The port is the field console in the create Node request.
Stop nodes
##########
-And we stop the two nodes.
+Stop the two nodes:
.. code-block:: shell-session
@@ -208,40 +211,41 @@ And we stop the two nodes.
# curl -X POST "http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/nodes/83892a4d-aea0-4350-8b3e-d0af3713da74/stop" -d "{}"
-Add a visual element
-######################
+Add visual elements
+####################
-When you want add visual elements to the topology like rectangle, circle, images you can just send a raw SVG.
-This will display a red square in the middle of your topologies:
+Visual elements like rectangles, ellipses or images can be added to a project in the form of raw SVG.
+This will display a red square in the middle of your canvas:
.. code-block:: shell-session
# curl -X POST "http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/drawings" -d '{"x":0, "y": 12, "svg": ""}'
-Tips: you can embed png/jpg... by using a base64 encoding in the SVG.
+Tip: embed PNG, JPEG etc. images using base64 encoding in the SVG.
-Add filter to the link
-######################
+Add a packet filter
+####################
-Filter allow you to add error on a link.
+Packet filters allow packets to be filtered on a given link. The following example drops one packet out of every 5:
.. code-block:: shell-session
curl -X PUT "http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/links/007f2177-6790-4e1b-ac28-41fa226b2a06" -d '{"filters": {"frequency_drop": [5]}}'
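+
+As a quick example (reusing the link id from above), sending an empty
+``filters`` dictionary should clear the filter again; other filter types are
+listed in the endpoints documentation:
+
+.. code-block:: shell-session
+
+    # curl -X PUT "http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/links/007f2177-6790-4e1b-ac28-41fa226b2a06" -d '{"filters": {}}'
+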
-Creation of nodes
-#################
+Node creation
+##############
-Their is two way of adding nodes. Manual by passing all the information require for a Node.
+There are two ways to add nodes.
-Or by using an appliance. The appliance is a node model saved in your server.
+1. Manually by passing all the information required to create a new node.
+2. Using an appliance template stored on your server.
-Using an appliance
-------------------
+Using an appliance template
+---------------------------
-First you need to list the available appliances
+List all the available appliance templates:
.. code-block:: shell-session
@@ -268,15 +272,15 @@ First you need to list the available appliances
}
]
-Now you can use the appliance and put it at a specific position
+Use the appliance template and add coordinates to select where the node will be put on the canvas:
.. code-block:: shell-session
- # curl -X POST http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f -d '{"x": 12, "y": 42}'
+ # curl -X POST http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/appliances/9cd59d5a-c70f-4454-8313-6a9e81a8278f -d '{"x": 12, "y": 42}'
Manual creation of a Qemu node
--------------------------------
+------------------------------
.. code-block:: shell-session
@@ -360,7 +364,7 @@ Manual creation of a Qemu node
}
-Manual creation of a dynamips node
+Manual creation of a Dynamips node
-----------------------------------
.. code-block:: shell-session
@@ -486,7 +490,7 @@ Manual creation of a dynamips node
Notifications
#############
-You can see notification about the changes via the notification feed:
+Notifications can be seen by connecting to the notification feed:
.. code-block:: shell-session
@@ -494,14 +498,14 @@ You can see notification about the changes via the notification feed:
{"action": "ping", "event": {"compute_id": "local", "cpu_usage_percent": 35.7, "memory_usage_percent": 80.7}}
{"action": "node.updated", "event": {"command_line": "/usr/local/bin/vpcs -p 5001 -m 1 -i 1 -F -R -s 10001 -c 10000 -t 127.0.0.1", "compute_id": "local", "console": 5001, "console_host": "127.0.0.1", "console_type": "telnet", "name": "VPCS 2", "node_id": "83892a4d-aea0-4350-8b3e-d0af3713da74", "node_type": "vpcs", "project_id": "b8c070f7-f34c-4b7b-ba6f-be3d26ed073f", "properties": {"startup_script": null, "startup_script_path": null}, "status": "started"}}
-A websocket version is also available on http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/notifications/ws
+A Websocket notification stream is also available on http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/notifications/ws
-Read :doc:`notifications` for more informations
+Read :doc:`notifications` for more information.
-How to found the endpoints?
+How to find the endpoints?
###########################
-Full endpoints list is available: :doc:`endpoints`
+A list of all endpoints is available: :doc:`endpoints`
-If you start the server with **--debug** you can see all the requests made by the client and by the controller to the computes nodes.
+Tip: requests made by a client and by the controller to the compute nodes can be seen if the server is started with the **--debug** parameter.
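+
+For example, assuming the server is installed as the ``gns3server`` command:
+
+.. code-block:: shell-session
+
+    # gns3server --debug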
diff --git a/docs/development.rst b/docs/development.rst
index a6fe5bb9..ec73a073 100644
--- a/docs/development.rst
+++ b/docs/development.rst
@@ -4,27 +4,25 @@ Development
Code convention
===============
-You should respect all the PEP8 convention except the
-rule about max line length.
+Respect all the PEP8 conventions except the max line length rule.
Source code
===========
-Source code is available on github under GPL V3 licence:
+Source code is available on GitHub under the GPL v3 licence:
https://github.com/GNS3/
The GNS3 server: https://github.com/GNS3/gns3-server
-The Qt GUI: https://github.com/GNS3/gns3-gui
+The GNS3 user interface: https://github.com/GNS3/gns3-gui
Documentation
==============
-In the gns3-server project.
+The documentation can be found in the gns3-server project.
Build doc
----------
-In the project root folder:
.. code-block:: bash
@@ -41,4 +39,3 @@ Run tests
.. code-block:: bash
py.test -v
-
diff --git a/docs/endpoints.rst b/docs/endpoints.rst
index c8a2c596..a5e110ed 100644
--- a/docs/endpoints.rst
+++ b/docs/endpoints.rst
@@ -1,21 +1,22 @@
Endpoints
------------
-GNS3 expose two type of endpoints:
+GNS3 exposes two types of endpoints:
- * Controller
- * Compute
+ * Controller endpoints
+ * Compute endpoints
-Controller API Endpoints
-~~~~~~~~~~~~~~~~~~~~~~~~
+Controller endpoints
+~~~~~~~~~~~~~~~~~~~~~
-The controller manage all the running topologies. The controller
-has knowledge of everything on in GNS3. If you want to create and
-manage a topology it's here. The controller will call the compute API
-when needed.
+The controller manages everything; it is the central decision point
+and has a complete view of your network topologies: which nodes run on
+which compute server, the links between them etc.
-In a standard GNS3 installation you have one controller and one or many
-computes.
+This is the high level API which users can call directly to manually control
+the GNS3 backend. The controller will call the compute endpoints when needed.
+
+A standard GNS3 setup is to have one controller and one or many computes.
.. toctree::
:glob:
@@ -24,14 +25,15 @@ computes.
api/v2/controller/*
-Compute API Endpoints
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+Compute endpoints
+~~~~~~~~~~~~~~~~~~
-The compute is the GNS3 process running on a server and controlling
-the VM process.
+A compute is the GNS3 process running on a host. It controls emulators in order to run nodes
+(e.g. VMware VMs with VMware Workstation, IOS routers with Dynamips etc.)
.. WARNING::
- Consider this endpoints as a private API used by the controller.
+ These endpoints should be considered low level and private.
+ They should only be used by the controller or for debugging purposes.
.. toctree::
:glob:
diff --git a/docs/file_format.rst b/docs/file_format.rst
index d25b51ef..89a2e3ff 100644
--- a/docs/file_format.rst
+++ b/docs/file_format.rst
@@ -1,11 +1,11 @@
-GNS3 file formats
-=================
+The GNS3 files
+===============
-The .gns3
-##########
+.gns3 files
+############
-It's the topology file of GNS3 this file is a JSON with all
-the informations about what is inside the topology.
+A .gns3 file is the GNS3 project file, in JSON format, with all
+the information necessary to save a project.
A minimal version:
@@ -30,34 +30,34 @@ The revision is the version of file format:
* 4: GNS3 1.5
* 3: GNS3 1.4
* 2: GNS3 1.3
-* 1: GNS3 1.0, 1.1, 1.2 (Not mentionned in the topology file)
+* 1: GNS3 1.0, 1.1, 1.2 (Not mentioned in the file)
-And the full JSON schema:
+The full JSON schema can be found below:
.. literalinclude:: gns3_file.json
-The .net
-#########
-It's topologies made for GNS3 0.8
+.net files
+###########
+
+Topology files made for GNS3 <= version 1.0. Not supported.
-The .gns3p or .gns3project
-###########################
-
-It's a zipped version of the .gns3 and all files require for
-a topology. The images could be included inside but are optionnals.
-
-The zip could be a ZIP64 if the project is too big for standard
-zip file.
-
-The .gns3a or .gns3appliance
+.gns3p or .gns3project files
#############################
-This file contains details on how to import an appliance in GNS3.
+This is a zipped version of a .gns3 file and includes all the files required to easily share a project.
+The binary images can optionally be included.
-A JSON schema is available here:
+The zip can be a ZIP64 if the project is too big for a standard zip file.
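+
+For example, a portable project archive can be downloaded through the
+controller API (the project id below is a placeholder; see :doc:`endpoints`
+for the exact route):
+
+.. code-block:: shell-session
+
+    # curl -o myproject.gns3project "http://localhost:3080/v2/projects/b8c070f7-f34c-4b7b-ba6f-be3d26ed073f/export"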
+
+.gns3a or .gns3appliance files
+##############################
+
+These files contain everything needed to create a new appliance template in GNS3.
+
+A JSON schema is available here:
https://github.com/GNS3/gns3-registry/blob/master/schemas/appliance.json
-And samples here:
+And samples here:
https://github.com/GNS3/gns3-registry/tree/master/appliances
diff --git a/docs/general.rst b/docs/general.rst
index 4bb1dd52..f5f34d80 100644
--- a/docs/general.rst
+++ b/docs/general.rst
@@ -1,29 +1,27 @@
General
-################
+#######
Architecture
============
-GNS3 is splitted in four part:
+GNS3 can be divided into four parts:
- * the GUI (project gns3-gui, gns3-web)
- * the controller (project gns3-server)
- * the compute (project gns3-server)
- * the emulators (qemu, iou, dynamips...)
+ * the user interface or GUI (gns3-gui or gns3-web projects)
+ * the controller (gns3-server project)
+ * the compute (part of the gns3-server project)
+ * the emulators (Qemu, Dynamips, VirtualBox...)
-The controller pilot everything it's the part that manage the state
-of a project, save it on disk. Only one controller exists.
+The controller pilots everything; it manages the state
+of each project. Only one controller should run.
+The GUI displays a topology representing a project on a canvas and allows the user to
+perform actions on a given project by sending API requests to the controller.
-The GUI display the topology. The GUI has only direct contact with
-the controller.
+The compute controls emulators to run nodes. A compute that is on
+the same server as the controller runs in the same process.
-The compute are where emulator are executed. If the compute is on
-the same server as the controller, they are in the same process.
-
-
-For each node of the topology will start an emulator instance.
+The compute usually starts an emulator instance for each node.
A small schema::
@@ -42,19 +40,18 @@ A small schema::
+--------+
-If you want to pilot GNS3 you need to use the controller API.
+Use the controller API to work with the GNS3 backend.
Communications
-===============
+==============
-All the communication are done over HTTP using JSON.
+All communication is done over HTTP using the JSON format.
Errors
======
-In case of error a standard HTTP error is raise and you got a
-JSON like that
+A standard HTTP error is sent in case of an error:
.. code-block:: json
@@ -63,10 +60,6 @@ JSON like that
"message": "Conflict"
}
-409 error could be display to the user. They are normal behavior
-they are used to warn user about something he should change and
-they are not an internal software error.
-
Limitations
============
@@ -74,37 +67,34 @@ Limitations
Concurrency
------------
-A node can't process multiple request in the same time. But you can make
-multiple request on multiple node. It's transparent for the client
-when the first request on a Node start a lock is acquire for this node id
-and released for the next request at the end. You can safely send all
-the requests in the same time and let the server manage an efficent concurrency.
-
-We think it can be a little slower for some operations, but it's remove a big
-complexity for the client due to the fact only some command on some node can be
-concurrent.
+A node cannot process multiple requests at the same time. However,
+multiple requests on multiple nodes can be executed concurrently.
+This should be transparent for clients since internal locks are used inside the server,
+so it is safe to send multiple requests at the same time and let the server
+manage the concurrency.
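+
+For example, two different nodes can safely be started in parallel from a
+shell (project and node ids below are placeholders):
+
+.. code-block:: shell-session
+
+    # curl -X POST "http://localhost:3080/v2/projects/<project_id>/nodes/<node1_id>/start" -d "{}" &
+    # curl -X POST "http://localhost:3080/v2/projects/<project_id>/nodes/<node2_id>/start" -d "{}" &
+    # wait
+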
Authentication
------------------
+--------------
-You can use HTTP basic auth to protect the access to the API. And run
-the API over HTTPS.
+HTTP basic authentication can be used to prevent unauthorized API requests.
+It is recommended to set up a VPN if the communication between clients and the server must be encrypted.
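+
+With authentication enabled in the server configuration, requests simply carry
+HTTP basic credentials, for example (username and password are placeholders):
+
+.. code-block:: shell-session
+
+    # curl -u admin:password "http://localhost:3080/v2/version"
+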
Notifications
=============
-You can receive notification from the server if you listen the HTTP stream /notifications or the websocket.
+Notifications can be received from the server by listening to an HTTP stream or via a Websocket.
-Read :doc:`notifications` for more informations
+Read :doc:`notifications` for more information.
Previous versions
=================
API version 1
-------------
-Shipped with GNS3 1.3, 1.4 and 1.5.
-This API doesn't support the controller system and save used a commit system instead of live save.
+
+Shipped with GNS3 1.3, 1.4 and 1.5.
+This API doesn't support the controller architecture.
diff --git a/docs/glossary.rst b/docs/glossary.rst
index 9b25df9f..2260eb45 100644
--- a/docs/glossary.rst
+++ b/docs/glossary.rst
@@ -4,44 +4,41 @@ Glossary
Topology
--------
-The place where you have all things (node, drawing, link...)
-
+Contains everything needed to represent a virtual network (nodes, visual elements, links...)
Node
------
+----
-A Virtual Machine (Dynamips, IOU, Qemu, VPCS...), a cloud, a builtin device (switch, hub...)
+A Virtual Machine (Dynamips, IOU, Qemu, VPCS...) or builtin node (cloud, switch, hub...) that runs on a compute.
Appliance
---------
-A model for a node. When you drag an appliance to the topology a node is created.
-
+A model used to create a node. When you drag an appliance onto the topology, a node is created.
Appliance template
------------------
-A file (.gns3a) use for creating new node model.
+A file (.gns3a) used to create a new node.
Drawing
---------
+-------
-Drawing are visual element not used by the network emulation. Like
-text, images, rectangle... They are pure SVG elements.
+A drawing is a visual element like annotations, images, rectangles etc. They are pure SVG elements.
Adapter
-------
-The physical network interface. The adapter can contain multiple ports.
+A physical network interface, like a PCI card. The adapter can contain multiple ports.
Port
----
-A port is an opening on network adapter that cable plug into.
+A port is an opening on a network adapter where a cable can be plugged in.
-For example a VM can have a serial and an ethernet adapter plugged in.
-The ethernet adapter can have 4 ports.
+For example a VM can have a serial and an Ethernet adapter.
+The Ethernet adapter itself can have 4 ports.
Controller
----------
@@ -50,20 +47,23 @@ The central server managing everything in GNS3. A GNS3 controller
will manage multiple GNS3 compute node.
Compute
-----------
+-------
The process running on each server with GNS3. The GNS3 compute node
is controlled by the GNS3 controller.
Symbol
------
-Symbol are the icon used for nodes.
+
+A symbol is an icon used to represent a node on a scene.
Scene
-----
-The drawing area
+
+A scene is the drawing area or canvas.
Filter
------
-Packet filter this allow to add latency or packet drop.
+
+A packet filter, for instance to add latency on a link or to drop packets.
diff --git a/docs/index.rst b/docs/index.rst
index c02cea54..dead8e53 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -2,17 +2,13 @@ Welcome to API documentation!
======================================
.. WARNING::
- This documentation are for developers for user documentation go
- to https://gns3.com/
-
- The API is not stable, feel free to post comments on our website
- https://gns3.com/
+ This documentation is intended for developers. The user documentation is
+ available at https://gns3.com/
-This documentation cover the GNS3 API and ressources for GNS3 developers.
+This documentation describes the GNS3 API and provides information for GNS3 developers.
-
-If you want a quick demo on how to use the API read: :doc:`curl`
+For a quick demo on how to use the API, read :doc:`curl`.
API
----
@@ -26,8 +22,8 @@ API
position
endpoints
-GNS3 developements
-------------------
+GNS3 development
+----------------
.. toctree::
development
file_format
diff --git a/docs/notifications.rst b/docs/notifications.rst
index e0430723..13dc7878 100644
--- a/docs/notifications.rst
+++ b/docs/notifications.rst
@@ -1,17 +1,17 @@
Notifications
=============
-You can receive notification from the controller allowing you to update your local data.
+Notifications can be received from the controller; they can be used to update your local data.
-Notifications endpoints
-***********************
+Notification endpoints
+**********************
-You can listen the HTTP stream /notifications or the websocket.
+Listen to the HTTP stream endpoint or to the Websocket endpoint.
* :doc:`api/v2/controller/project/projectsprojectidnotifications`
* :doc:`api/v2/controller/project/projectsprojectidnotificationsws`
-We recommend using the websocket.
+It is recommended to use the Websocket endpoint.
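+
+For example, the HTTP stream can be followed with curl (the project id is a
+placeholder):
+
+.. code-block:: shell-session
+
+    # curl "http://localhost:3080/v2/projects/<project_id>/notifications"
+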
Available notifications
***********************
@@ -21,7 +21,7 @@ Available notifications
ping
----
-Keep the connection between client and controller.
+Keep-alive between client and controller. Also used to receive the current CPU and memory usage.
.. literalinclude:: api/notifications/ping.json
@@ -29,7 +29,7 @@ Keep the connection between client and controller.
compute.created
----------------
-Compute has been created.
+A compute has been created.
.. literalinclude:: api/notifications/compute.created.json
@@ -37,9 +37,7 @@ Compute has been created.
compute.updated
----------------
-Compute has been updated. You will receive a lot of this
-event because it's include change of CPU and memory usage
-on the compute node.
+A compute has been updated.
.. literalinclude:: api/notifications/compute.updated.json
@@ -47,7 +45,7 @@ on the compute node.
compute.deleted
---------------
-Compute has been deleted.
+A compute has been deleted.
.. literalinclude:: api/notifications/compute.deleted.json
@@ -55,7 +53,7 @@ Compute has been deleted.
node.created
------------
-Node has been created.
+A node has been created.
.. literalinclude:: api/notifications/node.created.json
@@ -63,7 +61,7 @@ Node has been created.
node.updated
------------
-Node has been updated.
+A node has been updated.
.. literalinclude:: api/notifications/node.updated.json
@@ -71,7 +69,7 @@ Node has been updated.
node.deleted
------------
-Node has been deleted.
+A node has been deleted.
.. literalinclude:: api/notifications/node.deleted.json
@@ -79,8 +77,8 @@ Node has been deleted.
link.created
------------
-Link has been created. Note that a link when created
-is not yet connected to both part.
+A link has been created. Note that a link is not connected
+to any node when it is created.
.. literalinclude:: api/notifications/link.created.json
@@ -88,7 +86,7 @@ is not yet connected to both part.
link.updated
------------
-Link has been updated.
+A link has been updated.
.. literalinclude:: api/notifications/link.updated.json
@@ -96,7 +94,7 @@ Link has been updated.
link.deleted
------------
-Link has been deleted.
+A link has been deleted.
.. literalinclude:: api/notifications/link.deleted.json
@@ -104,7 +102,7 @@ Link has been deleted.
drawing.created
---------------
-Drawing has been created.
+A drawing has been created.
.. literalinclude:: api/notifications/drawing.created.json
@@ -112,8 +110,8 @@ Drawing has been created.
drawing.updated
---------------
-Drawing has been updated. To reduce data transfert if the
-svg field has not change the field is not included.
+A drawing has been updated. The svg field is only included if it
+has changed in order to reduce data transfer.
.. literalinclude:: api/notifications/drawing.updated.json
@@ -121,7 +119,7 @@ svg field has not change the field is not included.
drawing.deleted
---------------
-Drawing has been deleted.
+A drawing has been deleted.
.. literalinclude:: api/notifications/drawing.deleted.json
@@ -129,7 +127,7 @@ Drawing has been deleted.
project.updated
---------------
-Project has been updated.
+A project has been updated.
.. literalinclude:: api/notifications/project.updated.json
@@ -137,7 +135,7 @@ Project has been updated.
project.closed
---------------
-Project has been closed.
+A project has been closed.
.. literalinclude:: api/notifications/project.closed.json
@@ -145,14 +143,14 @@ Project has been closed.
snapshot.restored
--------------------------
-Snapshot has been restored
+A snapshot has been restored.
.. literalinclude:: api/notifications/project.snapshot_restored.json
log.error
---------
-Send an error to the user
+Sends an error.
.. literalinclude:: api/notifications/log.error.json
@@ -160,7 +158,7 @@ Send an error to the user
log.warning
------------
-Send a warning to the user
+Sends a warning.
.. literalinclude:: api/notifications/log.warning.json
@@ -168,7 +166,7 @@ Send a warning to the user
log.info
---------
-Send an information to the user
+Sends an informational message.
.. literalinclude:: api/notifications/log.info.json
@@ -176,8 +174,6 @@ Send an information to the user
settings.updated
-----------------
-GUI settings updated. Will be removed in a later release.
+GUI settings have been updated. This notification will be removed in a later release.
.. literalinclude:: api/notifications/settings.updated.json
-
-
diff --git a/docs/position.rst b/docs/position.rst
index c3d6511d..dac20da6 100644
--- a/docs/position.rst
+++ b/docs/position.rst
@@ -1,7 +1,7 @@
Positions
=========
-In a the project object you have properties scene_height and scene_width this define the
-size of the drawing area as px.
+A project object contains the scene_height and scene_width properties. These define the
+size of the drawing area in pixels.
-The position of the node are relative to this with 0,0 as center of the area.
+The positions of the nodes are relative to this area, with 0,0 being its center.
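+
+For example, a node could be moved back to the center of the area by updating
+its coordinates (project and node ids are placeholders):
+
+.. code-block:: shell-session
+
+    # curl -X PUT "http://localhost:3080/v2/projects/<project_id>/nodes/<node_id>" -d '{"x": 0, "y": 0}'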
diff --git a/gns3server/appliances/brocade-vtm.gns3a b/gns3server/appliances/brocade-vtm.gns3a
index 2bb2b143..71b849a0 100644
--- a/gns3server/appliances/brocade-vtm.gns3a
+++ b/gns3server/appliances/brocade-vtm.gns3a
@@ -25,6 +25,13 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "VirtualTrafficManager-174.qcow2",
+ "version": "17.4",
+ "md5sum": "3c44f385e5faf310ca8e3d46bf4e0564",
+ "filesize": 2036465664,
+ "download_url": "http://www1.brocade.com/forms/jsp/steelapp-traffic-manager-developer/index.jsp?src=WS&lsd=BRCD&lst=English&cn=PA-GDG-16Q1-EVAL-TrafficManagerDeveloper&intcmp=lp_en_vTMdeveloper_eval_bn_00001"
+ },
{
"filename": "VirtualTrafficManager-173.qcow2",
"version": "17.3",
@@ -76,6 +83,12 @@
}
],
"versions": [
+ {
+ "name": "17.4",
+ "images": {
+ "hda_disk_image": "VirtualTrafficManager-174.qcow2"
+ }
+ },
{
"name": "17.3",
"images": {
diff --git a/gns3server/appliances/centos7.gns3a b/gns3server/appliances/centos7.gns3a
index 6761fe70..8c80ac89 100644
--- a/gns3server/appliances/centos7.gns3a
+++ b/gns3server/appliances/centos7.gns3a
@@ -9,6 +9,7 @@
"product_url": "https://www.centos.org/download/",
"registry_version": 5,
"status": "stable",
+ "availability": "free",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Username: osboxes.org\nPassword: osboxes.org",
diff --git a/gns3server/appliances/checkpoint-gaia.gns3a b/gns3server/appliances/checkpoint-gaia.gns3a
index ddf8e58d..122e6fc3 100644
--- a/gns3server/appliances/checkpoint-gaia.gns3a
+++ b/gns3server/appliances/checkpoint-gaia.gns3a
@@ -6,15 +6,16 @@
"vendor_url": "https://www.checkpoint.com",
"documentation_url": "http://downloads.checkpoint.com/dc/download.htm?ID=26770",
"product_name": "Gaia",
- "registry_version": 3,
+ "registry_version": 4,
"status": "experimental",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "At boot choose the install on disk options. You need to open quickly the terminal after launching the appliance if you want to see the menu. You need a web browser in order to finalize the installation. You can use the firefox appliance for this.",
"qemu": {
+ "cpus": 2,
"adapter_type": "e1000",
"adapters": 8,
- "ram": 2048,
+ "ram": 4096,
"arch": "x86_64",
"console_type": "telnet",
"boot_priority": "dc",
@@ -44,33 +45,33 @@
"download_url": "https://supportcenter.checkpoint.com/supportcenter/portal?eventSubmit_doGoviewsolutiondetails=&solutionid=sk104859"
},
{
- "filename": "empty8G.qcow2",
+ "filename": "empty100G.qcow2",
"version": "1.0",
- "md5sum": "f1d2c25b6990f99bd05b433ab603bdb4",
+ "md5sum": "1e6409a4523ada212dea2ebc50e50a65",
"filesize": 197120,
"download_url": "https://sourceforge.net/projects/gns-3/files/Empty%20Qemu%20disk/",
- "direct_download_url": "https://sourceforge.net/projects/gns-3/files/Empty%20Qemu%20disk/empty8G.qcow2/download"
+ "direct_download_url": "https://sourceforge.net/projects/gns-3/files/Empty%20Qemu%20disk/empty100G.qcow2/download"
}
],
"versions": [
{
"name": "80.10",
"images": {
- "hda_disk_image": "empty8G.qcow2",
+ "hda_disk_image": "empty100G.qcow2",
"cdrom_image": "Check_Point_R80.10_T421_Gaia.iso"
}
},
{
"name": "77.30",
"images": {
- "hda_disk_image": "empty8G.qcow2",
+ "hda_disk_image": "empty100G.qcow2",
"cdrom_image": "Check_Point_R77.30_T204_Install_and_Upgrade.Gaia.iso"
}
},
{
"name": "77.20",
"images": {
- "hda_disk_image": "empty8G.qcow2",
+ "hda_disk_image": "empty100G.qcow2",
"cdrom_image": "Check_Point_R77.20_T124_Install.Gaia.iso"
}
}
diff --git a/gns3server/appliances/cisco-asav.gns3a b/gns3server/appliances/cisco-asav.gns3a
index 50d8b7b9..edf2c265 100644
--- a/gns3server/appliances/cisco-asav.gns3a
+++ b/gns3server/appliances/cisco-asav.gns3a
@@ -25,6 +25,13 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "asav991.qcow2",
+ "version": "9.9.1",
+ "md5sum": "179c52e62b417f38fe21ff1792b8cfc7",
+ "filesize": 200671232,
+ "download_url": "https://software.cisco.com/download/type.html?mdfid=286119613&flowid=50242"
+ },
{
"filename": "asav981-5.qcow2",
"version": "9.8.1-5",
@@ -97,6 +104,12 @@
}
],
"versions": [
+ {
+ "name": "9.9.1",
+ "images": {
+ "hda_disk_image": "asav991.qcow2"
+ }
+ },
{
"name": "9.8.1-5",
"images": {
diff --git a/gns3server/appliances/cisco-csr1000v.gns3a b/gns3server/appliances/cisco-csr1000v.gns3a
index bf98fc07..505cbcb8 100644
--- a/gns3server/appliances/cisco-csr1000v.gns3a
+++ b/gns3server/appliances/cisco-csr1000v.gns3a
@@ -25,23 +25,30 @@
{
"filename": "csr1000v-universalk9.16.07.01-serial.qcow2",
"version": "16.7.1",
- "md5sum": "13adbfc2586d06c9802b9805168c0c44",
- "filesize": 882769920,
- "download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&flowid=39582&softwareid=282046477&release=Fuji-16.7.1&relind=AVAILABLE&rellifecycle=ED&reltype=latest"
+ "md5sum": "bad9000d4ae8317bbc99a34a8cdd2eb4",
+ "filesize": 884539392,
+ "download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&flowid=39582&softwareid=282046477&release=Fuji-16.7.1"
+ },
+ {
+ "filename": "csr1000v-universalk9.16.06.02-serial.qcow2",
+ "version": "16.6.2",
+ "md5sum": "11e393b31ab9d1ace8e5f7551c491ba2",
+ "filesize": 1570242560,
+ "download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&flowid=39582&softwareid=282046477&release=Everest-16.6.2"
},
{
"filename": "csr1000v-universalk9.16.06.01-serial.qcow2",
"version": "16.6.1",
"md5sum": "909e74446d3ff0b82c14327c0058fdc2",
"filesize": 1566179328,
- "download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&flowid=39582&softwareid=282046477&release=Denali-16.3.5&relind=AVAILABLE&rellifecycle=ED&reltype=latest"
+ "download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&flowid=39582&softwareid=282046477&release=Everest-16.6.1"
},
{
"filename": "csr1000v-universalk9.16.05.02-serial.qcow2",
"version": "16.5.2",
"md5sum": "59a84da28d59ee75176aa05ecde7f72a",
"filesize": 1322385408,
- "download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&flowid=39582&softwareid=282046477&release=Denali-16.3.5&relind=AVAILABLE&rellifecycle=ED&reltype=latest"
+ "download_url": "https://software.cisco.com/download/release.html?mdfid=284364978&flowid=39582&softwareid=282046477&release=Everest-16.5.2"
},
{
"filename": "csr1000v-universalk9.16.5.1b-serial.qcow2",
@@ -93,6 +100,12 @@
"hda_disk_image": "csr1000v-universalk9.16.07.01-serial.qcow2"
}
},
+ {
+ "name": "16.6.2",
+ "images": {
+ "hda_disk_image": "csr1000v-universalk9.16.06.02-serial.qcow2"
+ }
+ },
{
"name": "16.6.1",
"images": {
diff --git a/gns3server/appliances/cisco-fmcv.gns3a b/gns3server/appliances/cisco-fmcv.gns3a
index 9510d6ef..4aa9bef2 100644
--- a/gns3server/appliances/cisco-fmcv.gns3a
+++ b/gns3server/appliances/cisco-fmcv.gns3a
@@ -9,6 +9,7 @@
"product_url": "http://www.cisco.com/c/en/us/td/docs/security/firepower/quick_start/kvm/fmcv-kvm-qsg.html",
"registry_version": 4,
"status": "experimental",
+ "availability": "service-contract",
"maintainer": "Community",
"maintainer_email":"",
"usage": "BE PATIENT\nOn first boot FMCv generates about 6GB of data. This can take 30 minutes or more. Plan on a long wait after the following line in the boot up:\n\n usbcore: registered new interface driver usb-storage\n\nInitial IP address: 192.168.45.45.\n\nDefault username/password: admin/Admin123.",
diff --git a/gns3server/appliances/cisco-ftdv.gns3a b/gns3server/appliances/cisco-ftdv.gns3a
index 617607bb..21b806e6 100644
--- a/gns3server/appliances/cisco-ftdv.gns3a
+++ b/gns3server/appliances/cisco-ftdv.gns3a
@@ -9,6 +9,7 @@
"product_url": "http://www.cisco.com/c/en/us/td/docs/security/firepower/quick_start/kvm/ftdv-kvm-qsg.html",
"registry_version": 4,
"status": "experimental",
+ "availability": "service-contract",
"maintainer": "Community",
"maintainer_email": "",
"usage": "Default username/password: admin/Admin123.",
diff --git a/gns3server/appliances/cisco-ngipsv.gns3a b/gns3server/appliances/cisco-ngipsv.gns3a
index 32679f5f..cefe1a8f 100644
--- a/gns3server/appliances/cisco-ngipsv.gns3a
+++ b/gns3server/appliances/cisco-ngipsv.gns3a
@@ -9,6 +9,7 @@
"product_url": "http://www.cisco.com/c/en/us/support/security/ngips-virtual-appliance/tsd-products-support-series-home.html",
"registry_version": 4,
"status": "experimental",
+ "availability": "service-contract",
"maintainer": "Community",
"maintainer_email": "",
"usage": "Default username/password: admin/Admin123.",
diff --git a/gns3server/appliances/cisco-nxosv9k.gns3a b/gns3server/appliances/cisco-nxosv9k.gns3a
index ddda402b..653f4920 100644
--- a/gns3server/appliances/cisco-nxosv9k.gns3a
+++ b/gns3server/appliances/cisco-nxosv9k.gns3a
@@ -25,6 +25,20 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "nxosv-final.7.0.3.I7.3.qcow2",
+ "version": "7.0.3.I7.3",
+ "md5sum": "9d7a20367bf681a239f14097bbce470a",
+ "filesize": 983629824,
+ "download_url": "https://software.cisco.com/download/"
+ },
+ {
+ "filename": "nxosv-final.7.0.3.I7.2.qcow2",
+ "version": "7.0.3.I7.2",
+ "md5sum": "17295efb13e83b24a439148449bfd5ab",
+ "filesize": 906231808,
+ "download_url": "https://software.cisco.com/download/"
+ },
{
"filename": "nxosv-final.7.0.3.I7.1.qcow2",
"version": "7.0.3.I7.1",
@@ -64,6 +78,20 @@
}
],
"versions": [
+ {
+ "name": "7.0.3.I7.3",
+ "images": {
+ "bios_image": "OVMF-20160813.fd",
+ "hda_disk_image": "nxosv-final.7.0.3.I7.3.qcow2"
+ }
+ },
+ {
+ "name": "7.0.3.I7.2",
+ "images": {
+ "bios_image": "OVMF-20160813.fd",
+ "hda_disk_image": "nxosv-final.7.0.3.I7.2.qcow2"
+ }
+ },
{
"name": "7.0.3.I7.1",
"images": {
diff --git a/gns3server/appliances/clearos.gns3a b/gns3server/appliances/clearos.gns3a
index b0be6314..25cd1876 100644
--- a/gns3server/appliances/clearos.gns3a
+++ b/gns3server/appliances/clearos.gns3a
@@ -22,6 +22,13 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "ClearOS-7.4-DVD-x86_64.iso",
+ "version": "7.4",
+ "md5sum": "826da592f9cd4b59f5fc996ff2d569f1",
+ "filesize": 1029701632,
+ "download_url": "https://www.clearos.com/clearfoundation/software/clearos-downloads"
+ },
{
"filename": "ClearOS-7.3-DVD-x86_64.iso",
"version": "7.3",
@@ -46,6 +53,13 @@
}
],
"versions": [
+ {
+ "name": "7.4",
+ "images": {
+ "hda_disk_image": "empty30G.qcow2",
+ "cdrom_image": "ClearOS-7.4-DVD-x86_64.iso"
+ }
+ },
{
"name": "7.3",
"images": {
diff --git a/gns3server/appliances/coreos.gns3a b/gns3server/appliances/coreos.gns3a
index d7ca0450..396ca39f 100644
--- a/gns3server/appliances/coreos.gns3a
+++ b/gns3server/appliances/coreos.gns3a
@@ -21,6 +21,24 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "coreos_production_qemu_image.1632.2.1.img",
+ "version": "1632.2.1",
+ "md5sum": "facd05ca85eb87e2dc6aefd6779f6806",
+ "filesize": 885719040,
+ "download_url": "http://stable.release.core-os.net/amd64-usr/1632.2.1/",
+ "direct_download_url": "http://stable.release.core-os.net/amd64-usr/1632.2.1/coreos_production_qemu_image.img.bz2",
+ "compression": "bzip2"
+ },
+ {
+ "filename": "coreos_production_qemu_image.1576.4.0.img",
+ "version": "1576.4.0",
+ "md5sum": "7d3c647807afe1f18fd0c76730e612b4",
+ "filesize": 849739776,
+ "download_url": "http://stable.release.core-os.net/amd64-usr/1576.4.0/",
+ "direct_download_url": "http://stable.release.core-os.net/amd64-usr/1576.4.0/coreos_production_qemu_image.img.bz2",
+ "compression": "bzip2"
+ },
{
"filename": "coreos_production_qemu_image.1520.8.0.img",
"version": "1520.8.0",
@@ -140,6 +158,18 @@
}
],
"versions": [
+ {
+ "name": "1632.2.1",
+ "images": {
+ "hda_disk_image": "coreos_production_qemu_image.1576.4.0.img"
+ }
+ },
+ {
+ "name": "1576.4.0",
+ "images": {
+ "hda_disk_image": "coreos_production_qemu_image.1576.4.0.img"
+ }
+ },
{
"name": "1520.8.0",
"images": {
diff --git a/gns3server/appliances/cumulus-vx.gns3a b/gns3server/appliances/cumulus-vx.gns3a
index 5f9f9b4a..42a3f9f3 100644
--- a/gns3server/appliances/cumulus-vx.gns3a
+++ b/gns3server/appliances/cumulus-vx.gns3a
@@ -23,6 +23,22 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "cumulus-linux-3.5.2-vx-amd64.qcow2",
+ "version": "3.5.2",
+ "md5sum": "87d1d8b297e5ebd77924669dfb7e4c9f",
+ "filesize": 996605952,
+ "download_url": "https://cumulusnetworks.com/cumulus-vx/download/",
+ "direct_download_url": "http://cumulusfiles.s3.amazonaws.com/cumulus-linux-3.5.0-vx-amd64.qcow2"
+ },
+ {
+ "filename": "cumulus-linux-3.5.0-vx-amd64.qcow2",
+ "version": "3.5.0",
+ "md5sum": "9ad1f352d0603becf4bcc749b77c99dd",
+ "filesize": 1044250624,
+ "download_url": "https://cumulusnetworks.com/cumulus-vx/download/",
+ "direct_download_url": "http://cumulusfiles.s3.amazonaws.com/cumulus-linux-3.5.0-vx-amd64.qcow2"
+ },
{
"filename": "cumulus-linux-3.4.3-vx-amd64.qcow2",
"version": "3.4.3",
@@ -125,6 +141,18 @@
}
],
"versions": [
+ {
+ "name": "3.5.2",
+ "images": {
+ "hda_disk_image": "cumulus-linux-3.5.2-vx-amd64.qcow2"
+ }
+ },
+ {
+ "name": "3.5.0",
+ "images": {
+ "hda_disk_image": "cumulus-linux-3.5.0-vx-amd64.qcow2"
+ }
+ },
{
"name": "3.4.3",
"images": {
diff --git a/gns3server/appliances/exos.gns3a b/gns3server/appliances/exos.gns3a
index 427732e8..3f09c669 100644
--- a/gns3server/appliances/exos.gns3a
+++ b/gns3server/appliances/exos.gns3a
@@ -26,6 +26,14 @@
"options": "-smp 2 -cpu core2duo"
},
"images": [
+ {
+ "filename": "exosvm-22.4.1.4.iso",
+ "version": "22.4.1.4",
+ "md5sum": "2134a511084519a5f8ad00a6f7cd71a9",
+ "filesize": 49993728,
+ "download_url": "https://github.com/extremenetworks/Virtual_EXOS",
+ "direct_download_url": "https://github.com/extremenetworks/Virtual_EXOS/raw/master/vm-22.4.1.4.iso"
+ },
{
"filename": "exosvm-22.2.1.5.iso",
"version": "22.2.1.5",
@@ -92,6 +100,13 @@
}
],
"versions": [
+ {
+ "name": "22.4.1.4",
+ "images": {
+ "hda_disk_image": "empty8G.qcow2",
+ "cdrom_image": "exosvm-22.4.1.4.iso"
+ }
+ },
{
"name": "22.2.1.5",
"images": {
diff --git a/gns3server/appliances/f5-bigip.gns3a b/gns3server/appliances/f5-bigip.gns3a
index a6db2c5b..863e1944 100644
--- a/gns3server/appliances/f5-bigip.gns3a
+++ b/gns3server/appliances/f5-bigip.gns3a
@@ -27,6 +27,20 @@
"options": "-smp 2 -cpu host"
},
"images": [
+ {
+ "filename": "BIGIP-13.1.0.2.0.0.6.qcow2",
+ "version": "13.1.0 HF2",
+ "md5sum": "d29eb861d8906fc36f88d9861a0055f4",
+ "filesize": 4363649024,
+ "download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-ip/big-ip_v13.x/13.1.0/english/13.1.0.2_virtual-edition/&sw=BIG-IP&pro=big-ip_v13.x&ver=13.1.0&container=13.1.0.2_Virtual-Edition&file=BIGIP-13.1.0.2.0.0.6.ALL.qcow2.zip"
+ },
+ {
+ "filename": "BIGIP-13.1.0.1.0.0.8.qcow2",
+ "version": "13.1.0 HF1",
+ "md5sum": "70f92192e66a82cb8f47bdae0cb267d8",
+ "filesize": 4352966656,
+ "download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-ip/big-ip_v13.x/13.1.0/english/13.1.0.1_virtual-edition/&sw=BIG-IP&pro=big-ip_v13.x&ver=13.1.0&container=13.1.0.1_Virtual-Edition&file=BIGIP-13.1.0.1.0.0.8.ALL.qcow2.zip"
+ },
{
"filename": "BIGIP-13.0.0.2.0.1671.qcow2",
"version": "13.0.0 HF2",
@@ -107,6 +121,20 @@
}
],
"versions": [
+ {
+ "name": "13.1.0 HF2",
+ "images": {
+ "hda_disk_image": "BIGIP-13.1.0.2.0.0.6.qcow2",
+ "hdb_disk_image": "empty100G.qcow2"
+ }
+ },
+ {
+ "name": "13.1.0 HF1",
+ "images": {
+ "hda_disk_image": "BIGIP-13.1.0.1.0.0.8.qcow2",
+ "hdb_disk_image": "empty100G.qcow2"
+ }
+ },
{
"name": "13.0.0 HF2",
"images": {
diff --git a/gns3server/appliances/f5-bigiq.gns3a b/gns3server/appliances/f5-bigiq.gns3a
index 1543ca4f..97050cf7 100644
--- a/gns3server/appliances/f5-bigiq.gns3a
+++ b/gns3server/appliances/f5-bigiq.gns3a
@@ -29,6 +29,13 @@
"options": "-smp 2 -cpu host"
},
"images": [
+ {
+ "filename": "BIG-IQ-5.4.0.0.0.7437.qcow2",
+ "version": "5.4.0",
+ "md5sum": "068b1f4d21048b9b2a082c0c27ef4d53",
+ "filesize": 3300917248,
+ "download_url": "https://downloads.f5.com/esd/serveDownload.jsp?path=/big-iq/big-iq_cm/5.4.0/english/v5.4.0/&sw=BIG-IQ&pro=big-iq_CM&ver=5.4.0&container=v5.4.0&file=BIG-IQ-5.4.0.0.0.7437.qcow2.zip"
+ },
{
"filename": "BIG-IQ-5.3.0.0.0.1119.qcow2",
"version": "5.3.0",
@@ -74,6 +81,13 @@
}
],
"versions": [
+ {
+ "name": "5.4.0",
+ "images": {
+ "hda_disk_image": "BIG-IQ-5.4.0.0.0.7437.qcow2",
+ "hdb_disk_image": "empty100G.qcow2"
+ }
+ },
{
"name": "5.3.0",
"images": {
diff --git a/gns3server/appliances/fortiadc.gns3a b/gns3server/appliances/fortiadc.gns3a
index b2b5004e..10f47817 100644
--- a/gns3server/appliances/fortiadc.gns3a
+++ b/gns3server/appliances/fortiadc.gns3a
@@ -34,6 +34,20 @@
"filesize": 30998528,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
+ {
+ "filename": "FAD_KVM-V400-build0989-FORTINET.out.kvm-boot.qcow2",
+ "version": "4.8.4",
+ "md5sum": "c1926d5979ef24d9d14d3394c0bb832b",
+ "filesize": 72810496,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
+ {
+ "filename": "FAD_KVM-V400-build0983-FORTINET.out.kvm-boot.qcow2",
+ "version": "4.8.3",
+ "md5sum": "d4cfc3b215780b2fb4c9d8f55208e8be",
+ "filesize": 72876032,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FAD_KVM-V400-build0977-FORTINET.out.kvm-boot.qcow2",
"version": "4.8.2",
@@ -141,6 +155,20 @@
}
],
"versions": [
+ {
+ "name": "4.8.4",
+ "images": {
+ "hda_disk_image": "FAD_KVM-V400-build0989-FORTINET.out.kvm-boot.qcow2",
+ "hdb_disk_image": "FAD_KVM-v400-FORTINET.out.kvm-data.qcow2"
+ }
+ },
+ {
+ "name": "4.8.3",
+ "images": {
+ "hda_disk_image": "FAD_KVM-V400-build0983-FORTINET.out.kvm-boot.qcow2",
+ "hdb_disk_image": "FAD_KVM-v400-FORTINET.out.kvm-data.qcow2"
+ }
+ },
{
"name": "4.8.2",
"images": {
diff --git a/gns3server/appliances/fortianalyzer.gns3a b/gns3server/appliances/fortianalyzer.gns3a
index cd44d55a..611dc6c1 100644
--- a/gns3server/appliances/fortianalyzer.gns3a
+++ b/gns3server/appliances/fortianalyzer.gns3a
@@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "FAZ_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
+ "version": "5.6.1",
+ "md5sum": "1bd94c920f8747de671832ef92e8dfbc",
+ "filesize": 105705472,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FAZ_VM64_KVM-v5-build1557-FORTINET.out.kvm.qcow2",
"version": "5.6.0",
@@ -106,6 +113,13 @@
}
],
"versions": [
+ {
+ "name": "5.6.1",
+ "images": {
+ "hda_disk_image": "FAZ_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
{
"name": "5.6.0",
"images": {
diff --git a/gns3server/appliances/fortiauthenticator.gns3a b/gns3server/appliances/fortiauthenticator.gns3a
index 204d6dfd..e9771cbc 100644
--- a/gns3server/appliances/fortiauthenticator.gns3a
+++ b/gns3server/appliances/fortiauthenticator.gns3a
@@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "FAC_VM_KVM-v5-build0155-FORTINET.out.kvm.qcow2",
+ "version": "5.2.0",
+ "md5sum": "69b55ce7c8094ccd736bbfe8a3262b31",
+ "filesize": 71782400,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FAC_VM_KVM-v500-build0091-FORTINET.out.kvm.qcow2",
"version": "5.1.2",
@@ -63,6 +70,13 @@
}
],
"versions": [
+ {
+ "name": "5.2.0",
+ "images": {
+ "hda_disk_image": "FAC_VM_KVM-v5-build0155-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "FAC_VM_KVM-v500-DATADRIVE.qcow2"
+ }
+ },
{
"name": "5.1.2",
"images": {
diff --git a/gns3server/appliances/forticache.gns3a b/gns3server/appliances/forticache.gns3a
index 4052c0f3..501ec3b0 100644
--- a/gns3server/appliances/forticache.gns3a
+++ b/gns3server/appliances/forticache.gns3a
@@ -26,6 +26,13 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "FCHKVM-v400-build0216-FORTINET.out.kvm.qcow2",
+ "version": "4.2.6",
+ "md5sum": "867e0569b8466db744547422a1d6f17a",
+ "filesize": 27553792,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FCHKVM-v400-build0213-FORTINET.out.kvm.qcow2",
"version": "4.2.5",
@@ -92,6 +99,13 @@
}
],
"versions": [
+ {
+ "name": "4.2.6",
+ "images": {
+ "hda_disk_image": "FCHKVM-v400-build0216-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty100G.qcow2"
+ }
+ },
{
"name": "4.2.5",
"images": {
diff --git a/gns3server/appliances/fortigate.gns3a b/gns3server/appliances/fortigate.gns3a
index 739132a7..caca6d17 100644
--- a/gns3server/appliances/fortigate.gns3a
+++ b/gns3server/appliances/fortigate.gns3a
@@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "FGT_VM64_KVM-v5-build1547-FORTINET.out.kvm.qcow2",
+ "version": "5.6.3",
+ "md5sum": "a908f8620e8bbccce8794733f3637e13",
+ "filesize": 40939520,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FGT_VM64_KVM-v5-build1486-FORTINET.out.kvm.qcow2",
"version": "5.6.2",
@@ -47,6 +54,20 @@
"filesize": 38760448,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
+ {
+ "filename": "FGT_VM64_KVM-v5-build1183-FORTINET.out.kvm.qcow2",
+ "version": "5.4.8",
+ "md5sum": "c1eb02996a0919c934785d5f48df9507",
+ "filesize": 38608896,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
+ {
+ "filename": "FGT_VM64_KVM-v5-build6446-FORTINET.out.kvm.qcow2",
+ "version": "5.4.7",
+ "md5sum": "17d3dfebd4b222569cf10cfab83e0e56",
+ "filesize": 38715392,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FGT_VM64_KVM-v5-build1165-FORTINET.out.kvm.qcow2",
"version": "5.4.6",
@@ -96,6 +117,20 @@
"filesize": 35373056,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
+ {
+ "filename": "FGT_VM64_KVM-v5-build0762-FORTINET.out.kvm.qcow2",
+ "version": "5.2.13",
+ "md5sum": "78df232e516a863f233de88ffba5bc4b",
+ "filesize": 38776832,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
+ {
+ "filename": "FGT_VM64_KVM-v5-build0760-FORTINET.out.kvm.qcow2",
+ "version": "5.2.12",
+ "md5sum": "2efa0c110abed83b71927145d1e87805",
+ "filesize": 38363136,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FGT_VM64_KVM-v5-build0754-FORTINET.out.kvm.qcow2",
"version": "5.2.11",
@@ -148,6 +183,13 @@
}
],
"versions": [
+ {
+ "name": "5.6.3",
+ "images": {
+ "hda_disk_image": "FGT_VM64_KVM-v5-build1547-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
{
"name": "5.6.2",
"images": {
@@ -169,6 +211,20 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
+ {
+ "name": "5.4.8",
+ "images": {
+ "hda_disk_image": "FGT_VM64_KVM-v5-build1183-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
+ {
+ "name": "5.4.7",
+ "images": {
+ "hda_disk_image": "FGT_VM64_KVM-v5-build6446-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
{
"name": "5.4.6",
"images": {
@@ -218,6 +274,20 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
+ {
+ "name": "5.2.13",
+ "images": {
+ "hda_disk_image": "FGT_VM64_KVM-v5-build0762-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
+ {
+ "name": "5.2.12",
+ "images": {
+ "hda_disk_image": "FGT_VM64_KVM-v5-build0760-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
{
"name": "5.2.11",
"images": {
diff --git a/gns3server/appliances/fortimail.gns3a b/gns3server/appliances/fortimail.gns3a
index a8c755d7..d52bb26f 100644
--- a/gns3server/appliances/fortimail.gns3a
+++ b/gns3server/appliances/fortimail.gns3a
@@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "FML_VMKV-64-v54-build0712-FORTINET.out.kvm.qcow2",
+ "version": "5.4.3",
+ "md5sum": "977effe7b885ca5cedec7740a2a637aa",
+ "filesize": 93454336,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FML_VMKV-64-v54-build0707-FORTINET.out.kvm.qcow2",
"version": "5.4.2",
@@ -127,6 +134,13 @@
}
],
"versions": [
+ {
+ "name": "5.4.3",
+ "images": {
+ "hda_disk_image": "FML_VMKV-64-v54-build0712-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
{
"name": "5.4.2",
"images": {
diff --git a/gns3server/appliances/fortimanager.gns3a b/gns3server/appliances/fortimanager.gns3a
index f4642202..6b809983 100644
--- a/gns3server/appliances/fortimanager.gns3a
+++ b/gns3server/appliances/fortimanager.gns3a
@@ -26,6 +26,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "FMG_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
+ "version": "5.6.1",
+ "md5sum": "8cc553842564d232af295d6a0c784c1f",
+ "filesize": 106831872,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FMG_VM64_KVM-v5-build1557-FORTINET.out.kvm.qcow2",
"version": "5.6.0",
@@ -106,6 +113,13 @@
}
],
"versions": [
+ {
+ "name": "5.6.1",
+ "images": {
+ "hda_disk_image": "FMG_VM64_KVM-v5-build1619-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "empty30G.qcow2"
+ }
+ },
{
"name": "5.6.0",
"images": {
diff --git a/gns3server/appliances/fortisandbox.gns3a b/gns3server/appliances/fortisandbox.gns3a
index 5382e406..d7d702f4 100644
--- a/gns3server/appliances/fortisandbox.gns3a
+++ b/gns3server/appliances/fortisandbox.gns3a
@@ -27,6 +27,13 @@
"options": "-smp 2"
},
"images": [
+ {
+ "filename": "FSA_KVM-v200-build0329-FORTINET.out.kvm.qcow2",
+ "version": "2.5.1",
+ "md5sum": "782ba56a644d78da59b89f4ac91bd319",
+ "filesize": 114491904,
+ "download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
+ },
{
"filename": "FSA_KVM-v200-build0261-FORTINET.out.kvm.qcow2",
"version": "2.4.1",
@@ -71,6 +78,13 @@
}
],
"versions": [
+ {
+ "name": "2.5.1",
+ "images": {
+ "hda_disk_image": "FSA_KVM-v200-build0329-FORTINET.out.kvm.qcow2",
+ "hdb_disk_image": "FSA_v200-datadrive.qcow2"
+ }
+ },
{
"name": "2.4.1",
"images": {
diff --git a/gns3server/appliances/freenas.gns3a b/gns3server/appliances/freenas.gns3a
index e71c00ff..e92a05ee 100644
--- a/gns3server/appliances/freenas.gns3a
+++ b/gns3server/appliances/freenas.gns3a
@@ -15,7 +15,7 @@
"qemu": {
"adapter_type": "e1000",
"adapters": 1,
- "ram": 8096,
+ "ram": 8192,
"hda_disk_interface": "ide",
"hdb_disk_interface": "ide",
"arch": "x86_64",
@@ -24,9 +24,25 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "FreeNAS-11.1-U1.iso",
+ "version": "11.1 U1",
+ "md5sum": "ccbd9990a5878d35c6bc0cc6eea34b16",
+ "filesize": 626601984,
+ "download_url": "http://www.freenas.org/download/",
+ "direct_download_url": "http://download.freenas.org/11/11.1-RELEASE/x64/FreeNAS-11.1-RELEASE.iso"
+ },
+ {
+ "filename": "FreeNAS-11.1-RELEASE.iso",
+ "version": "11.1",
+ "md5sum": "67bea5816bc889169e5e3054362b2053",
+ "filesize": 626761728,
+ "download_url": "http://www.freenas.org/download/",
+ "direct_download_url": "http://download.freenas.org/11/11.1-RELEASE/x64/FreeNAS-11.1-RELEASE.iso"
+ },
{
"filename": "FreeNAS-11.0-U4.iso",
- "version": "11.0-U4",
+ "version": "11.0 U4",
"md5sum": "4c210f1a6510d1fa95257d81ef569ff8",
"filesize": 567312384,
"download_url": "http://www.freenas.org/download/",
@@ -34,7 +50,7 @@
},
{
"filename": "FreeNAS-9.10.1-U4.iso",
- "version": "9.10",
+ "version": "9.10 U4",
"md5sum": "b4fb14513dcbb4eb4c5596c5911ca9cc",
"filesize": 533098496,
"download_url": "http://www.freenas.org/download/",
@@ -51,7 +67,23 @@
],
"versions": [
{
- "name": "11.0",
+ "name": "11.1 U1",
+ "images": {
+ "hda_disk_image": "empty30G.qcow2",
+ "hdb_disk_image": "empty30G.qcow2",
+ "cdrom_image": "FreeNAS-11.1-U1.iso"
+ }
+ },
+ {
+ "name": "11.1",
+ "images": {
+ "hda_disk_image": "empty30G.qcow2",
+ "hdb_disk_image": "empty30G.qcow2",
+ "cdrom_image": "FreeNAS-11.1-RELEASE.iso"
+ }
+ },
+ {
+ "name": "11.0 U4",
"images": {
"hda_disk_image": "empty30G.qcow2",
"hdb_disk_image": "empty30G.qcow2",
@@ -59,7 +91,7 @@
}
},
{
- "name": "9.10",
+ "name": "9.10 U4",
"images": {
"hda_disk_image": "empty30G.qcow2",
"hdb_disk_image": "empty30G.qcow2",
diff --git a/gns3server/appliances/ipfire.gns3a b/gns3server/appliances/ipfire.gns3a
index 656333e7..3f671dc4 100644
--- a/gns3server/appliances/ipfire.gns3a
+++ b/gns3server/appliances/ipfire.gns3a
@@ -24,6 +24,15 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "ipfire-2.19.1gb-ext4-scon.x86_64-full-core117.img",
+ "version": "2.19.117",
+ "md5sum": "657673d88b94ed7d22332aebe817bc86",
+ "filesize": 1063256064,
+ "download_url": "http://www.ipfire.org/download",
+ "direct_download_url": "https://downloads.ipfire.org/releases/ipfire-2.x/2.19-core117/ipfire-2.19.1gb-ext4-scon.x86_64-full-core117.img.gz",
+ "compression": "gzip"
+ },
{
"filename": "ipfire-2.19.1gb-ext4-scon.x86_64-full-core116.img",
"version": "2.19.116",
@@ -53,6 +62,12 @@
}
],
"versions": [
+ {
+ "name": "2.19.117",
+ "images": {
+ "hda_disk_image": "ipfire-2.19.1gb-ext4-scon.x86_64-full-core117.img"
+ }
+ },
{
"name": "2.19.116",
"images": {
diff --git a/gns3server/appliances/juniper-junos-space.gns3a b/gns3server/appliances/juniper-junos-space.gns3a
new file mode 100644
index 00000000..6df47223
--- /dev/null
+++ b/gns3server/appliances/juniper-junos-space.gns3a
@@ -0,0 +1,43 @@
+{
+ "name": "Junos Space",
+ "category": "guest",
+ "description": "Junos Space Network Management Platform works with Juniper's management applications to simplify and automate management of Juniper's switching, routing, and security devices. As part of a complete solution, the platform provides broad fault, configuration, accounting, performance, and security management (FCAPS) capability, same day support for new devices and Junos OS releases, a task-specific user interface, and northbound APIs for integration with existing network management systems (NMS) or operations/business support systems (OSS/BSS).\n\nThe platform helps network operators at enterprises and service providers scale operations, reduce complexity, and enable new applications and services to be brought to market quickly, through multilayered network abstractions, operator-centric automation schemes, and a simple point-and-click UI.",
+ "vendor_name": "Juniper",
+ "vendor_url": "https://www.juniper.net/us/en/",
+ "documentation_url": "http://www.juniper.net/techpubs/",
+ "product_name": "Junos Space",
+ "product_url": "https://www.juniper.net/us/en/dm/free-vqfx-trial/",
+ "registry_version": 3,
+ "status": "stable",
+ "maintainer": "GNS3 Team",
+ "maintainer_email": "developers@gns3.net",
+ "symbol": "juniper-vqfx.svg",
+ "usage": "16 GB RAM is the bare minimum; you should use 32/64 GB in production deplyments.\nDefault credentials:\n- CLI: admin / abc123\n- WebUI: super / juniper123",
+ "port_name_format": "em{0}",
+ "qemu": {
+ "adapter_type": "e1000",
+ "adapters": 4,
+ "ram": 16384,
+ "arch": "x86_64",
+ "console_type": "telnet",
+ "kvm": "require",
+ "options": "-smp 4 -nographic"
+ },
+ "images": [
+ {
+ "filename": "space-17.2R1.4.qcow2",
+ "version": "17.2R1.4",
+ "md5sum": "4124fa756c3a78be0619e876b8ee687e",
+ "filesize": 5150474240,
+ "download_url": "https://www.juniper.net/support/downloads/?p=space#sw"
+ }
+ ],
+ "versions": [
+ {
+ "name": "17.2R1.4",
+ "images": {
+ "hda_disk_image": "space-17.2R1.4.qcow2"
+ }
+ }
+ ]
+}
diff --git a/gns3server/appliances/juniper-vsrx.gns3a b/gns3server/appliances/juniper-vsrx.gns3a
index 7f2ad9d9..002b9bd9 100644
--- a/gns3server/appliances/juniper-vsrx.gns3a
+++ b/gns3server/appliances/juniper-vsrx.gns3a
@@ -23,6 +23,13 @@
"options": "-smp 2"
},
"images": [
+ {
+ "filename": "media-vsrx-vmdisk-17.4R1.16.qcow2",
+ "version": "17.4R1",
+ "md5sum": "616c4742b09652318c73a7cc598468e7",
+ "filesize": 3965386752,
+ "download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
+ },
{
"filename": "media-vsrx-vmdisk-17.3R1.10.qcow2",
"version": "17.3R1",
@@ -30,6 +37,13 @@
"filesize": 3782541312,
"download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
},
+ {
+ "filename": "media-vsrx-vmdisk-15.1X49-D120.3.qcow2",
+ "version": "15.1X49-D120",
+ "md5sum": "02cf4df3dc988a407ccd5ddc30ee5385",
+ "filesize": 3280273408,
+ "download_url": "https://www.juniper.net/us/en/dm/free-vsrx-trial/"
+ },
{
"filename": "media-vsrx-vmdisk-15.1X49-D110.4.qcow2",
"version": "15.1X49-D110",
@@ -109,12 +123,24 @@
}
],
"versions": [
+ {
+ "name": "17.4R1",
+ "images": {
+ "hda_disk_image": "media-vsrx-vmdisk-17.4R1.16.qcow2"
+ }
+ },
{
"name": "17.3R1",
"images": {
"hda_disk_image": "media-vsrx-vmdisk-17.3R1.10.qcow2"
}
},
+ {
+ "name": "15.1X49-D120",
+ "images": {
+ "hda_disk_image": "media-vsrx-vmdisk-15.1X49-D120.3.qcow2"
+ }
+ },
{
"name": "15.1X49-D110",
"images": {
diff --git a/gns3server/appliances/kali-linux.gns3a b/gns3server/appliances/kali-linux.gns3a
index 4294010c..78732d07 100644
--- a/gns3server/appliances/kali-linux.gns3a
+++ b/gns3server/appliances/kali-linux.gns3a
@@ -20,6 +20,14 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "kali-linux-2017.3-amd64.iso",
+ "version": "2017.3",
+ "md5sum": "b465580c897e94675ac1daf031fa66b9",
+ "filesize": 2886402048,
+ "download_url": "http://cdimage.kali.org/kali-2017.3/",
+ "direct_download_url": "http://cdimage.kali.org/kali-2017.3/kali-linux-2017.3-amd64.iso"
+ },
{
"filename": "kali-linux-2017.2-amd64.iso",
"version": "2017.2",
@@ -62,6 +70,12 @@
}
],
"versions": [
+ {
+ "name": "2017.3",
+ "images": {
+ "cdrom_image": "kali-linux-2017.3-amd64.iso"
+ }
+ },
{
"name": "2017.2",
"images": {
diff --git a/gns3server/appliances/lede.gns3a b/gns3server/appliances/lede.gns3a
index 63c024e9..74c388a4 100644
--- a/gns3server/appliances/lede.gns3a
+++ b/gns3server/appliances/lede.gns3a
@@ -21,6 +21,22 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "lede-17.01.4-x86-generic-combined-squashfs.img",
+ "version": "17.01.4",
+ "md5sum": "ae5d8d3fcab109565fe337d28e51c4b4",
+ "filesize": 19779546,
+ "download_url": "https://downloads.lede-project.org/releases/17.01.4/targets/x86/generic/",
+ "direct_download_url": "https://downloads.lede-project.org/releases/17.01.4/targets/x86/generic/lede-17.01.4-x86-generic-combined-squashfs.img"
+ },
+ {
+ "filename": "lede-17.01.3-x86-generic-combined-squashfs.img",
+ "version": "17.01.3",
+ "md5sum": "d315fc638160a9aec0966d58828bfccf",
+ "filesize": 19775618,
+ "download_url": "https://downloads.lede-project.org/releases/17.01.3/targets/x86/generic/",
+ "direct_download_url": "https://downloads.lede-project.org/releases/17.01.3/targets/x86/generic/lede-17.01.3-x86-generic-combined-squashfs.img"
+ },
{
"filename": "lede-17.01.2-x86-generic-combined-squashfs.img",
"version": "17.01.2",
@@ -47,6 +63,18 @@
}
],
"versions": [
+ {
+ "name": "lede 17.01.4",
+ "images": {
+ "hda_disk_image": "lede-17.01.4-x86-generic-combined-squashfs.img"
+ }
+ },
+ {
+ "name": "lede 17.01.3",
+ "images": {
+ "hda_disk_image": "lede-17.01.3-x86-generic-combined-squashfs.img"
+ }
+ },
{
"name": "lede 17.01.2",
"images": {
diff --git a/gns3server/appliances/opensuse.gns3a b/gns3server/appliances/opensuse.gns3a
index 508dfbc7..6abdd916 100644
--- a/gns3server/appliances/opensuse.gns3a
+++ b/gns3server/appliances/opensuse.gns3a
@@ -9,6 +9,7 @@
"product_url": "https://www.opensuse.org/#Leap",
"registry_version": 4,
"status": "stable",
+ "availability": "free",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "Username: osboxes\nPassword: osboxes.org\n\nroot password: osboxes.org",
diff --git a/gns3server/appliances/ostinato.gns3a b/gns3server/appliances/ostinato.gns3a
index 8f1ca322..f65e24f1 100644
--- a/gns3server/appliances/ostinato.gns3a
+++ b/gns3server/appliances/ostinato.gns3a
@@ -25,12 +25,12 @@
},
"images": [
{
- "filename": "ostinato-0.8-97c7d79.qcow2",
- "version": "0.8-97c7d79",
- "md5sum": "5aad15c1eb7baac588a4c8c3faafa380",
- "filesize": 98631680,
+ "filename": "ostinato-0.9-1.qcow2",
+ "version": "0.9",
+ "md5sum": "00b4856ec9fffbcbcab7a8f757355d69",
+ "filesize": 101646336,
"download_url": "http://www.bernhard-ehlers.de/projects/ostinato4gns3/index.html",
- "direct_download_url": "http://www.bernhard-ehlers.de/projects/ostinato4gns3/ostinato-0.8-97c7d79.qcow2"
+ "direct_download_url": "http://www.bernhard-ehlers.de/projects/ostinato4gns3/ostinato-0.9-1.qcow2"
},
{
"filename": "ostinato-0.8-1.qcow2",
@@ -43,9 +43,9 @@
],
"versions": [
{
- "name": "0.8-97c7d79",
+ "name": "0.9",
"images": {
- "hda_disk_image": "ostinato-0.8-97c7d79.qcow2"
+ "hda_disk_image": "ostinato-0.9-1.qcow2"
}
},
{
diff --git a/gns3server/appliances/packetfence-zen.gns3a b/gns3server/appliances/packetfence-zen.gns3a
index 7b41fcb6..efc68820 100644
--- a/gns3server/appliances/packetfence-zen.gns3a
+++ b/gns3server/appliances/packetfence-zen.gns3a
@@ -22,6 +22,15 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "PacketFenceZEN_USB-7.4.0.img",
+ "version": "7.4.0",
+ "md5sum": "83951211540f16dd5813c26955c52429",
+ "filesize": 3221225472,
+ "download_url": "https://packetfence.org/download.html#/zen",
+ "direct_download_url": "https://sourceforge.net/projects/packetfence/files/PacketFence%20ZEN/7.4.0/PacketFenceZEN_USB-7.4.0.tar.bz2/download",
+ "compression": "bzip2"
+ },
{
"filename": "PacketFenceZEN_USB-7.3.0.img",
"version": "7.3.0",
@@ -96,6 +105,12 @@
}
],
"versions": [
+ {
+ "name": "7.4.0",
+ "images": {
+ "hda_disk_image": "PacketFenceZEN_USB-7.4.0.img"
+ }
+ },
{
"name": "7.3.0",
"images": {
diff --git a/gns3server/appliances/sophos-utm.gns3a b/gns3server/appliances/sophos-utm.gns3a
index 8d9af0a2..30584d40 100644
--- a/gns3server/appliances/sophos-utm.gns3a
+++ b/gns3server/appliances/sophos-utm.gns3a
@@ -24,6 +24,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "asg-9.506-2.1.iso",
+ "version": "9.506-2.1",
+ "md5sum": "6b4374f8c5ee66ccdf9683f7349f59cb",
+ "filesize": 1006057472,
+ "download_url": "https://www.sophos.com/en-us/support/utm-downloads.aspx"
+ },
{
"filename": "asg-9.500-9.1.iso",
"version": "9.500-9.1",
@@ -31,6 +38,13 @@
"filesize": 981612544,
"download_url": "https://www.sophos.com/en-us/support/utm-downloads.aspx"
},
+ {
+ "filename": "asg-9.415-1.1.iso",
+ "version": "9.415-1.1",
+ "md5sum": "505004bf5a5d5f2234b2056ec7b553d8",
+ "filesize": 961087488,
+ "download_url": "https://www.sophos.com/en-us/support/utm-downloads.aspx"
+ },
{
"filename": "asg-9.413-4.1.iso",
"version": "9.413-4.1",
@@ -125,6 +139,13 @@
}
],
"versions": [
+ {
+ "name": "9.506-2.1",
+ "images": {
+ "hda_disk_image": "empty30G.qcow2",
+ "cdrom_image": "asg-9.506-2.1.iso"
+ }
+ },
{
"name": "9.500-9.1",
"images": {
@@ -132,6 +153,13 @@
"cdrom_image": "asg-9.500-9.1.iso"
}
},
+ {
+ "name": "9.415-1.1",
+ "images": {
+ "hda_disk_image": "empty30G.qcow2",
+ "cdrom_image": "asg-9.415-1.1.iso"
+ }
+ },
{
"name": "9.413-4.1",
"images": {
diff --git a/gns3server/appliances/sophos-xg.gns3a b/gns3server/appliances/sophos-xg.gns3a
index 30674902..d190ee48 100644
--- a/gns3server/appliances/sophos-xg.gns3a
+++ b/gns3server/appliances/sophos-xg.gns3a
@@ -23,6 +23,20 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "VI-SFOS_17.0.2_MR-2.KVM-116-PRIMARY.qcow2",
+ "version": "17.0.2 MR2",
+ "md5sum": "2555fa6dcdcecad02c9f02dcb1c0c5e5",
+ "filesize": 324599808,
+ "download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
+ },
+ {
+ "filename": "VI-SFOS_17.0.2_MR-2.KVM-116-AUXILARY.qcow2",
+ "version": "16.05.1 MR1",
+ "md5sum": "c3ef795423dbfc01771348b0daa75125",
+ "filesize": 59441152,
+ "download_url": "https://secure2.sophos.com/en-us/products/next-gen-firewall/free-trial.aspx"
+ },
{
"filename": "VI-SFOS_16.05.4_MR-4.KVM-215-PRIMARY.qcow2",
"version": "16.05.4 MR4",
@@ -95,6 +109,13 @@
}
],
"versions": [
+ {
+ "name": "17.0.2 MR2",
+ "images": {
+ "hda_disk_image": "VI-SFOS_17.0.2_MR-2.KVM-116-PRIMARY.qcow2",
+ "hdb_disk_image": "VI-SFOS_17.0.2_MR-2.KVM-116-AUXILARY.qcow2"
+ }
+ },
{
"name": "16.05.4 MR4",
"images": {
diff --git a/gns3server/appliances/ubuntu-cloud.gns3a b/gns3server/appliances/ubuntu-cloud.gns3a
new file mode 100644
index 00000000..e8bb8040
--- /dev/null
+++ b/gns3server/appliances/ubuntu-cloud.gns3a
@@ -0,0 +1,94 @@
+{
+ "name": "Ubuntu Cloud Guest",
+ "category": "guest",
+ "description": "The term 'Ubuntu Cloud Guest' refers to the Official Ubuntu images that are available at http://cloud-images.ubuntu.com . These images are built by Canonical. They are then registered on EC2, and compressed tarfiles are made also available for download. For using those images on a public cloud such as Amazon EC2, you simply choose an image and launch it. To use those images on a private cloud, or to run the image on a local hypervisor (such as KVM) you would need to download those images and either publish them to your private cloud, or launch them directly on a hypervisor. The following sections explain in more details how to perform each of those actions",
+ "vendor_name": "Canonical Inc.",
+ "vendor_url": "https://www.ubuntu.com",
+ "documentation_url": "https://help.ubuntu.com/community/UEC/Images",
+ "product_name": "Ubuntu Cloud Guest",
+ "product_url": "https://www.ubuntu.com/cloud",
+ "registry_version": 3,
+ "status": "stable",
+ "maintainer": "GNS3 Team",
+ "maintainer_email": "developers@gns3.net",
+ "usage": "Username: ubuntu\nPassword: ubuntu",
+ "port_name_format": "Ethernet{0}",
+ "qemu": {
+ "adapter_type": "virtio-net-pci",
+ "adapters": 1,
+ "ram": 1024,
+ "hda_disk_interface": "virtio",
+ "arch": "x86_64",
+ "console_type": "telnet",
+ "boot_priority": "c",
+ "kvm": "require",
+ "options": "-nographic"
+ },
+ "images": [
+ {
+ "filename": "ubuntu-17.10-server-cloudimg-amd64.img",
+ "version": "17.10",
+ "md5sum": "5d221878d8b2e49c5de7ebb58a2b35e3",
+ "filesize": 318373888,
+ "download_url": "https://cloud-images.ubuntu.com/releases/17.10/release/"
+ },
+ {
+ "filename": "ubuntu-17.04-server-cloudimg-amd64.img",
+ "version": "17.04",
+ "md5sum": "d4da8157dbf2e64f2fa1fb5d121398e5",
+ "filesize": 351993856,
+ "download_url": "https://cloud-images.ubuntu.com/releases/17.04/release/"
+ },
+ {
+ "filename": "ubuntu-16.04-server-cloudimg-amd64-disk1.img",
+ "version": "16.04.3",
+ "md5sum": "bd0c168a83b1f483bd240b3d874edd6c",
+ "filesize": 288686080,
+ "download_url": "https://cloud-images.ubuntu.com/releases/16.04/release/"
+ },
+ {
+ "filename": "ubuntu-14.04-server-cloudimg-amd64-disk1.img",
+ "version": "14.04.5",
+ "md5sum": "d7b4112c7d797e5e77ef9995d06a76f1",
+ "filesize": 262406656,
+ "download_url": "https://cloud-images.ubuntu.com/releases/14.04/release/"
+ },
+ {
+ "filename": "ubuntu-cloud-init-data.iso",
+ "version": "1.0",
+ "md5sum": "328469100156ae8dbf262daa319c27ff",
+ "filesize": 131072,
+ "download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/ubuntu-cloud-init-data.iso/download"
+ }
+ ],
+ "versions": [
+ {
+ "name": "17.10",
+ "images": {
+ "hda_disk_image": "ubuntu-17.10-server-cloudimg-amd64.img",
+ "cdrom_image": "ubuntu-cloud-init-data.iso"
+ }
+ },
+ {
+ "name": "17.04",
+ "images": {
+ "hda_disk_image": "ubuntu-17.04-server-cloudimg-amd64.img",
+ "cdrom_image": "ubuntu-cloud-init-data.iso"
+ }
+ },
+ {
+ "name": "16.04 (LTS)",
+ "images": {
+ "hda_disk_image": "ubuntu-16.04-server-cloudimg-amd64-disk1.img",
+ "cdrom_image": "ubuntu-cloud-init-data.iso"
+ }
+ },
+ {
+ "name": "14.04 (LTS)",
+ "images": {
+ "hda_disk_image": "ubuntu-14.04-server-cloudimg-amd64-disk1.img",
+ "cdrom_image": "ubuntu-cloud-init-data.iso"
+ }
+ }
+ ]
+}
diff --git a/gns3server/appliances/ubuntu.gns3a b/gns3server/appliances/ubuntu-docker.gns3a
similarity index 95%
rename from gns3server/appliances/ubuntu.gns3a
rename to gns3server/appliances/ubuntu-docker.gns3a
index b64737fb..9a9bbc20 100644
--- a/gns3server/appliances/ubuntu.gns3a
+++ b/gns3server/appliances/ubuntu-docker.gns3a
@@ -1,5 +1,5 @@
{
- "name": "Ubuntu",
+ "name": "Ubuntu Docker Guest",
"category": "guest",
"description": "Ubuntu is a Debian-based Linux operating system, with Unity as its default desktop environment. It is based on free software and named after the Southern African philosophy of ubuntu (literally, \"human-ness\"), which often is translated as \"humanity towards others\" or \"the belief in a universal bond of sharing that connects all humanity\".",
"vendor_name": "Canonical",
diff --git a/gns3server/appliances/ubuntu-gui.gns3a b/gns3server/appliances/ubuntu-gui.gns3a
index 259dbad8..410abe1e 100644
--- a/gns3server/appliances/ubuntu-gui.gns3a
+++ b/gns3server/appliances/ubuntu-gui.gns3a
@@ -1,5 +1,5 @@
{
- "name": "Ubuntu",
+ "name": "Ubuntu Desktop Guest",
"category": "guest",
"description": "Ubuntu is a full-featured Linux operating system which is based on Debian distribution and freely available with both community and professional support, it comes with Unity as its default desktop environment. There are other flavors of Ubuntu available with other desktops as default like Ubuntu Gnome, Lubuntu, Xubuntu, and so on. A tightly-integrated selection of excellent applications is included, and an incredible variety of add-on software is just a few clicks away. A default installation of Ubuntu contains a wide range of software that includes LibreOffice, Firefox, Empathy, Transmission, etc.",
"vendor_name": "Canonical Inc.",
diff --git a/gns3server/appliances/untangle.gns3a b/gns3server/appliances/untangle.gns3a
index 3bc88d75..6c26cac7 100644
--- a/gns3server/appliances/untangle.gns3a
+++ b/gns3server/appliances/untangle.gns3a
@@ -24,6 +24,13 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "untangle_1320_x64.iso",
+ "version": "13.2.0",
+ "md5sum": "0ce2293acec0f37f1339e703653727f8",
+ "filesize": 768000000,
+ "download_url": "https://www.untangle.com/get-untangle/"
+ },
{
"filename": "untangle_1310_x64.iso",
"version": "13.1.0",
@@ -90,6 +97,13 @@
}
],
"versions": [
+ {
+ "name": "13.2.0",
+ "images": {
+ "hda_disk_image": "empty30G.qcow2",
+ "cdrom_image": "untangle_1320_x64.iso"
+ }
+ },
{
"name": "13.1.0",
"images": {
diff --git a/gns3server/appliances/vyos.gns3a b/gns3server/appliances/vyos.gns3a
index 6a7994a6..fc68dedd 100644
--- a/gns3server/appliances/vyos.gns3a
+++ b/gns3server/appliances/vyos.gns3a
@@ -31,29 +31,37 @@
"download_url": "http://dev.packages.vyos.net/iso/preview/1.2.0-beta1/",
"direct_download_url": "http://dev.packages.vyos.net/iso/preview/1.2.0-beta1/vyos-1.2.0-beta1-amd64.iso"
},
+ {
+ "filename": "vyos-1.1.8-amd64.iso",
+ "version": "1.1.8",
+ "md5sum": "95a141d4b592b81c803cdf7e9b11d8ea",
+ "filesize": 241172480,
+ "download_url": "https://downloads.vyos.io/?dir=release/1.1.8",
+ "direct_download_url": "https://downloads.vyos.io/release/1.1.8/vyos-1.1.8-amd64.iso"
+ },
{
"filename": "vyos-1.1.7-amd64.iso",
"version": "1.1.7",
"md5sum": "9a7f745a0b0db0d4f1d9eee2a437fb54",
"filesize": 245366784,
- "download_url": "http://mirror.vyos.net/iso/release/1.1.7/",
- "direct_download_url": "http://mirror.vyos.net/iso/release/1.1.7/vyos-1.1.7-amd64.iso"
+ "download_url": "https://downloads.vyos.io/?dir=release/1.1.7/",
+ "direct_download_url": "https://downloads.vyos.io/release/1.1.7/vyos-1.1.7-amd64.iso"
},
{
"filename": "vyos-1.1.6-amd64.iso",
"version": "1.1.6",
"md5sum": "3128954d026e567402a924c2424ce2bf",
"filesize": 245366784,
- "download_url": "http://mirror.vyos.net/iso/release/1.1.6/",
- "direct_download_url": "http://mirror.vyos.net/iso/release/1.1.6/vyos-1.1.6-amd64.iso"
+ "download_url": "hhttps://downloads.vyos.io/?dir=release/1.1.6/",
+ "direct_download_url": "https://downloads.vyos.io/release/1.1.6/vyos-1.1.6-amd64.iso"
},
{
"filename": "vyos-1.1.5-amd64.iso",
"version": "1.1.5",
"md5sum": "193179532011ceaa87ee725bd8f22022",
"filesize": 247463936,
- "download_url": "http://mirror.vyos.net/iso/release/1.1.5/",
- "direct_download_url": "http://mirror.vyos.net/iso/release/1.1.5/vyos-1.1.5-amd64.iso"
+ "download_url": "https://downloads.vyos.io/?dir=release/1.1.5/",
+ "direct_download_url": "https://downloads.vyos.io/release/1.1.5/vyos-1.1.5-amd64.iso"
},
{
"filename": "empty8G.qcow2",
diff --git a/gns3server/appliances/windows.gns3a b/gns3server/appliances/windows.gns3a
index 289e6c20..ac84deee 100644
--- a/gns3server/appliances/windows.gns3a
+++ b/gns3server/appliances/windows.gns3a
@@ -9,6 +9,7 @@
"product_url": "https://www.microsoft.com/en-us/windows",
"registry_version": 4,
"status": "stable",
+ "availability": "free-to-try",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"usage": "These virtual machines expire after 90 days; i.e. you have to re-create them in your project after this time but you don't have to re-import the appliance.\n\nDefault credentials: IEUser / Passw0rd!",
@@ -25,6 +26,13 @@
"kvm": "require"
},
"images": [
+ {
+ "filename": "MSEdge-Win10-VMWare-disk1.vmdk",
+ "version": "10 w/ Edge",
+ "md5sum": "fef74c69e1949480d4e2095324a169af",
+ "filesize": 5636608512,
+ "download_url": "https://developer.microsoft.com/en-us/microsoft-edge/tools/vms/"
+ },
{
"filename": "MSEdge_-_Win10_preview.vmdk",
"version": "10 w/ Edge",
@@ -71,6 +79,12 @@
"versions": [
{
"name": "10 w/ Edge",
+ "images": {
+ "hda_disk_image": "MSEdge-Win10-VMWare-disk1.vmdk"
+ }
+ },
+ {
+ "name": "10 w/ Edge (Preview)",
"images": {
"hda_disk_image": "MSEdge_-_Win10_preview.vmdk"
}
diff --git a/gns3server/appliances/windows_server.gns3a b/gns3server/appliances/windows_server.gns3a
index e7bfb379..a84c06d5 100644
--- a/gns3server/appliances/windows_server.gns3a
+++ b/gns3server/appliances/windows_server.gns3a
@@ -9,6 +9,7 @@
"product_url": "https://www.microsoft.com/en-us/windows",
"registry_version": 4,
"status": "stable",
+ "availability": "free-to-try",
"maintainer": "GNS3 Team",
"maintainer_email": "developers@gns3.net",
"symbol": "microsoft.svg",
@@ -21,7 +22,8 @@
"arch": "x86_64",
"console_type": "vnc",
"boot_priority": "c",
- "kvm": "require"
+ "kvm": "require",
+ "options": "-usbdevice tablet"
},
"images": [
{
diff --git a/gns3server/appliances/zeroshell.gns3a b/gns3server/appliances/zeroshell.gns3a
index f689636c..fd82ef59 100644
--- a/gns3server/appliances/zeroshell.gns3a
+++ b/gns3server/appliances/zeroshell.gns3a
@@ -20,6 +20,24 @@
"kvm": "allow"
},
"images": [
+ {
+ "filename": "ZeroShell-3.8.2-X86-USB.img",
+ "version": "3.8.2",
+ "md5sum": "bb8c7f24c86eb59e26ce36ff1979ecd4",
+ "filesize": 1992294400,
+ "download_url": "http://www.zeroshell.org/download/",
+ "direct_download_url": "http://www.zeroshell.net/listing/ZeroShell-3.8.2-X86-USB.img.gz",
+ "compression": "gzip"
+ },
+ {
+ "filename": "ZeroShell-3.8.1-X86-USB.img",
+ "version": "3.8.1",
+ "md5sum": "49256e396d160e88fbc3a3889e172482",
+ "filesize": 1992294400,
+ "download_url": "http://www.zeroshell.org/download/",
+ "direct_download_url": "http://www.zeroshell.net/listing/ZeroShell-3.8.1-X86-USB.img.gz",
+ "compression": "gzip"
+ },
{
"filename": "ZeroShell-3.8.0-X86-USB.img",
"version": "3.8.0",
@@ -40,6 +58,18 @@
}
],
"versions": [
+ {
+ "name": "3.8.2",
+ "images": {
+ "hda_disk_image": "ZeroShell-3.8.2-X86-USB.img"
+ }
+ },
+ {
+ "name": "3.8.1",
+ "images": {
+ "hda_disk_image": "ZeroShell-3.8.1-X86-USB.img"
+ }
+ },
{
"name": "3.8.0",
"images": {
diff --git a/gns3server/compute/base_manager.py b/gns3server/compute/base_manager.py
index 59e9c90a..f383a197 100644
--- a/gns3server/compute/base_manager.py
+++ b/gns3server/compute/base_manager.py
@@ -335,11 +335,14 @@ class BaseManager:
:returns: Node instance
"""
+ node = None
try:
- node = yield from self.close_node(node_id)
+ node = self.get_node(node_id)
+ yield from self.close_node(node_id)
finally:
- node.project.emit("node.deleted", node)
- yield from node.project.remove_node(node)
+ if node:
+ node.project.emit("node.deleted", node)
+ yield from node.project.remove_node(node)
if node.id in self._nodes:
del self._nodes[node.id]
return node
@@ -546,7 +549,7 @@ class BaseManager:
# We store the file under his final name only when the upload is finished
tmp_path = path + ".tmp"
os.makedirs(os.path.dirname(path), exist_ok=True)
- with open(tmp_path, 'wb+') as f:
+ with open(tmp_path, 'wb') as f:
while True:
packet = yield from stream.read(4096)
if not packet:
diff --git a/gns3server/compute/base_node.py b/gns3server/compute/base_node.py
index 288c8930..c2a4e68e 100644
--- a/gns3server/compute/base_node.py
+++ b/gns3server/compute/base_node.py
@@ -25,6 +25,7 @@ import asyncio
import tempfile
import psutil
import platform
+import re
from gns3server.utils.interfaces import interfaces
from ..compute.port_manager import PortManager
@@ -598,15 +599,24 @@ class BaseNode:
@asyncio.coroutine
def _ubridge_apply_filters(self, bridge_name, filters):
"""
- Apply filter like rate limiting
+ Apply packet filters
:param bridge_name: bridge name in uBridge
- :param filters: Array of filter dictionnary
+ :param filters: Array of filter dictionary
"""
yield from self._ubridge_send('bridge reset_packet_filters ' + bridge_name)
- for filter in self._build_filter_list(filters):
- cmd = 'bridge add_packet_filter {} {}'.format(bridge_name, filter)
- yield from self._ubridge_send(cmd)
+ for packet_filter in self._build_filter_list(filters):
+ cmd = 'bridge add_packet_filter {} {}'.format(bridge_name, packet_filter)
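+ # uBridge rejects invalid BPF expressions; warn and skip the filter instead of failing the node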
+ try:
+ yield from self._ubridge_send(cmd)
+ except UbridgeError as e:
+ match = re.search("Cannot compile filter '(.*)': syntax error", str(e))
+ if match:
+ message = "Warning: ignoring BPF packet filter '{}' due to syntax error".format(self.name, match.group(1))
+ log.warning(message)
+ self.project.emit("log.warning", {"message": message})
+ else:
+ raise
def _build_filter_list(self, filters):
"""
diff --git a/gns3server/compute/builtin/nodes/ethernet_hub.py b/gns3server/compute/builtin/nodes/ethernet_hub.py
index 5aa95b32..ebc561ec 100644
--- a/gns3server/compute/builtin/nodes/ethernet_hub.py
+++ b/gns3server/compute/builtin/nodes/ethernet_hub.py
@@ -17,7 +17,6 @@
import asyncio
-from ...error import NodeError
from ...base_node import BaseNode
import logging
diff --git a/gns3server/compute/builtin/nodes/ethernet_switch.py b/gns3server/compute/builtin/nodes/ethernet_switch.py
index 0d0119ff..523c3d01 100644
--- a/gns3server/compute/builtin/nodes/ethernet_switch.py
+++ b/gns3server/compute/builtin/nodes/ethernet_switch.py
@@ -17,7 +17,6 @@
import asyncio
-from ...error import NodeError
from ...base_node import BaseNode
import logging
diff --git a/gns3server/compute/builtin/nodes/nat.py b/gns3server/compute/builtin/nodes/nat.py
index 1d7557cd..59c8f072 100644
--- a/gns3server/compute/builtin/nodes/nat.py
+++ b/gns3server/compute/builtin/nodes/nat.py
@@ -16,7 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
-import asyncio
+
from .cloud import Cloud
from ...error import NodeError
diff --git a/gns3server/compute/docker/__init__.py b/gns3server/compute/docker/__init__.py
index 539f98bc..d2fca166 100644
--- a/gns3server/compute/docker/__init__.py
+++ b/gns3server/compute/docker/__init__.py
@@ -99,7 +99,7 @@ class Docker(BaseManager):
:param method: HTTP method
:param path: Endpoint in API
- :param data: Dictionnary with the body. Will be transformed to a JSON
+ :param data: Dictionary with the body. Will be transformed to a JSON
:param params: Parameters added as a query arg
"""
diff --git a/gns3server/compute/docker/docker_vm.py b/gns3server/compute/docker/docker_vm.py
index 4ac9e21d..3d291550 100644
--- a/gns3server/compute/docker/docker_vm.py
+++ b/gns3server/compute/docker/docker_vm.py
@@ -314,7 +314,7 @@ class DockerVM(BaseNode):
params["Env"].append("GNS3_VOLUMES={}".format(":".join(self._volumes)))
if self._environment:
- for e in self._environment.split("\n"):
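+ # strip the whole block first so a trailing empty line does not produce an empty env entry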
+ for e in self._environment.strip().split("\n"):
e = e.strip()
if not e.startswith("GNS3_"):
params["Env"].append(e)
@@ -352,7 +352,11 @@ class DockerVM(BaseNode):
def start(self):
"""Starts this Docker container."""
- state = yield from self._get_container_state()
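+ # the container may not be fully created yet; turn the raw 404 into a clearer error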
+ try:
+ state = yield from self._get_container_state()
+ except DockerHttp404Error:
+ raise DockerError("Docker container '{name}' with ID {cid} does not exist or is not ready yet. Please try again in a few seconds.".format(name=self.name,
+ cid=self._cid))
if state == "paused":
yield from self.unpause()
elif state == "running":
diff --git a/gns3server/compute/dynamips/__init__.py b/gns3server/compute/dynamips/__init__.py
index c6fd1c05..0254d969 100644
--- a/gns3server/compute/dynamips/__init__.py
+++ b/gns3server/compute/dynamips/__init__.py
@@ -547,7 +547,7 @@ class Dynamips(BaseManager):
content = content.replace('%h', vm.name)
f.write(content.encode("utf-8"))
except OSError as e:
- raise DynamipsError("Could not create config file {}: {}".format(path, e))
+ raise DynamipsError("Could not create config file '{}': {}".format(path, e))
return os.path.join("configs", os.path.basename(path))
diff --git a/gns3server/compute/dynamips/dynamips_hypervisor.py b/gns3server/compute/dynamips/dynamips_hypervisor.py
index 1eb9ae77..936707d6 100644
--- a/gns3server/compute/dynamips/dynamips_hypervisor.py
+++ b/gns3server/compute/dynamips/dynamips_hypervisor.py
@@ -260,6 +260,8 @@ class DynamipsHypervisor:
# Now retrieve the result
data = []
buf = ''
+ retries = 0
+ max_retries = 10
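+ # tolerate a few empty reads while the hypervisor is still running before giving up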
while True:
try:
try:
@@ -276,8 +278,14 @@ class DynamipsHypervisor:
log.warning("Connection reset received while reading Dynamips response: {}".format(e))
continue
if not chunk:
- raise DynamipsError("No data returned from {host}:{port}, Dynamips process running: {run}"
- .format(host=self._host, port=self._port, run=self.is_running()))
+ if retries > max_retries:
+ raise DynamipsError("No data returned from {host}:{port}, Dynamips process running: {run}"
+ .format(host=self._host, port=self._port, run=self.is_running()))
+ else:
+ retries += 1
+ yield from asyncio.sleep(0.1)
+ continue
+ retries = 0
buf += chunk.decode("utf-8", errors="ignore")
except OSError as e:
raise DynamipsError("Could not read response for '{command}' from {host}:{port}: {error}, process running: {run}"
diff --git a/gns3server/compute/dynamips/nodes/ethernet_switch.py b/gns3server/compute/dynamips/nodes/ethernet_switch.py
index 6d044bcd..841a47c9 100644
--- a/gns3server/compute/dynamips/nodes/ethernet_switch.py
+++ b/gns3server/compute/dynamips/nodes/ethernet_switch.py
@@ -44,9 +44,9 @@ class EthernetSwitchConsole(EmbedShell):
self._node = node
@asyncio.coroutine
- def arp(self):
+ def mac(self):
"""
- Show arp table
+ Show MAC address table
"""
res = 'Port Mac VLAN\n'
result = (yield from self._node._hypervisor.send('ethsw show_mac_addr_table {}'.format(self._node.name)))
diff --git a/gns3server/compute/dynamips/nodes/router.py b/gns3server/compute/dynamips/nodes/router.py
index 4e31ea4f..a05e61aa 100644
--- a/gns3server/compute/dynamips/nodes/router.py
+++ b/gns3server/compute/dynamips/nodes/router.py
@@ -283,12 +283,15 @@ class Router(BaseNode):
if not self._ghost_flag:
self.check_available_ram(self.ram)
+ # config paths are relative to the working directory configured on Dynamips hypervisor
startup_config_path = os.path.join("configs", "i{}_startup-config.cfg".format(self._dynamips_id))
private_config_path = os.path.join("configs", "i{}_private-config.cfg".format(self._dynamips_id))
- if not os.path.exists(private_config_path) or not os.path.getsize(private_config_path):
+ if not os.path.exists(os.path.join(self._working_directory, private_config_path)) or \
+ not os.path.getsize(os.path.join(self._working_directory, private_config_path)):
# an empty private-config can prevent a router to boot.
private_config_path = ''
+
yield from self._hypervisor.send('vm set_config "{name}" "{startup}" "{private}"'.format(
name=self._name,
startup=startup_config_path,
diff --git a/gns3server/compute/iou/__init__.py b/gns3server/compute/iou/__init__.py
index acc994df..4cba97d7 100644
--- a/gns3server/compute/iou/__init__.py
+++ b/gns3server/compute/iou/__init__.py
@@ -25,6 +25,7 @@ import asyncio
from ..base_manager import BaseManager
from .iou_error import IOUError
from .iou_vm import IOUVM
+from .utils.application_id import get_next_application_id
import logging
log = logging.getLogger(__name__)
@@ -38,8 +39,7 @@ class IOU(BaseManager):
def __init__(self):
super().__init__()
- self._free_application_ids = list(range(1, 512))
- self._used_application_ids = {}
+ self._iou_id_lock = asyncio.Lock()
@asyncio.coroutine
def create_node(self, *args, **kwargs):
@@ -49,40 +49,14 @@ class IOU(BaseManager):
:returns: IOUVM instance
"""
- node = yield from super().create_node(*args, **kwargs)
- try:
- self._used_application_ids[node.id] = self._free_application_ids.pop(0)
- except IndexError:
- raise IOUError("Cannot create a new IOU VM (limit of 512 VMs reached on this host)")
+ with (yield from self._iou_id_lock):
+ # wait for a node to be completely created before adding a new one
+ # this is important otherwise we allocate the same application ID
+ # when creating multiple IOU nodes at the same time
+ application_id = get_next_application_id(self.nodes)
+ node = yield from super().create_node(*args, application_id=application_id, **kwargs)
return node
- @asyncio.coroutine
- def close_node(self, node_id, *args, **kwargs):
- """
- Closes an IOU VM.
-
- :returns: IOUVM instance
- """
-
- node = self.get_node(node_id)
- if node_id in self._used_application_ids:
- i = self._used_application_ids[node_id]
- self._free_application_ids.insert(0, i)
- del self._used_application_ids[node_id]
- yield from super().close_node(node_id, *args, **kwargs)
- return node
-
- def get_application_id(self, node_id):
- """
- Get an unique application identifier for IOU.
-
- :param node_id: Node identifier
-
- :returns: IOU application identifier
- """
-
- return self._used_application_ids.get(node_id, 1)
-
@staticmethod
def get_legacy_vm_workdir(legacy_vm_id, name):
"""
diff --git a/gns3server/compute/iou/iou_vm.py b/gns3server/compute/iou/iou_vm.py
index a604eac8..c863d22e 100644
--- a/gns3server/compute/iou/iou_vm.py
+++ b/gns3server/compute/iou/iou_vm.py
@@ -65,7 +65,7 @@ class IOUVM(BaseNode):
:param console: TCP console port
"""
- def __init__(self, name, node_id, project, manager, path=None, console=None):
+ def __init__(self, name, node_id, project, manager, application_id=None, path=None, console=None):
super().__init__(name, node_id, project, manager, console=console)
@@ -86,7 +86,7 @@ class IOUVM(BaseNode):
self._startup_config = ""
self._private_config = ""
self._ram = 256 # Megabytes
- self._application_id = None
+ self._application_id = application_id
self._l1_keepalives = False # used to overcome the always-up Ethernet interfaces (not supported by all IOSes).
def _config(self):
@@ -348,14 +348,14 @@ class IOUVM(BaseNode):
# reload
path = os.path.join(os.path.expanduser("~/"), ".iourc")
try:
- with open(path, "wb+") as f:
+ with open(path, "wb") as f:
f.write(value.encode("utf-8"))
except OSError as e:
raise IOUError("Could not write the iourc file {}: {}".format(path, e))
path = os.path.join(self.temporary_directory, "iourc")
try:
- with open(path, "wb+") as f:
+ with open(path, "wb") as f:
f.write(value.encode("utf-8"))
except OSError as e:
raise IOUError("Could not write the iourc file {}: {}".format(path, e))
@@ -1141,8 +1141,7 @@ class IOUVM(BaseNode):
:returns: integer between 1 and 512
"""
- if self._application_id is None:
- return self._manager.get_application_id(self.id)
+
return self._application_id
@application_id.setter
diff --git a/gns3server/compute/iou/utils/application_id.py b/gns3server/compute/iou/utils/application_id.py
index d3de5fe4..5bd2c125 100644
--- a/gns3server/compute/iou/utils/application_id.py
+++ b/gns3server/compute/iou/utils/application_id.py
@@ -24,13 +24,14 @@ log = logging.getLogger(__name__)
def get_next_application_id(nodes):
"""
Calculates free application_id from given nodes
+
:param nodes:
:raises IOUError when exceeds number
:return: integer first free id
"""
- used = set([n.properties.get('application_id') for n in nodes if n.node_type == 'iou'])
+ used = set([n.application_id for n in nodes])
pool = set(range(1, 512))
try:
return (pool - used).pop()
except KeyError:
- raise IOUError("Cannot create a new IOU VM (limit of 512 VMs reached)")
+ raise IOUError("Cannot create a new IOU VM (limit of 512 VMs on one host reached)")
diff --git a/gns3server/compute/qemu/qemu_vm.py b/gns3server/compute/qemu/qemu_vm.py
index 76067b5e..92a43020 100644
--- a/gns3server/compute/qemu/qemu_vm.py
+++ b/gns3server/compute/qemu/qemu_vm.py
@@ -533,7 +533,7 @@ class QemuVM(BaseNode):
if not mac_address:
# use the node UUID to generate a random MAC address
- self._mac_address = "00:%s:%s:%s:%s:00" % (self.project.id[-4:-2], self.project.id[-2:], self.id[-4:-2], self.id[-2:])
+ self._mac_address = "52:%s:%s:%s:%s:00" % (self.project.id[-4:-2], self.project.id[-2:], self.id[-4:-2], self.id[-2:])
else:
self._mac_address = mac_address
diff --git a/gns3server/compute/virtualbox/__init__.py b/gns3server/compute/virtualbox/__init__.py
index f49f61bc..a676bd1a 100644
--- a/gns3server/compute/virtualbox/__init__.py
+++ b/gns3server/compute/virtualbox/__init__.py
@@ -57,24 +57,34 @@ class VirtualBox(BaseManager):
# look for VBoxManage
vboxmanage_path = self.config.get_section_config("VirtualBox").get("vboxmanage_path")
- if not vboxmanage_path:
+ if vboxmanage_path:
+ if not os.path.isabs(vboxmanage_path):
+ vboxmanage_path = shutil.which(vboxmanage_path)
+ else:
+ log.info("A path to VBoxManage has not been configured, trying to find it...")
if sys.platform.startswith("win"):
if "VBOX_INSTALL_PATH" in os.environ:
- vboxmanage_path = os.path.join(os.environ["VBOX_INSTALL_PATH"], "VBoxManage.exe")
+ vboxmanage_path_windows = os.path.join(os.environ["VBOX_INSTALL_PATH"], "VBoxManage.exe")
+ if os.path.exists(vboxmanage_path_windows):
+ vboxmanage_path = vboxmanage_path_windows
elif "VBOX_MSI_INSTALL_PATH" in os.environ:
- vboxmanage_path = os.path.join(os.environ["VBOX_MSI_INSTALL_PATH"], "VBoxManage.exe")
+ vboxmanage_path_windows = os.path.join(os.environ["VBOX_MSI_INSTALL_PATH"], "VBoxManage.exe")
+ if os.path.exists(vboxmanage_path_windows):
+ vboxmanage_path = vboxmanage_path_windows
elif sys.platform.startswith("darwin"):
- vboxmanage_path = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage"
- else:
- vboxmanage_path = "vboxmanage"
+ vboxmanage_path_osx = "/Applications/VirtualBox.app/Contents/MacOS/VBoxManage"
+ if os.path.exists(vboxmanage_path_osx):
+ vboxmanage_path = vboxmanage_path_osx
+ if not vboxmanage_path:
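+ # last resort: search for the executable on the PATH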
+ vboxmanage_path = shutil.which("vboxmanage")
- if vboxmanage_path and not os.path.isabs(vboxmanage_path):
- vboxmanage_path = shutil.which(vboxmanage_path)
+ if vboxmanage_path and not os.path.exists(vboxmanage_path):
+ log.error("VBoxManage path '{}' doesn't exist".format(vboxmanage_path))
if not vboxmanage_path:
- raise VirtualBoxError("Could not find VBoxManage if you just install VirtualBox you need to reboot")
+ raise VirtualBoxError("Could not find VBoxManage, please reboot if VirtualBox has just been installed")
if not os.path.isfile(vboxmanage_path):
- raise VirtualBoxError("VBoxManage {} is not accessible".format(vboxmanage_path))
+ raise VirtualBoxError("VBoxManage '{}' is not accessible".format(vboxmanage_path))
if not os.access(vboxmanage_path, os.X_OK):
raise VirtualBoxError("VBoxManage is not executable")
if os.path.basename(vboxmanage_path) not in ["VBoxManage", "VBoxManage.exe", "vboxmanage"]:
diff --git a/gns3server/compute/vmware/__init__.py b/gns3server/compute/vmware/__init__.py
index 6f1a6cf1..73f258f9 100644
--- a/gns3server/compute/vmware/__init__.py
+++ b/gns3server/compute/vmware/__init__.py
@@ -396,12 +396,12 @@ class VMware(BaseManager):
try:
stdout_data, _ = yield from asyncio.wait_for(process.communicate(), timeout=timeout)
except asyncio.TimeoutError:
- raise VMwareError("vmrun has timed out after {} seconds!\nTry to run {} in a terminal to see more informations.\n\nMake sure GNS3 and VMware run under the same user and whitelist vmrun.exe in your antivirus.".format(timeout, command_string))
+ raise VMwareError("vmrun has timed out after {} seconds!\nTry to run {} in a terminal to see more details.\n\nMake sure GNS3 and VMware run under the same user and whitelist vmrun.exe in your antivirus.".format(timeout, command_string))
if process.returncode:
# vmrun print errors on stdout
vmrun_error = stdout_data.decode("utf-8", errors="ignore")
- raise VMwareError("vmrun has returned an error: {}\nTry to run {} in a terminal to see more informations.\nAnd make sure GNS3 and VMware run under the same user.".format(vmrun_error, command_string))
+ raise VMwareError("vmrun has returned an error: {}\nTry to run {} in a terminal to see more details.\nAnd make sure GNS3 and VMware run under the same user.".format(vmrun_error, command_string))
return stdout_data.decode("utf-8", errors="ignore").splitlines()
diff --git a/gns3server/compute/vmware/vmware_vm.py b/gns3server/compute/vmware/vmware_vm.py
index 37311f5e..3e2e58df 100644
--- a/gns3server/compute/vmware/vmware_vm.py
+++ b/gns3server/compute/vmware/vmware_vm.py
@@ -745,7 +745,7 @@ class VMwareVM(BaseNode):
"Please remove it or allow VMware VM '{name}' to use any adapter.".format(attachment=self._vmx_pairs[connection_type],
adapter_number=adapter_number,
name=self.name))
- elif self.is_running():
+ elif (yield from self.is_running()):
raise VMwareError("Attachment '{attachment}' is configured on network adapter {adapter_number}. "
"Please stop VMware VM '{name}' to link to this adapter and allow GNS3 to change the attachment type.".format(attachment=self._vmx_pairs[connection_type],
adapter_number=adapter_number,
diff --git a/gns3server/compute/vpcs/vpcs_vm.py b/gns3server/compute/vpcs/vpcs_vm.py
index b832b8e7..787c0aa6 100644
--- a/gns3server/compute/vpcs/vpcs_vm.py
+++ b/gns3server/compute/vpcs/vpcs_vm.py
@@ -134,20 +134,6 @@ class VPCSVM(BaseNode):
"project_id": self.project.id,
"command_line": self.command_line}
- @property
- def relative_startup_script(self):
- """
- Returns the startup config file relative to the project directory.
-
- :returns: path to config file. None if the file doesn't exist
- """
-
- path = os.path.join(self.working_dir, 'startup.vpc')
- if os.path.exists(path):
- return 'startup.vpc'
- else:
- return None
-
def _vpcs_path(self):
"""
Returns the VPCS executable path.
diff --git a/gns3server/controller/__init__.py b/gns3server/controller/__init__.py
index a828f9cf..5c914f57 100644
--- a/gns3server/controller/__init__.py
+++ b/gns3server/controller/__init__.py
@@ -82,7 +82,8 @@ class Controller:
if appliance.status != 'broken':
self._appliance_templates[appliance.id] = appliance
except (ValueError, OSError, KeyError) as e:
- log.warning("Can't load %s: %s", path, str(e))
+ log.warning("Cannot load appliance template file '%s': %s", path, str(e))
+ continue
self._appliances = {}
vms = []
@@ -122,15 +123,34 @@ class Controller:
for prop in vm.copy():
if prop in ["enable_remote_console", "use_ubridge"]:
del vm[prop]
+
+ # remove deprecated default_symbol and hover_symbol
+ # and set symbol if not present
+ deprecated = ["default_symbol", "hover_symbol"]
+ if len([prop for prop in vm.keys() if prop in deprecated]) > 0:
+ if "default_symbol" in vm.keys():
+ del vm["default_symbol"]
+ if "hover_symbol" in vm.keys():
+ del vm["hover_symbol"]
+
+ if "symbol" not in vm.keys():
+ vm["symbol"] = ":/symbols/computer.svg"
+
vm.setdefault("appliance_id", str(uuid.uuid4()))
- appliance = Appliance(vm["appliance_id"], vm)
- self._appliances[appliance.id] = appliance
+ try:
+ appliance = Appliance(vm["appliance_id"], vm)
+ appliance.__json__() # Check if loaded without error
+ self._appliances[appliance.id] = appliance
+ except KeyError as e:
+ # appliance data is not complete (missing name or type)
+ log.warning("Cannot load appliance template {} ('{}'): missing key {}".format(vm["appliance_id"], vm.get("name", "unknown"), e))
+ continue
# Add builtins
builtins = []
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "cloud"), {"node_type": "cloud", "name": "Cloud", "category": 2, "symbol": ":/symbols/cloud.svg"}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "nat"), {"node_type": "nat", "name": "NAT", "category": 2, "symbol": ":/symbols/cloud.svg"}, builtin=True))
- builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "vpcs"), {"node_type": "vpcs", "name": "VPCS", "category": 2, "symbol": ":/symbols/vpcs_guest.svg", "properties": {"base_script_file": "vpcs_base_config.txt"}}, builtin=True))
+ builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "vpcs"), {"node_type": "vpcs", "name": "VPCS", "default_name_format": "PC-{0}", "category": 2, "symbol": ":/symbols/vpcs_guest.svg", "properties": {"base_script_file": "vpcs_base_config.txt"}}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "ethernet_switch"), {"node_type": "ethernet_switch", "name": "Ethernet switch", "category": 1, "symbol": ":/symbols/ethernet_switch.svg"}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "ethernet_hub"), {"node_type": "ethernet_hub", "name": "Ethernet hub", "category": 1, "symbol": ":/symbols/hub.svg"}, builtin=True))
builtins.append(Appliance(uuid.uuid3(uuid.NAMESPACE_DNS, "frame_relay_switch"), {"node_type": "frame_relay_switch", "name": "Frame Relay switch", "category": 1, "symbol": ":/symbols/frame_relay_switch.svg"}, builtin=True))
@@ -405,6 +425,7 @@ class Controller:
:param connect: True connect to the compute immediately
:param kwargs: See the documentation of Compute
"""
+
if compute_id not in self._computes:
# We disallow to create from the outside the local and VM server
diff --git a/gns3server/controller/appliance.py b/gns3server/controller/appliance.py
index 6b0b97f3..8b7b2995 100644
--- a/gns3server/controller/appliance.py
+++ b/gns3server/controller/appliance.py
@@ -44,7 +44,9 @@ class Appliance:
# Version of the gui before 2.1 use linked_base
# and the server linked_clone
if "linked_base" in self._data:
- self._data["linked_clone"] = self._data.pop("linked_base")
+ linked_base = self._data.pop("linked_base")
+ if "linked_clone" not in self._data:
+ self._data["linked_clone"] = linked_base
if data["node_type"] == "iou" and "image" in data:
del self._data["image"]
self._builtin = builtin
diff --git a/gns3server/controller/compute.py b/gns3server/controller/compute.py
index 9eb9bd6b..e1948a35 100644
--- a/gns3server/controller/compute.py
+++ b/gns3server/controller/compute.py
@@ -377,13 +377,13 @@ class Compute:
"""
:param dont_connect: If true do not reconnect if not connected
"""
+
if not self._connected and not dont_connect:
if self._id == "vm" and not self._controller.gns3vm.running:
yield from self._controller.gns3vm.start()
-
yield from self.connect()
if not self._connected and not dont_connect:
- raise ComputeError("Can't connect to {}".format(self._name))
+ raise ComputeError("Cannot connect to compute '{}' with request {} {}".format(self._name, method, path))
response = yield from self._run_http_query(method, path, data=data, **kwargs)
return response
@@ -402,20 +402,20 @@ class Compute:
"""
Check if remote server is accessible
"""
+
if not self._connected and not self._closed:
try:
+ log.info("Connecting to compute '{}'".format(self._id))
response = yield from self._run_http_query("GET", "/capabilities")
- except ComputeError:
+ except ComputeError as e:
# Try to reconnect after 2 seconds if server unavailable only if not during tests (otherwise we create a ressources usage bomb)
if not hasattr(sys, "_called_from_test") or not sys._called_from_test:
self._connection_failure += 1
# After 5 failure we close the project using the compute to avoid sync issues
if self._connection_failure == 5:
- log.warning("Can't connect to compute %s", self._id)
+ log.warning("Cannot connect to compute '{}': {}".format(self._id, e))
yield from self._controller.close_compute_projects(self)
-
asyncio.get_event_loop().call_later(2, lambda: asyncio.async(self._try_reconnect()))
-
return
except aiohttp.web.HTTPNotFound:
raise aiohttp.web.HTTPConflict(text="The server {} is not a GNS3 server or it's a 1.X server".format(self._id))
diff --git a/gns3server/controller/drawing.py b/gns3server/controller/drawing.py
index 39a4d158..47c09d0e 100644
--- a/gns3server/controller/drawing.py
+++ b/gns3server/controller/drawing.py
@@ -118,7 +118,7 @@ class Drawing:
file_path = os.path.join(self._project.pictures_directory, filename)
if not os.path.exists(file_path):
- with open(file_path, "wb+") as f:
+ with open(file_path, "wb") as f:
f.write(data)
value = filename
diff --git a/gns3server/controller/export_project.py b/gns3server/controller/export_project.py
index 57510f69..7e43b894 100644
--- a/gns3server/controller/export_project.py
+++ b/gns3server/controller/export_project.py
@@ -29,7 +29,8 @@ log = logging.getLogger(__name__)
@asyncio.coroutine
-def export_project(project, temporary_dir, include_images=False, keep_compute_id=False, allow_all_nodes=False):
+def export_project(project, temporary_dir, include_images=False, keep_compute_id=False,
+ allow_all_nodes=False, ignore_prefixes=None):
"""
Export the project as zip. It's a ZipStream object.
The file will be read chunk by chunk when you iterate on
@@ -63,7 +64,6 @@ def export_project(project, temporary_dir, include_images=False, keep_compute_id
for root, dirs, files in os.walk(project._path, topdown=True):
files = [f for f in files if not _filter_files(os.path.join(root, f))]
-
for file in files:
path = os.path.join(root, file)
# Try open the file
@@ -111,6 +111,10 @@ def _filter_files(path):
if path.endswith("snapshots"):
return True
+ # filter directory of snapshots
+ if "{sep}snapshots{sep}".format(sep=os.path.sep) in path:
+ return True
+
try:
i = s.index("project-files")
if s[i + 1] in ("tmp", "captures", "snapshots"):
@@ -157,9 +161,12 @@ def _export_project_file(project, path, z, include_images, keep_compute_id, allo
if "properties" in node and node["node_type"] != "docker":
for prop, value in node["properties"].items():
- if not prop.endswith("image"):
- continue
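+ # IOU references its image through the "path" property instead of an *_image property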
+ if node["node_type"] == "iou":
+ if not prop == "path":
+ continue
+ elif not prop.endswith("image"):
+ continue
if value is None or value.strip() == '':
continue
@@ -209,7 +216,6 @@ def _export_local_images(project, image, z):
continue
directory = os.path.split(img_directory)[-1:][0]
-
if os.path.exists(image):
path = image
else:
@@ -260,4 +266,3 @@ def _export_remote_images(project, compute_id, image_type, image, project_zipfil
arcname = os.path.join("images", image_type, image)
log.info("Saved {}".format(arcname))
project_zipfile.write(temp_path, arcname=arcname, compress_type=zipfile.ZIP_DEFLATED)
-
diff --git a/gns3server/controller/gns3vm/__init__.py b/gns3server/controller/gns3vm/__init__.py
index 0ba8b30e..acd9f7c3 100644
--- a/gns3server/controller/gns3vm/__init__.py
+++ b/gns3server/controller/gns3vm/__init__.py
@@ -27,6 +27,7 @@ from .virtualbox_gns3_vm import VirtualBoxGNS3VM
from .remote_gns3_vm import RemoteGNS3VM
from .gns3_vm_error import GNS3VMError
from ...version import __version__
+from ..compute import ComputeError
import logging
log = logging.getLogger(__name__)
@@ -281,7 +282,8 @@ class GNS3VM:
compute = yield from self._controller.add_compute(compute_id="vm",
name="GNS3 VM is starting ({})".format(engine.vmname),
host=None,
- force=True)
+ force=True,
+ connect=False)
try:
yield from engine.start()
@@ -290,6 +292,7 @@ class GNS3VM:
log.error("Can't start the GNS3 VM: {}".format(str(e)))
yield from compute.update(name="GNS3 VM ({})".format(engine.vmname))
raise e
+ yield from compute.connect() # we can connect now that the VM has started
yield from compute.update(name="GNS3 VM ({})".format(engine.vmname),
protocol=self.protocol,
host=self.ip_address,
@@ -297,7 +300,9 @@ class GNS3VM:
user=self.user,
password=self.password)
- yield from self._check_network(compute)
+ # check if the VM is in the same subnet as the local server, start 10 seconds later to give
+ # some time for the compute in the VM to be ready for requests
+ asyncio.get_event_loop().call_later(10, lambda: asyncio.async(self._check_network(compute)))
@asyncio.coroutine
def _check_network(self, compute):
@@ -305,28 +310,32 @@ class GNS3VM:
Check that the VM is in the same subnet as the local server
"""
- vm_interfaces = yield from compute.interfaces()
- vm_interface_netmask = None
- for interface in vm_interfaces:
- if interface["ip_address"] == self.ip_address:
- vm_interface_netmask = interface["netmask"]
- break
- if vm_interface_netmask:
- vm_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, vm_interface_netmask)).network
- for compute_id in self._controller.computes:
- if compute_id == "local":
- compute = self._controller.get_compute(compute_id)
- interfaces = yield from compute.interfaces()
- netmask = None
- for interface in interfaces:
- if interface["ip_address"] == compute.host_ip:
- netmask = interface["netmask"]
- break
- if netmask:
- compute_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, netmask)).network
- if vm_network.compare_networks(compute_network) != 0:
- msg = "The GNS3 VM ({}) is not on the same network as the {} server ({}), please make sure the local server binding is in the same network as the GNS3 VM".format(vm_network, compute_id, compute_network)
- self._controller.notification.emit("log.warning", {"message": msg})
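+ # the compute inside the VM may not be reachable yet; log a warning instead of failing startup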
+ try:
+ vm_interfaces = yield from compute.interfaces()
+ vm_interface_netmask = None
+ for interface in vm_interfaces:
+ if interface["ip_address"] == self.ip_address:
+ vm_interface_netmask = interface["netmask"]
+ break
+ if vm_interface_netmask:
+ vm_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, vm_interface_netmask)).network
+ for compute_id in self._controller.computes:
+ if compute_id == "local":
+ compute = self._controller.get_compute(compute_id)
+ interfaces = yield from compute.interfaces()
+ netmask = None
+ for interface in interfaces:
+ if interface["ip_address"] == compute.host_ip:
+ netmask = interface["netmask"]
+ break
+ if netmask:
+ compute_network = ipaddress.ip_interface("{}/{}".format(compute.host_ip, netmask)).network
+ if vm_network.compare_networks(compute_network) != 0:
+ msg = "The GNS3 VM ({}) is not on the same network as the {} server ({}), please make sure the local server binding is in the same network as the GNS3 VM".format(
+ vm_network, compute_id, compute_network)
+ self._controller.notification.emit("log.warning", {"message": msg})
+ except ComputeError as e:
+ log.warning("Could not check the VM is in the same subnet as the local server: {}".format(e))
@locked_coroutine
def _suspend(self):
diff --git a/gns3server/controller/gns3vm/vmware_gns3_vm.py b/gns3server/controller/gns3vm/vmware_gns3_vm.py
index d3f0c6d2..c14f0732 100644
--- a/gns3server/controller/gns3vm/vmware_gns3_vm.py
+++ b/gns3server/controller/gns3vm/vmware_gns3_vm.py
@@ -171,7 +171,7 @@ class VMwareGNS3VM(BaseGNS3VM):
trial -= 1
# If ip not found fallback on old method
if trial == 0:
- log.warn("No IP found for the VM via readVariable fallback to getGuestIPAddress")
+ log.warning("No IP found for the VM via readVariable fallback to getGuestIPAddress")
guest_ip_address = yield from self._execute("getGuestIPAddress", [self._vmx_path, "-wait"], timeout=120)
break
yield from asyncio.sleep(1)
diff --git a/gns3server/controller/link.py b/gns3server/controller/link.py
index 0c9d5bc4..fc3febb4 100644
--- a/gns3server/controller/link.py
+++ b/gns3server/controller/link.py
@@ -171,12 +171,16 @@ class Link:
self._filters = new_filters
if self._created:
yield from self.update()
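+ # notify clients and persist the project so the filter change is not lost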
+ self._project.controller.notification.emit("link.updated", self.__json__())
+ self._project.dump()
@asyncio.coroutine
def update_suspend(self, value):
if value != self._suspend:
self._suspend = value
yield from self.update()
+ self._project.controller.notification.emit("link.updated", self.__json__())
+ self._project.dump()
@property
def created(self):
@@ -194,6 +198,8 @@ class Link:
"""
port = node.get_port(adapter_number, port_number)
+ if port is None:
+ raise aiohttp.web.HTTPNotFound(text="Port {}/{} for {} not found".format(adapter_number, port_number, node.name))
if port.link is not None:
raise aiohttp.web.HTTPConflict(text="Port is already used")
@@ -209,6 +215,8 @@ class Link:
# Check if user is not connecting serial => ethernet
other_port = other_node["node"].get_port(other_node["adapter_number"], other_node["port_number"])
+ if other_port is None:
+ raise aiohttp.web.HTTPNotFound(text="Port {}/{} for {} not found".format(other_node["adapter_number"], other_node["port_number"], other_node["node"].name))
if port.link_type != other_port.link_type:
raise aiohttp.web.HTTPConflict(text="It's not allowed to connect a {} to a {}".format(other_port.link_type, port.link_type))
@@ -297,6 +305,12 @@ class Link:
Dump a pcap file on disk
"""
+ if os.path.exists(self.capture_file_path):
+ try:
+ os.remove(self.capture_file_path)
+ except OSError as e:
+ raise aiohttp.web.HTTPConflict(text="Could not delete old capture file '{}': {}".format(self.capture_file_path, e))
+
try:
stream_content = yield from self.read_pcap_from_source()
except aiohttp.web.HTTPException as e:
@@ -307,16 +321,19 @@ class Link:
self._project.controller.notification.emit("link.updated", self.__json__())
with stream_content as stream:
- with open(self.capture_file_path, "wb+") as f:
- while self._capturing:
- # We read 1 bytes by 1 otherwise the remaining data is not read if the traffic stops
- data = yield from stream.read(1)
- if data:
- f.write(data)
- # Flush to disk otherwise the live is not really live
- f.flush()
- else:
- break
+ try:
+ with open(self.capture_file_path, "wb") as f:
+ while self._capturing:
+ # We read 1 byte at a time, otherwise the remaining data is not read if the traffic stops
+ data = yield from stream.read(1)
+ if data:
+ f.write(data)
+ # Flush to disk, otherwise the live capture is not really live
+ f.flush()
+ else:
+ break
+ except OSError as e:
+ raise aiohttp.web.HTTPConflict(text="Could not write capture file '{}': {}".format(self.capture_file_path, e))
@asyncio.coroutine
def stop_capture(self):
diff --git a/gns3server/controller/node.py b/gns3server/controller/node.py
index 87e9f892..f960a8cf 100644
--- a/gns3server/controller/node.py
+++ b/gns3server/controller/node.py
@@ -86,8 +86,7 @@ class Node:
self._first_port_name = None
# This properties will be recompute
- ignore_properties = ("width", "height")
-
+ ignore_properties = ("width", "height", "hover_symbol")
self.properties = kwargs.pop('properties', {})
# Update node properties with additional elements
@@ -104,7 +103,15 @@ class Node:
self.properties[prop] = kwargs[prop]
if self._symbol is None:
- self.symbol = ":/symbols/computer.svg"
+ # compatibility with old node templates
+ if "default_symbol" in self.properties:
+ default_symbol = self.properties.pop("default_symbol")
+ if default_symbol.endswith("normal.svg"):
+ self.symbol = default_symbol[:-11] + ".svg"
+ else:
+ self.symbol = default_symbol
+ else:
+ self.symbol = ":/symbols/computer.svg"
def is_always_running(self):
"""
@@ -567,12 +574,12 @@ class Node:
def get_port(self, adapter_number, port_number):
"""
Return the port for this adapter_number and port_number
- or raise an HTTPNotFound
+ or returns None if the port is not found
"""
for port in self.ports:
if port.adapter_number == adapter_number and port.port_number == port_number:
return port
- raise aiohttp.web.HTTPNotFound(text="Port {}/{} for {} not found".format(adapter_number, port_number, self.name))
+ return None
def _list_ports(self):
"""
diff --git a/gns3server/controller/project.py b/gns3server/controller/project.py
index 6c99431b..46348083 100644
--- a/gns3server/controller/project.py
+++ b/gns3server/controller/project.py
@@ -39,7 +39,6 @@ from ..utils.asyncio.pool import Pool
from ..utils.asyncio import locked_coroutine
from .export_project import export_project
from .import_project import import_project
-from ..compute.iou.utils.application_id import get_next_application_id
import logging
log = logging.getLogger(__name__)
@@ -434,13 +433,10 @@ class Project:
:param dump: Dump topology to disk
:param kwargs: See the documentation of node
"""
+
if node_id in self._nodes:
return self._nodes[node_id]
- if node_type == "iou" and 'application_id' not in kwargs.keys():
-
- kwargs['application_id'] = get_next_application_id(self._nodes.values())
-
node = Node(self, compute, name, node_id=node_id, node_type=node_type, **kwargs)
if compute not in self._project_created_on_compute:
# For a local server we send the project path
@@ -631,9 +627,12 @@ class Project:
with tempfile.TemporaryDirectory() as tmpdir:
zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
- with open(snapshot.path, "wb+") as f:
- for data in zipstream:
- f.write(data)
+ try:
+ with open(snapshot.path, "wb") as f:
+ for data in zipstream:
+ f.write(data)
+ except OSError as e:
+ raise aiohttp.web.HTTPConflict(text="Could not write snapshot file '{}': {}".format(snapshot.path, e))
except OSError as e:
raise aiohttp.web.HTTPInternalServerError(text="Could not create project directory: {}".format(e))
@@ -787,8 +786,11 @@ class Project:
for node_link in link_data["nodes"]:
node = self.get_node(node_link["node_id"])
port = node.get_port(node_link["adapter_number"], node_link["port_number"])
+ if port is None:
+ log.warning("Port {}/{} for {} not found".format(node_link["adapter_number"], node_link["port_number"], node.name))
+ continue
if port.link is not None:
- # the node port is already attached to another link
+ log.warning("Port {}/{} is already connected to link ID {}".format(node_link["adapter_number"], node_link["port_number"], port.link.id))
continue
yield from link.add_node(node, node_link["adapter_number"], node_link["port_number"], label=node_link.get("label"), dump=False)
if len(link.nodes) != 2:
@@ -859,7 +861,7 @@ class Project:
try:
with tempfile.TemporaryDirectory() as tmpdir:
zipstream = yield from export_project(self, tmpdir, keep_compute_id=True, allow_all_nodes=True)
- with open(os.path.join(tmpdir, "project.gns3p"), "wb+") as f:
+ with open(os.path.join(tmpdir, "project.gns3p"), "wb") as f:
for data in zipstream:
f.write(data)
with open(os.path.join(tmpdir, "project.gns3p"), "rb") as f:
@@ -941,10 +943,12 @@ class Project:
raise aiohttp.web.HTTPConflict(text="Cannot duplicate node data while the node is running")
data = copy.deepcopy(node.__json__(topology_dump=True))
- # Some properties like internal ID should not be duplicate
+ # Some properties like internal ID should not be duplicated
for unique_property in (
'node_id',
'name',
+ 'mac_addr',
+ 'mac_address',
'compute_id',
'application_id',
'dynamips_id'):
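Illustration only (names assumed, not the actual Project.duplicate_node code): the loop above copies the node's JSON and removes every field that must remain unique, which now includes both MAC address variants so a duplicated Qemu or IOS node gets a fresh address.

    import copy

    data = copy.deepcopy(node_json)   # node_json: dict dumped from the source node (assumed)
    for unique_property in ('node_id', 'name', 'mac_addr', 'mac_address',
                            'compute_id', 'application_id', 'dynamips_id'):
        data.pop(unique_property, None)   # the duplicate will be given new values for these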
diff --git a/gns3server/crash_report.py b/gns3server/crash_report.py
index f6834023..8303eb05 100644
--- a/gns3server/crash_report.py
+++ b/gns3server/crash_report.py
@@ -57,7 +57,7 @@ class CrashReport:
Report crash to a third party service
"""
- DSN = "sync+https://abb552c4f16c45c2ab75c84641100d6e:279c28ac32794198be94f0d17ad50a54@sentry.io/38482"
+ DSN = "sync+https://6b6c2ce19b8545278f7ee00c333175a6:be17229ec8da460e9a126d02b82de5dc@sentry.io/38482"
if hasattr(sys, "frozen"):
cacert = get_resource("cacert.pem")
if cacert is not None and os.path.isfile(cacert):
diff --git a/gns3server/handlers/api/compute/atm_switch_handler.py b/gns3server/handlers/api/compute/atm_switch_handler.py
index 798bb940..b57dbf42 100644
--- a/gns3server/handlers/api/compute/atm_switch_handler.py
+++ b/gns3server/handlers/api/compute/atm_switch_handler.py
@@ -188,7 +188,7 @@ class ATMSwitchHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
- description="Suspend an ATM Relay switch")
+ description="Suspend an ATM Relay switch (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
diff --git a/gns3server/handlers/api/compute/cloud_handler.py b/gns3server/handlers/api/compute/cloud_handler.py
index f8ef826c..d29a4eeb 100644
--- a/gns3server/handlers/api/compute/cloud_handler.py
+++ b/gns3server/handlers/api/compute/cloud_handler.py
@@ -168,7 +168,7 @@ class CloudHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
- description="Suspend a cloud")
+ description="Suspend a cloud (does nothing)")
def suspend(request, response):
Builtin.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
diff --git a/gns3server/handlers/api/compute/docker_handler.py b/gns3server/handlers/api/compute/docker_handler.py
index 1e0775a0..139cd241 100644
--- a/gns3server/handlers/api/compute/docker_handler.py
+++ b/gns3server/handlers/api/compute/docker_handler.py
@@ -105,6 +105,24 @@ class DockerHandler:
yield from container.stop()
response.set_status(204)
+ @Route.post(
+ r"/projects/{project_id}/docker/nodes/{node_id}/suspend",
+ parameters={
+ "project_id": "Project UUID",
+ "node_id": "Node UUID"
+ },
+ status_codes={
+ 204: "Instance suspended",
+ 400: "Invalid request",
+ 404: "Instance doesn't exist"
+ },
+ description="Suspend a Docker container")
+ def suspend(request, response):
+ docker_manager = Docker.instance()
+ container = docker_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
+ yield from container.pause()
+ response.set_status(204)
+
@Route.post(
r"/projects/{project_id}/docker/nodes/{node_id}/reload",
parameters={
diff --git a/gns3server/handlers/api/compute/ethernet_hub_handler.py b/gns3server/handlers/api/compute/ethernet_hub_handler.py
index 0bbde5f1..77369567 100644
--- a/gns3server/handlers/api/compute/ethernet_hub_handler.py
+++ b/gns3server/handlers/api/compute/ethernet_hub_handler.py
@@ -191,7 +191,7 @@ class EthernetHubHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
- description="Suspend an Ethernet hub")
+ description="Suspend an Ethernet hub (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
diff --git a/gns3server/handlers/api/compute/ethernet_switch_handler.py b/gns3server/handlers/api/compute/ethernet_switch_handler.py
index a1136145..cfde2bfa 100644
--- a/gns3server/handlers/api/compute/ethernet_switch_handler.py
+++ b/gns3server/handlers/api/compute/ethernet_switch_handler.py
@@ -204,7 +204,7 @@ class EthernetSwitchHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
- description="Suspend an Ethernet switch")
+ description="Suspend an Ethernet switch (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
diff --git a/gns3server/handlers/api/compute/frame_relay_switch_handler.py b/gns3server/handlers/api/compute/frame_relay_switch_handler.py
index 1ea45994..fac4ebc5 100644
--- a/gns3server/handlers/api/compute/frame_relay_switch_handler.py
+++ b/gns3server/handlers/api/compute/frame_relay_switch_handler.py
@@ -188,7 +188,7 @@ class FrameRelaySwitchHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
- description="Suspend a Frame Relay switch")
+ description="Suspend a Frame Relay switch (does nothing)")
def suspend(request, response):
Dynamips.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
diff --git a/gns3server/handlers/api/compute/iou_handler.py b/gns3server/handlers/api/compute/iou_handler.py
index 029e8c27..ae661c36 100644
--- a/gns3server/handlers/api/compute/iou_handler.py
+++ b/gns3server/handlers/api/compute/iou_handler.py
@@ -65,6 +65,8 @@ class IOUHandler:
for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
+ if name == "application_id":
+ continue # we must ignore this to avoid overwriting the application_id allocated by the IOU manager
if name == "startup_config_content" and (vm.startup_config_content and len(vm.startup_config_content) > 0):
continue
if name == "private_config_content" and (vm.private_config_content and len(vm.private_config_content) > 0):
@@ -116,6 +118,8 @@ class IOUHandler:
for name, value in request.json.items():
if hasattr(vm, name) and getattr(vm, name) != value:
+ if name == "application_id":
+ continue # we must ignore this to avoid overwriting the application_id allocated by the IOU manager
setattr(vm, name, value)
if vm.use_default_iou_values:
@@ -207,6 +211,24 @@ class IOUHandler:
yield from vm.stop()
response.set_status(204)
+ @Route.post(
+ r"/projects/{project_id}/iou/nodes/{node_id}/suspend",
+ parameters={
+ "project_id": "Project UUID",
+ "node_id": "Node UUID"
+ },
+ status_codes={
+ 204: "Instance suspended",
+ 400: "Invalid request",
+ 404: "Instance doesn't exist"
+ },
+ description="Suspend an IOU instance (does nothing)")
+ def suspend(request, response):
+
+ iou_manager = IOU.instance()
+ iou_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
+ response.set_status(204)
+
@Route.post(
r"/projects/{project_id}/iou/nodes/{node_id}/reload",
parameters={
diff --git a/gns3server/handlers/api/compute/nat_handler.py b/gns3server/handlers/api/compute/nat_handler.py
index 90ab80ea..21487c1d 100644
--- a/gns3server/handlers/api/compute/nat_handler.py
+++ b/gns3server/handlers/api/compute/nat_handler.py
@@ -166,7 +166,7 @@ class NatHandler:
400: "Invalid request",
404: "Instance doesn't exist"
},
- description="Suspend a nat")
+ description="Suspend a nat (does nothing)")
def suspend(request, response):
Builtin.instance().get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
diff --git a/gns3server/handlers/api/compute/notification_handler.py b/gns3server/handlers/api/compute/notification_handler.py
index 1347f70b..3580a286 100644
--- a/gns3server/handlers/api/compute/notification_handler.py
+++ b/gns3server/handlers/api/compute/notification_handler.py
@@ -48,7 +48,7 @@ class NotificationHandler:
with notifications.queue() as queue:
while True:
try:
- notification = yield from queue.get_json(5)
+ notification = yield from queue.get_json(1)
except asyncio.futures.CancelledError:
break
if ws.closed:
diff --git a/gns3server/handlers/api/compute/project_handler.py b/gns3server/handlers/api/compute/project_handler.py
index 73662ca7..a7d8f760 100644
--- a/gns3server/handlers/api/compute/project_handler.py
+++ b/gns3server/handlers/api/compute/project_handler.py
@@ -19,12 +19,12 @@ import aiohttp
import asyncio
import json
import os
-import psutil
import tempfile
from gns3server.web.route import Route
from gns3server.compute.project_manager import ProjectManager
from gns3server.compute import MODULES
+from gns3server.utils.ping_stats import PingStats
from gns3server.schemas.project import (
PROJECT_OBJECT_SCHEMA,
@@ -186,11 +186,7 @@ class ProjectHandler:
:returns: hash
"""
- stats = {}
- # Non blocking call in order to get cpu usage. First call will return 0
- stats["cpu_usage_percent"] = psutil.cpu_percent(interval=None)
- stats["memory_usage_percent"] = psutil.virtual_memory().percent
- return {"action": "ping", "event": stats}
+ return {"action": "ping", "event": PingStats.get()}
@Route.get(
r"/projects/{project_id}/files",
diff --git a/gns3server/handlers/api/compute/vpcs_handler.py b/gns3server/handlers/api/compute/vpcs_handler.py
index 60b84aad..6dbf2bee 100644
--- a/gns3server/handlers/api/compute/vpcs_handler.py
+++ b/gns3server/handlers/api/compute/vpcs_handler.py
@@ -184,16 +184,15 @@ class VPCSHandler:
"node_id": "Node UUID"
},
status_codes={
- 204: "Instance stopped",
+ 204: "Instance suspended",
400: "Invalid request",
404: "Instance doesn't exist"
},
- description="Suspend a VPCS instance (stop it)")
- def stop(request, response):
+ description="Suspend a VPCS instance (does nothing)")
+ def suspend(request, response):
vpcs_manager = VPCS.instance()
- vm = vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
- yield from vm.stop()
+ vpcs_manager.get_node(request.match_info["node_id"], project_id=request.match_info["project_id"])
response.set_status(204)
@Route.post(
diff --git a/gns3server/handlers/api/controller/server_handler.py b/gns3server/handlers/api/controller/server_handler.py
index e9a798df..7de4a66a 100644
--- a/gns3server/handlers/api/controller/server_handler.py
+++ b/gns3server/handlers/api/controller/server_handler.py
@@ -95,7 +95,7 @@ class ServerHandler:
})
def check_version(request, response):
if request.json["version"] != __version__:
- raise HTTPConflict(text="Client version {} differs with server version {}".format(request.json["version"], __version__))
+ raise HTTPConflict(text="Client version {} is not the same as server version {}".format(request.json["version"], __version__))
response.json({"version": __version__})
@Route.get(
diff --git a/gns3server/handlers/api/controller/symbol_handler.py b/gns3server/handlers/api/controller/symbol_handler.py
index 7122a2e3..b0c4600c 100644
--- a/gns3server/handlers/api/controller/symbol_handler.py
+++ b/gns3server/handlers/api/controller/symbol_handler.py
@@ -16,6 +16,7 @@
# along with this program. If not, see .
import os
+import aiohttp
from gns3server.web.route import Route
from gns3server.controller import Controller
@@ -62,12 +63,15 @@ class SymbolHandler:
def upload(request, response):
controller = Controller.instance()
path = os.path.join(controller.symbols.symbols_path(), os.path.basename(request.match_info["symbol_id"]))
- with open(path, 'wb+') as f:
- while True:
- packet = yield from request.content.read(512)
- if not packet:
- break
- f.write(packet)
+ try:
+ with open(path, 'wb') as f:
+ while True:
+ packet = yield from request.content.read(512)
+ if not packet:
+ break
+ f.write(packet)
+ except OSError as e:
+ raise aiohttp.web.HTTPConflict(text="Could not write symbol file '{}': {}".format(path, e))
# Reset the symbol list
controller.symbols.list()
response.set_status(204)
diff --git a/gns3server/notification_queue.py b/gns3server/notification_queue.py
index c52f8968..2412d5cb 100644
--- a/gns3server/notification_queue.py
+++ b/gns3server/notification_queue.py
@@ -16,9 +16,9 @@
# along with this program. If not, see .
import asyncio
-import psutil
import json
-import psutil
+
+from gns3server.utils.ping_stats import PingStats
class NotificationQueue(asyncio.Queue):
@@ -33,30 +33,20 @@ class NotificationQueue(asyncio.Queue):
@asyncio.coroutine
def get(self, timeout):
"""
- When timeout is expire we send a ping notification with server informations
+ When the timeout expires we send a ping notification with server information
"""
- # At first get we return a ping so the client receive immediately data
+ # At first get we return a ping so the client immediately receives data
if self._first:
self._first = False
- return ("ping", self._getPing(), {})
+ return ("ping", PingStats.get(), {})
try:
(action, msg, kwargs) = yield from asyncio.wait_for(super().get(), timeout)
except asyncio.futures.TimeoutError:
- return ("ping", self._getPing(), {})
+ return ("ping", PingStats.get(), {})
return (action, msg, kwargs)
- def _getPing(self):
- """
- Return the content of the ping notification
- """
- msg = {}
- # Non blocking call in order to get cpu usage. First call will return 0
- msg["cpu_usage_percent"] = psutil.cpu_percent(interval=None)
- msg["memory_usage_percent"] = psutil.virtual_memory().percent
- return msg
-
@asyncio.coroutine
def get_json(self, timeout):
"""
diff --git a/gns3server/run.py b/gns3server/run.py
index 23a74827..0d23b124 100644
--- a/gns3server/run.py
+++ b/gns3server/run.py
@@ -160,7 +160,7 @@ def pid_lock(path):
with open(path) as f:
try:
pid = int(f.read())
- os.kill(pid, 0) # If the proces is not running kill return an error
+ os.kill(pid, 0) # kill returns an error if the process is not running
except (OSError, SystemError, ValueError):
pid = None
except OSError as e:
diff --git a/gns3server/ubridge/hypervisor.py b/gns3server/ubridge/hypervisor.py
index 264cf679..660c930a 100644
--- a/gns3server/ubridge/hypervisor.py
+++ b/gns3server/ubridge/hypervisor.py
@@ -138,8 +138,14 @@ class Hypervisor(UBridgeHypervisor):
match = re.search("ubridge version ([0-9a-z\.]+)", output)
if match:
self._version = match.group(1)
- if parse_version(self._version) < parse_version("0.9.12"):
- raise UbridgeError("uBridge executable version must be >= 0.9.12")
+ if sys.platform.startswith("win") or sys.platform.startswith("darwin"):
+ minimum_required_version = "0.9.12"
+ else:
+ # uBridge version 0.9.14 is required for packet filters
+ # to work for IOU nodes.
+ minimum_required_version = "0.9.14"
+ if parse_version(self._version) < parse_version(minimum_required_version):
+ raise UbridgeError("uBridge executable version must be >= {}".format(minimum_required_version))
else:
raise UbridgeError("Could not determine uBridge version for {}".format(self._path))
except (OSError, subprocess.SubprocessError) as e:
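A tiny sketch of the platform-dependent version gate above. parse_version is presumably the pkg_resources helper already used by this module (the hunk does not show the import), and it orders dotted versions as expected.

    import sys
    from pkg_resources import parse_version

    if sys.platform.startswith("win") or sys.platform.startswith("darwin"):
        minimum_required_version = "0.9.12"
    else:
        minimum_required_version = "0.9.14"   # needed for IOU packet filters on Linux

    assert parse_version("0.9.12") < parse_version("0.9.14")
    assert not parse_version("0.9.14") < parse_version(minimum_required_version)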
diff --git a/gns3server/ubridge/ubridge_hypervisor.py b/gns3server/ubridge/ubridge_hypervisor.py
index 6299d22c..6f70151e 100644
--- a/gns3server/ubridge/ubridge_hypervisor.py
+++ b/gns3server/ubridge/ubridge_hypervisor.py
@@ -214,6 +214,8 @@ class UBridgeHypervisor:
# Now retrieve the result
data = []
buf = ''
+ retries = 0
+ max_retries = 10
while True:
try:
try:
@@ -222,9 +224,21 @@ class UBridgeHypervisor:
# task has been canceled but continue to read
# any remaining data sent by the hypervisor
continue
+ except ConnectionResetError as e:
+ # Sometimes WinError 64 (ERROR_NETNAME_DELETED) is returned here on Windows.
+ # This happens if a connection reset is received before IOCP could complete
+ # a previous operation. Ignore and try again.
+ log.warning("Connection reset received while reading uBridge response: {}".format(e))
+ continue
if not chunk:
- raise UbridgeError("No data returned from {host}:{port}, uBridge process running: {run}"
- .format(host=self._host, port=self._port, run=self.is_running()))
+ if retries > max_retries:
+ raise UbridgeError("No data returned from {host}:{port}, uBridge process running: {run}"
+ .format(host=self._host, port=self._port, run=self.is_running()))
+ else:
+ retries += 1
+ yield from asyncio.sleep(0.1)
+ continue
+ retries = 0
buf += chunk.decode("utf-8")
except OSError as e:
raise UbridgeError("Lost communication with {host}:{port} :{error}, uBridge process running: {run}"
diff --git a/gns3server/utils/ping_stats.py b/gns3server/utils/ping_stats.py
new file mode 100644
index 00000000..3252f9c2
--- /dev/null
+++ b/gns3server/utils/ping_stats.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2018 GNS3 Technologies Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+import psutil
+import time
+
+
+class PingStats:
+ """
+ Ping messages are regularly sent to the client to keep the connection open.
+ Along with them we send some information about the server load.
+ """
+
+ _last_measurement = 0.0 # time of last measurement
+ _last_cpu_percent = 0.0 # last cpu_percent
+ _last_mem_percent = 0.0 # last virtual_memory().percent
+
+ @classmethod
+ def get(cls):
+ """
+ Get ping statistics
+
+ :returns: hash
+ """
+ stats = {}
+ cur_time = time.time()
+ # minimum interval for getting CPU and memory statistics
+ if cur_time < cls._last_measurement or \
+ cur_time > cls._last_measurement + 1.9:
+ cls._last_measurement = cur_time
+ # Non blocking call to get cpu usage. First call will return 0
+ cls._last_cpu_percent = psutil.cpu_percent(interval=None)
+ cls._last_mem_percent = psutil.virtual_memory().percent
+ stats["cpu_usage_percent"] = cls._last_cpu_percent
+ stats["memory_usage_percent"] = cls._last_mem_percent
+ return stats
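Usage of the new helper is a one-liner; a hedged example of what the project handler and NotificationQueue changes above now rely on (the numbers are illustrative):

    from gns3server.utils.ping_stats import PingStats

    stats = PingStats.get()
    # e.g. {"cpu_usage_percent": 12.5, "memory_usage_percent": 48.1}
    # A second call made less than ~1.9 seconds later returns the cached values
    # instead of querying psutil again, which is the minimum-interval fix from the changelog.
    print(stats["cpu_usage_percent"], stats["memory_usage_percent"])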
diff --git a/gns3server/version.py b/gns3server/version.py
index fedd2653..a692430b 100644
--- a/gns3server/version.py
+++ b/gns3server/version.py
@@ -23,8 +23,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
-__version__ = "2.1.2"
-__version_info__ = (2, 1, 2, 0)
+__version__ = "2.1.4"
+__version_info__ = (2, 1, 4, 0)
# If it's a git checkout try to add the commit
if "dev" in __version__:
diff --git a/requirements.txt b/requirements.txt
index b81fe029..d1269047 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,11 +1,10 @@
jsonschema>=2.4.0
aiohttp>=2.2.0,<2.4.0 # pyup: ignore
aiohttp-cors>=0.5.3,<0.6.0 # pyup: ignore
-yarl>=0.11,<0.12 # pyup: ignore
+yarl>=0.11
Jinja2>=2.7.3
raven>=5.23.0
psutil>=3.0.0
zipstream>=1.1.4
-typing>=3.5.3.0 # Otherwise yarl fails with python 3.4
-multidict<3.2.0 # Otherwise fails when upgraded to v3.2.0
-prompt-toolkit
+typing>=3.5.3.0;python_version<"3.5" # Otherwise yarl fails with python 3.4
+prompt-toolkit==1.0.15
diff --git a/scripts/remote-install.sh b/scripts/remote-install.sh
index 259512e5..6937141e 100644
--- a/scripts/remote-install.sh
+++ b/scripts/remote-install.sh
@@ -25,7 +25,7 @@ function help {
echo "Usage:" >&2
echo "--with-openvpn: Install Open VPN" >&2
echo "--with-iou: Install IOU" >&2
- echo "--with-i386-repository: Add i386 repositories require by IOU if they are not available on the system. Warning this will replace your source.list in order to use official ubuntu mirror" >&2
+ echo "--with-i386-repository: Add the i386 repositories required by IOU if they are not already available on the system. Warning: this will replace your source.list in order to use the official Ubuntu mirror" >&2
echo "--unstable: Use the GNS3 unstable repository"
echo "--help: This help" >&2
}
@@ -37,7 +37,7 @@ function log {
lsb_release -d | grep "LTS" > /dev/null
if [ $? != 0 ]
then
- echo "You can use this script on Ubuntu LTS only"
+ echo "This script can only be run on a Linux Ubuntu LTS release"
exit 1
fi
@@ -310,7 +310,7 @@ apt-get install -y \
dnsutils \
nginx-light
-MY_IP_ADDR=$(dig @ns1.google.com -t txt o-o.myaddr.l.google.com +short | sed 's/"//g')
+MY_IP_ADDR=$(dig @ns1.google.com -t txt o-o.myaddr.l.google.com +short -4 | sed 's/"//g')
log "IP detected: $MY_IP_ADDR"
diff --git a/tests/compute/docker/test_docker_vm.py b/tests/compute/docker/test_docker_vm.py
index f5f8f7e0..d48fcc98 100644
--- a/tests/compute/docker/test_docker_vm.py
+++ b/tests/compute/docker/test_docker_vm.py
@@ -257,35 +257,37 @@ def test_create_environment(loop, project, manager):
vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu")
vm.environment = "YES=1\nNO=0\nGNS3_MAX_ETHERNET=eth2"
loop.run_until_complete(asyncio.async(vm.create()))
- mock.assert_called_with("POST", "containers/create", data={
- "Tty": True,
- "OpenStdin": True,
- "StdinOnce": False,
- "HostConfig":
- {
- "CapAdd": ["ALL"],
- "Binds": [
- "{}:/gns3:ro".format(get_resource("compute/docker/resources")),
- "{}:/gns3volumes/etc/network:rw".format(os.path.join(vm.working_dir, "etc", "network"))
- ],
- "Privileged": True
- },
- "Env": [
- "container=docker",
- "GNS3_MAX_ETHERNET=eth0",
- "GNS3_VOLUMES=/etc/network",
- "YES=1",
- "NO=0"
- ],
- "Volumes": {},
- "NetworkDisabled": True,
- "Name": "test",
- "Hostname": "test",
- "Image": "ubuntu:latest",
- "Entrypoint": ["/gns3/init.sh"],
- "Cmd": ["/bin/sh"]
- })
- assert vm._cid == "e90e34656806"
+ assert mock.call_args[1]['data']['Env'] == [
+ "container=docker",
+ "GNS3_MAX_ETHERNET=eth0",
+ "GNS3_VOLUMES=/etc/network",
+ "YES=1",
+ "NO=0"
+ ]
+
+
+def test_create_environment_with_last_new_line_character(loop, project, manager):
+ """
+ Allow the user to pass an environment. The user cannot override our
+ internal variables.
+ """
+
+ response = {
+ "Id": "e90e34656806",
+ "Warnings": []
+ }
+ with asyncio_patch("gns3server.compute.docker.Docker.list_images", return_value=[{"image": "ubuntu"}]):
+ with asyncio_patch("gns3server.compute.docker.Docker.query", return_value=response) as mock:
+ vm = DockerVM("test", str(uuid.uuid4()), project, manager, "ubuntu")
+ vm.environment = "YES=1\nNO=0\nGNS3_MAX_ETHERNET=eth2\n"
+ loop.run_until_complete(asyncio.async(vm.create()))
+ assert mock.call_args[1]['data']['Env'] == [
+ "container=docker",
+ "GNS3_MAX_ETHERNET=eth0",
+ "GNS3_VOLUMES=/etc/network",
+ "YES=1",
+ "NO=0"
+ ]
def test_create_image_not_available(loop, project, manager):
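The new test above covers the "Handle docker env with last empty line" changelog entry; the fix presumably amounts to skipping blank entries when the user-supplied environment string is split, roughly like this (sketch only, not the real DockerVM code):

    user_env = "YES=1\nNO=0\nGNS3_MAX_ETHERNET=eth2\n"
    variables = [line for line in user_env.split("\n") if line.strip()]
    # -> ['YES=1', 'NO=0', 'GNS3_MAX_ETHERNET=eth2']; the trailing empty line is ignored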
diff --git a/tests/compute/dynamips/test_ethernet_switch.py b/tests/compute/dynamips/test_ethernet_switch.py
index 6089a634..a21e217c 100644
--- a/tests/compute/dynamips/test_ethernet_switch.py
+++ b/tests/compute/dynamips/test_ethernet_switch.py
@@ -20,7 +20,7 @@ from gns3server.compute.dynamips.nodes.ethernet_switch import EthernetSwitchCons
from gns3server.compute.nios.nio_udp import NIOUDP
-def test_arp_command(async_run):
+def test_mac_command(async_run):
node = AsyncioMagicMock()
node.name = "Test"
node.nios = {}
@@ -30,7 +30,7 @@ def test_arp_command(async_run):
node.nios[1].name = "Ethernet1"
node._hypervisor.send = AsyncioMagicMock(return_value=["0050.7966.6801 1 Ethernet0", "0050.7966.6802 1 Ethernet1"])
console = EthernetSwitchConsole(node)
- assert async_run(console.arp()) == \
+ assert async_run(console.mac()) == \
"Port Mac VLAN\n" \
"Ethernet0 00:50:79:66:68:01 1\n" \
"Ethernet1 00:50:79:66:68:02 1\n"
diff --git a/tests/compute/iou/test_iou_manager.py b/tests/compute/iou/test_iou_manager.py
index 114c86e4..4a3475ea 100644
--- a/tests/compute/iou/test_iou_manager.py
+++ b/tests/compute/iou/test_iou_manager.py
@@ -19,7 +19,6 @@
import pytest
from unittest.mock import patch
import uuid
-import os
import sys
pytestmark = pytest.mark.skipif(sys.platform.startswith("win"), reason="Not supported on Windows")
@@ -40,18 +39,17 @@ def iou(port_manager):
return iou
-def test_get_application_id(loop, project, iou):
+def test_application_id(loop, project, iou):
vm1_id = str(uuid.uuid4())
vm2_id = str(uuid.uuid4())
vm3_id = str(uuid.uuid4())
- loop.run_until_complete(iou.create_node("PC 1", project.id, vm1_id))
- loop.run_until_complete(iou.create_node("PC 2", project.id, vm2_id))
- assert iou.get_application_id(vm1_id) == 1
- assert iou.get_application_id(vm1_id) == 1
- assert iou.get_application_id(vm2_id) == 2
+ vm1 = loop.run_until_complete(iou.create_node("PC 1", project.id, vm1_id))
+ vm2 = loop.run_until_complete(iou.create_node("PC 2", project.id, vm2_id))
+ assert vm1.application_id == 1
+ assert vm2.application_id == 2
loop.run_until_complete(iou.delete_node(vm1_id))
- loop.run_until_complete(iou.create_node("PC 3", project.id, vm3_id))
- assert iou.get_application_id(vm3_id) == 1
+ vm3 = loop.run_until_complete(iou.create_node("PC 3", project.id, vm3_id))
+ assert vm3.application_id == 1
def test_get_application_id_multiple_project(loop, iou):
@@ -60,20 +58,20 @@ def test_get_application_id_multiple_project(loop, iou):
vm3_id = str(uuid.uuid4())
project1 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
project2 = ProjectManager.instance().create_project(project_id=str(uuid.uuid4()))
- loop.run_until_complete(iou.create_node("PC 1", project1.id, vm1_id))
- loop.run_until_complete(iou.create_node("PC 2", project1.id, vm2_id))
- loop.run_until_complete(iou.create_node("PC 2", project2.id, vm3_id))
- assert iou.get_application_id(vm1_id) == 1
- assert iou.get_application_id(vm2_id) == 2
- assert iou.get_application_id(vm3_id) == 3
+ vm1 = loop.run_until_complete(iou.create_node("PC 1", project1.id, vm1_id))
+ vm2 = loop.run_until_complete(iou.create_node("PC 2", project1.id, vm2_id))
+ vm3 = loop.run_until_complete(iou.create_node("PC 2", project2.id, vm3_id))
+ assert vm1.application_id == 1
+ assert vm2.application_id == 2
+ assert vm3.application_id == 3
def test_get_application_id_no_id_available(loop, project, iou):
with pytest.raises(IOUError):
for i in range(1, 513):
node_id = str(uuid.uuid4())
- loop.run_until_complete(iou.create_node("PC {}".format(i), project.id, node_id))
- assert iou.get_application_id(node_id) == i
+ vm = loop.run_until_complete(iou.create_node("PC {}".format(i), project.id, node_id))
+ assert vm.application_id == i
def test_get_images_directory(iou, tmpdir):
diff --git a/tests/compute/iou/test_iou_vm.py b/tests/compute/iou/test_iou_vm.py
index 8f899f7c..24cf8bb7 100644
--- a/tests/compute/iou/test_iou_vm.py
+++ b/tests/compute/iou/test_iou_vm.py
@@ -48,7 +48,7 @@ def manager(port_manager):
@pytest.fixture(scope="function")
def vm(project, manager, tmpdir, fake_iou_bin, iourc_file):
- vm = IOUVM("test", str(uuid.uuid4()), project, manager)
+ vm = IOUVM("test", str(uuid.uuid4()), project, manager, application_id=1)
config = manager.config.get_section_config("IOU")
config["iourc_path"] = iourc_file
manager.config.set_section_config("IOU", config)
@@ -84,7 +84,7 @@ def test_vm(project, manager):
def test_vm_startup_config_content(project, manager):
- vm = IOUVM("test", "00010203-0405-0607-0808-0a0b0c0d0e0f", project, manager)
+ vm = IOUVM("test", "00010203-0405-0607-0808-0a0b0c0d0e0f", project, manager, application_id=1)
vm.startup_config_content = "hostname %h"
assert vm.name == "test"
assert vm.startup_config_content == "hostname test"
@@ -94,7 +94,6 @@ def test_vm_startup_config_content(project, manager):
def test_start(loop, vm):
mock_process = MagicMock()
-
vm._check_requirements = AsyncioMagicMock(return_value=True)
vm._check_iou_licence = AsyncioMagicMock(return_value=True)
vm._start_ubridge = AsyncioMagicMock(return_value=True)
@@ -440,7 +439,7 @@ def test_application_id(project, manager):
"""
Checks if uses local manager to get application_id when not set
"""
- vm = IOUVM("test", str(uuid.uuid4()), project, manager)
+ vm = IOUVM("test", str(uuid.uuid4()), project, manager, application_id=1)
assert vm.application_id == 1
vm.application_id = 3
diff --git a/tests/compute/iou/utils/test_application_id.py b/tests/compute/iou/utils/test_application_id.py
deleted file mode 100644
index e7302c5c..00000000
--- a/tests/compute/iou/utils/test_application_id.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Copyright (C) 2017 GNS3 Technologies Inc.
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see .
-
-import pytest
-from unittest.mock import MagicMock
-from gns3server.compute.iou.utils.application_id import get_next_application_id, IOUError
-
-
-def test_get_next_application_id():
- # test first node
- assert get_next_application_id([]) == 1
-
- # test second node
- nodes = [
- MagicMock(node_type='different'),
- MagicMock(node_type='iou', properties=dict(application_id=1))
- ]
- assert get_next_application_id(nodes) == 2
-
- # test reach out the limit
- nodes = [MagicMock(node_type='iou', properties=dict(application_id=i)) for i in range(1, 512)]
-
- with pytest.raises(IOUError):
- get_next_application_id(nodes)
diff --git a/tests/controller/test_controller.py b/tests/controller/test_controller.py
index 4b39a830..5c984593 100644
--- a/tests/controller/test_controller.py
+++ b/tests/controller/test_controller.py
@@ -510,7 +510,9 @@ def test_load_appliances(controller):
"Qemu": {
"vms": [
{
- "name": "Test"
+ "name": "Test",
+ "node_type": "qemu",
+ "category": "router"
}
]
}
@@ -538,6 +540,52 @@ def test_load_appliances(controller):
assert cloud_uuid == appliance.id
+def test_load_appliances_deprecated_features_default_symbol(controller):
+ controller._settings = {
+ "Qemu": {
+ "vms": [
+ {
+ "name": "Test",
+ "node_type": "qemu",
+ "category": "router",
+ "default_symbol": ":/symbols/iosv_virl.normal.svg",
+ "hover_symbol": ":/symbols/iosv_virl.selected.svg",
+ }
+ ]
+ }
+ }
+ controller.load_appliances()
+ appliances = dict([(a.name, a) for a in controller.appliances.values()])
+
+ assert appliances["Test"].__json__()["symbol"] == ":/symbols/computer.svg"
+ assert "default_symbol" not in appliances["Test"].data.keys()
+ assert "hover_symbol" not in appliances["Test"].data.keys()
+
+
+def test_load_appliances_deprecated_features_default_symbol_with_symbol(controller):
+ controller._settings = {
+ "Qemu": {
+ "vms": [
+ {
+ "name": "Test",
+ "node_type": "qemu",
+ "category": "router",
+ "default_symbol": ":/symbols/iosv_virl.normal.svg",
+ "hover_symbol": ":/symbols/iosv_virl.selected.svg",
+ "symbol": ":/symbols/my-symbol.svg"
+
+ }
+ ]
+ }
+ }
+ controller.load_appliances()
+ appliances = dict([(a.name, a) for a in controller.appliances.values()])
+
+ assert appliances["Test"].__json__()["symbol"] == ":/symbols/my-symbol.svg"
+ assert "default_symbol" not in appliances["Test"].data.keys()
+ assert "hover_symbol" not in appliances["Test"].data.keys()
+
+
def test_autoidlepc(controller, async_run):
controller._computes["local"] = AsyncioMagicMock()
node_mock = AsyncioMagicMock()
diff --git a/tests/controller/test_export_project.py b/tests/controller/test_export_project.py
index 82f3d422..c1582dc0 100644
--- a/tests/controller/test_export_project.py
+++ b/tests/controller/test_export_project.py
@@ -22,6 +22,7 @@ import pytest
import aiohttp
import zipfile
+from pathlib import Path
from unittest.mock import patch
from unittest.mock import MagicMock
from tests.utils import AsyncioMagicMock, AsyncioBytesIO
@@ -417,3 +418,42 @@ def test_export_images_from_vm(tmpdir, project, async_run, controller):
with myzip.open("images/dynamips/test.image") as myfile:
content = myfile.read()
assert content == b"IMAGE"
+
+
+def test_export_with_ignoring_snapshots(tmpdir, project, async_run):
+ with open(os.path.join(project.path, "test.gns3"), 'w+') as f:
+ data = {
+ "topology": {
+ "computes": [
+ {
+ "compute_id": "6b7149c8-7d6e-4ca0-ab6b-daa8ab567be0",
+ "host": "127.0.0.1",
+ "name": "Remote 1",
+ "port": 8001,
+ "protocol": "http"
+ }
+ ],
+ "nodes": [
+ {
+ "compute_id": "6b7149c8-7d6e-4ca0-ab6b-daa8ab567be0",
+ "node_type": "vpcs"
+ }
+ ]
+ }
+ }
+ json.dump(data, f)
+
+ # create snapshot directory
+ snapshots_dir = os.path.join(project.path, 'snapshots')
+ os.makedirs(snapshots_dir)
+ Path(os.path.join(snapshots_dir, 'snap.gns3project')).touch()
+
+ z = async_run(export_project(project, str(tmpdir), keep_compute_id=True))
+
+ with open(str(tmpdir / 'zipfile.zip'), 'wb') as f:
+ for data in z:
+ f.write(data)
+
+ with zipfile.ZipFile(str(tmpdir / 'zipfile.zip')) as myzip:
+ assert not os.path.join('snapshots', 'snap.gns3project') in [f.filename for f in myzip.filelist]
+
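This test backs the "Filter snapshots directory during the snapshot" fix; the export presumably walks the project directory and skips the snapshots folder, along the lines of the sketch below (illustrative, not the actual export_project implementation):

    import os

    def files_to_export(project_path):
        for root, dirs, files in os.walk(project_path):
            # do not descend into snapshots/, so .gns3project archives stay out of the export
            dirs[:] = [d for d in dirs if d != "snapshots"]
            for name in files:
                yield os.path.join(root, name)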
diff --git a/tests/controller/test_node.py b/tests/controller/test_node.py
index dd51a6ac..7a860850 100644
--- a/tests/controller/test_node.py
+++ b/tests/controller/test_node.py
@@ -517,8 +517,8 @@ def test_get_port(node):
assert port.port_number == 0
port = node.get_port(1, 0)
assert port.adapter_number == 1
- with pytest.raises(aiohttp.web.HTTPNotFound):
- port = node.get_port(42, 0)
+ port = node.get_port(42, 0)
+ assert port is None
def test_parse_node_response(node, async_run):
diff --git a/tests/controller/test_project.py b/tests/controller/test_project.py
index af8b3079..deb61b16 100644
--- a/tests/controller/test_project.py
+++ b/tests/controller/test_project.py
@@ -605,27 +605,6 @@ def test_node_name(project, async_run):
assert node.name == "R3"
-def test_add_iou_node_and_check_if_gets_application_id(project, async_run):
- compute = MagicMock()
- compute.id = "local"
- response = MagicMock()
- response.json = {"console": 2048}
- compute.post = AsyncioMagicMock(return_value=response)
-
- # tests if get_next_application_id is called
- with patch('gns3server.controller.project.get_next_application_id', return_value=222) as mocked_get_app_id:
- node = async_run(project.add_node(
- compute, "test", None, node_type="iou", properties={"startup_config": "test.cfg"}))
- assert mocked_get_app_id.called
- assert node.properties['application_id'] == 222
-
- # tests if we can send property and it will be used
- node = async_run(project.add_node(
- compute, "test", None, node_type="iou", application_id=333, properties={"startup_config": "test.cfg"}))
- assert mocked_get_app_id.called
- assert node.properties['application_id'] == 333
-
-
def test_duplicate_node(project, async_run):
compute = MagicMock()
compute.id = "local"
diff --git a/tests/handlers/api/controller/test_version.py b/tests/handlers/api/controller/test_version.py
index a763ce00..fafda198 100644
--- a/tests/handlers/api/controller/test_version.py
+++ b/tests/handlers/api/controller/test_version.py
@@ -45,7 +45,8 @@ def test_version_invalid_input(http_controller):
query = {'version': "0.4.2"}
response = http_controller.post('/version', query)
assert response.status == 409
- assert response.json == {'message': 'Client version 0.4.2 differs with server version {}'.format(__version__),
+
+ assert response.json == {'message': 'Client version 0.4.2 is not the same as server version {}'.format(__version__),
'status': 409}
diff --git a/win-requirements.txt b/win-requirements.txt
index 5905e970..017a1d07 100644
--- a/win-requirements.txt
+++ b/win-requirements.txt
@@ -1,3 +1,3 @@
-rrequirements.txt
-pypiwin32 # pyup: ignore
+pywin32>=223 # pyup: ignore