2016-03-11 19:13:52 +00:00
|
|
|
#!/usr/bin/env python
|
|
|
|
#
|
|
|
|
# Copyright (C) 2016 GNS3 Technologies Inc.
|
|
|
|
#
|
|
|
|
# This program is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# This program is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
|
|
|
import asyncio
|
2016-04-21 10:14:09 +00:00
|
|
|
import aiohttp
|
2016-03-11 19:13:52 +00:00
|
|
|
|
|
|
|
|
|
|
|
from .link import Link
|
|
|
|
|
|
|
|
|
|
|
|
class UDPLink(Link):
|
2016-03-14 16:40:27 +00:00
|
|
|
|
2016-06-14 14:57:13 +00:00
|
|
|
def __init__(self, project, link_id=None):
|
|
|
|
super().__init__(project, link_id=link_id)
|
2016-05-11 17:35:36 +00:00
|
|
|
self._capture_node = None
|
2016-09-15 17:08:57 +00:00
|
|
|
self._created = False
|
2016-11-10 13:18:24 +00:00
|
|
|
self._link_data = []
|
|
|
|
|
|
|
|
    @property
    def debug_link_data(self):
        """
        Raw NIO parameters that were sent to the computes for each side of the link.

        Use for the debug exports

        :returns: list of dict (empty until create() has run)
        """
        return self._link_data
|
2016-03-14 16:40:27 +00:00
|
|
|
|
2016-03-11 19:13:52 +00:00
|
|
|
    @asyncio.coroutine
    def create(self):
        """
        Create the link on the nodes

        Reserves a UDP port on each compute, then creates the UDP tunnel NIO
        on both sides. If the second NIO fails, the first one is deleted so
        no half-created link is left behind.

        :raises aiohttp.web.HTTPConflict: when no common subnet exists between the two computes
        """

        # Endpoints of the link: (node, adapter, port) for each side
        node1 = self._nodes[0]["node"]
        adapter_number1 = self._nodes[0]["adapter_number"]
        port_number1 = self._nodes[0]["port_number"]
        node2 = self._nodes[1]["node"]
        adapter_number2 = self._nodes[1]["adapter_number"]
        port_number2 = self._nodes[1]["port_number"]

        # Get an IP allowing communication between both host
        try:
            (node1_host, node2_host) = yield from node1.compute.get_ip_on_same_subnet(node2.compute)
        except ValueError as e:
            # No usable pair of addresses -> surface as an HTTP conflict to the client
            raise aiohttp.web.HTTPConflict(text=str(e))

        # Reserve a UDP port on both side
        response = yield from node1.compute.post("/projects/{}/ports/udp".format(self._project.id))
        self._node1_port = response.json["udp_port"]
        response = yield from node2.compute.post("/projects/{}/ports/udp".format(self._project.id))
        self._node2_port = response.json["udp_port"]

        # Packet filters are applied on only one side of the link
        node1_filters = {}
        node2_filters = {}
        filter_node = self._get_filter_node()
        if filter_node == node1:
            node1_filters = self.get_active_filters()
        elif filter_node == node2:
            node2_filters = self.get_active_filters()

        # Create the tunnel on both side
        # Each side's NIO listens on its local port and sends to the peer's host/port
        self._link_data.append({
            "lport": self._node1_port,
            "rhost": node2_host,
            "rport": self._node2_port,
            "type": "nio_udp",
            "filters": node1_filters,
            "suspend": self._suspended
        })
        yield from node1.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)

        self._link_data.append({
            "lport": self._node2_port,
            "rhost": node1_host,
            "rport": self._node1_port,
            "type": "nio_udp",
            "filters": node2_filters,
            "suspend": self._suspended
        })
        try:
            yield from node2.post("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=120)
        except Exception as e:
            # We clean the first NIO
            yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
            raise e
        self._created = True
|
2016-03-14 16:40:27 +00:00
|
|
|
|
2017-06-30 08:22:30 +00:00
|
|
|
@asyncio.coroutine
|
|
|
|
def update(self):
|
2018-03-19 09:26:12 +00:00
|
|
|
"""
|
|
|
|
Update the link on the nodes
|
|
|
|
"""
|
|
|
|
|
2017-06-30 08:22:30 +00:00
|
|
|
if len(self._link_data) == 0:
|
|
|
|
return
|
|
|
|
node1 = self._nodes[0]["node"]
|
|
|
|
node2 = self._nodes[1]["node"]
|
2018-03-19 09:26:12 +00:00
|
|
|
|
|
|
|
node1_filters = {}
|
|
|
|
node2_filters = {}
|
2017-06-30 08:22:30 +00:00
|
|
|
filter_node = self._get_filter_node()
|
2018-03-19 09:26:12 +00:00
|
|
|
if filter_node == node1:
|
|
|
|
node1_filters = self.get_active_filters()
|
|
|
|
elif filter_node == node2:
|
|
|
|
node2_filters = self.get_active_filters()
|
2017-06-30 08:22:30 +00:00
|
|
|
|
2018-03-19 09:26:12 +00:00
|
|
|
adapter_number1 = self._nodes[0]["adapter_number"]
|
|
|
|
port_number1 = self._nodes[0]["port_number"]
|
|
|
|
self._link_data[0]["filters"] = node1_filters
|
|
|
|
self._link_data[0]["suspend"] = self._suspended
|
|
|
|
yield from node1.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), data=self._link_data[0], timeout=120)
|
|
|
|
|
|
|
|
adapter_number2 = self._nodes[1]["adapter_number"]
|
|
|
|
port_number2 = self._nodes[1]["port_number"]
|
|
|
|
self._link_data[1]["filters"] = node2_filters
|
|
|
|
self._link_data[1]["suspend"] = self._suspended
|
|
|
|
yield from node2.put("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), data=self._link_data[1], timeout=221)
|
2017-06-30 08:22:30 +00:00
|
|
|
|
2016-03-14 16:40:27 +00:00
|
|
|
    @asyncio.coroutine
    def delete(self):
        """
        Delete the link and free the resources

        Best effort: a side whose node entry is missing (IndexError) or whose
        NIO is already gone on the compute (HTTPNotFound) is silently skipped.
        """
        # Nothing to tear down if create() never completed
        if not self._created:
            return
        try:
            node1 = self._nodes[0]["node"]
            adapter_number1 = self._nodes[0]["adapter_number"]
            port_number1 = self._nodes[0]["port_number"]
        except IndexError:
            return
        try:
            yield from node1.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number1, port_number=port_number1), timeout=120)
        # If the node is already delete (user selected multiple element and delete all in the same time)
        except aiohttp.web.HTTPNotFound:
            pass

        try:
            node2 = self._nodes[1]["node"]
            adapter_number2 = self._nodes[1]["adapter_number"]
            port_number2 = self._nodes[1]["port_number"]
        except IndexError:
            return
        try:
            yield from node2.delete("/adapters/{adapter_number}/ports/{port_number}/nio".format(adapter_number=adapter_number2, port_number=port_number2), timeout=120)
        # If the node is already delete (user selected multiple element and delete all in the same time)
        except aiohttp.web.HTTPNotFound:
            pass
        yield from super().delete()
|
2016-04-21 10:14:09 +00:00
|
|
|
|
|
|
|
@asyncio.coroutine
|
2016-04-26 15:10:33 +00:00
|
|
|
def start_capture(self, data_link_type="DLT_EN10MB", capture_file_name=None):
|
2016-04-21 10:14:09 +00:00
|
|
|
"""
|
|
|
|
Start capture on a link
|
|
|
|
"""
|
2016-04-26 15:10:33 +00:00
|
|
|
if not capture_file_name:
|
|
|
|
capture_file_name = self.default_capture_file_name()
|
2016-05-11 17:35:36 +00:00
|
|
|
self._capture_node = self._choose_capture_side()
|
2016-04-21 10:14:09 +00:00
|
|
|
data = {
|
2016-04-26 15:10:33 +00:00
|
|
|
"capture_file_name": capture_file_name,
|
2016-04-21 11:49:29 +00:00
|
|
|
"data_link_type": data_link_type
|
2016-04-21 10:14:09 +00:00
|
|
|
}
|
2016-05-11 17:35:36 +00:00
|
|
|
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/start_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]), data=data)
|
2016-04-26 15:10:33 +00:00
|
|
|
yield from super().start_capture(data_link_type=data_link_type, capture_file_name=capture_file_name)
|
2016-04-21 10:14:09 +00:00
|
|
|
|
|
|
|
@asyncio.coroutine
|
|
|
|
def stop_capture(self):
|
|
|
|
"""
|
|
|
|
Stop capture on a link
|
|
|
|
"""
|
2016-05-11 17:35:36 +00:00
|
|
|
if self._capture_node:
|
|
|
|
yield from self._capture_node["node"].post("/adapters/{adapter_number}/ports/{port_number}/stop_capture".format(adapter_number=self._capture_node["adapter_number"], port_number=self._capture_node["port_number"]))
|
|
|
|
self._capture_node = None
|
2016-04-26 15:10:33 +00:00
|
|
|
yield from super().stop_capture()
|
2016-04-21 10:14:09 +00:00
|
|
|
|
|
|
|
def _choose_capture_side(self):
|
|
|
|
"""
|
|
|
|
Run capture on the best candidate.
|
|
|
|
|
2016-11-22 15:05:00 +00:00
|
|
|
The ideal candidate is a node who on controller server and always
|
|
|
|
running (capture will not be cut off)
|
2016-04-21 10:14:09 +00:00
|
|
|
|
2016-05-11 17:35:36 +00:00
|
|
|
:returns: Node where the capture should run
|
2016-04-21 10:14:09 +00:00
|
|
|
"""
|
|
|
|
|
2016-11-22 15:05:00 +00:00
|
|
|
ALWAYS_RUNNING_NODES_TYPE = ("cloud", "nat", "ethernet_switch", "ethernet_hub")
|
|
|
|
|
|
|
|
for node in self._nodes:
|
|
|
|
if node["node"].compute.id == "local" and node["node"].node_type in ALWAYS_RUNNING_NODES_TYPE and node["node"].status == "started":
|
|
|
|
return node
|
|
|
|
|
2016-05-11 17:35:36 +00:00
|
|
|
for node in self._nodes:
|
2016-11-22 15:05:00 +00:00
|
|
|
if node["node"].node_type in ALWAYS_RUNNING_NODES_TYPE and node["node"].status == "started":
|
2016-05-11 17:35:36 +00:00
|
|
|
return node
|
2016-04-21 10:14:09 +00:00
|
|
|
|
2016-05-11 17:35:36 +00:00
|
|
|
for node in self._nodes:
|
2016-11-22 15:05:00 +00:00
|
|
|
if node["node"].compute.id == "local" and node["node"].status == "started":
|
2016-05-11 17:35:36 +00:00
|
|
|
return node
|
2016-04-21 10:14:09 +00:00
|
|
|
|
2016-11-22 15:05:00 +00:00
|
|
|
for node in self._nodes:
|
|
|
|
if node["node"].node_type and node["node"].status == "started":
|
|
|
|
return node
|
|
|
|
|
2017-07-20 04:11:44 +00:00
|
|
|
raise aiohttp.web.HTTPConflict(text="Cannot capture because there is no running device on this link")
|
2016-04-22 14:22:03 +00:00
|
|
|
|
|
|
|
@asyncio.coroutine
|
2016-04-26 15:10:33 +00:00
|
|
|
def read_pcap_from_source(self):
|
2016-04-22 14:22:03 +00:00
|
|
|
"""
|
|
|
|
Return a FileStream of the Pcap from the compute node
|
|
|
|
"""
|
2016-05-11 17:35:36 +00:00
|
|
|
if self._capture_node:
|
|
|
|
compute = self._capture_node["node"].compute
|
2016-07-21 18:17:36 +00:00
|
|
|
return compute.stream_file(self._project, "tmp/captures/" + self._capture_file_name)
|
2016-11-22 15:05:00 +00:00
|
|
|
|
|
|
|
@asyncio.coroutine
|
|
|
|
def node_updated(self, node):
|
|
|
|
"""
|
|
|
|
Called when a node member of the link is updated
|
|
|
|
"""
|
|
|
|
if self._capture_node and node == self._capture_node["node"] and node.status != "started":
|
|
|
|
yield from self.stop_capture()
|