mirror of https://github.com/GNS3/gns3-server synced 2024-11-24 17:28:08 +00:00

Merge branch '2.2'

grossmj 2023-01-05 09:12:11 +08:00
commit d0141c351b
43 changed files with 516 additions and 65 deletions

View File

@ -1,5 +1,17 @@
# Change Log
## 2.2.36 04/01/2023
* Install web-ui v2.2.36
* Add Trusted Platform Module (TPM) support for Qemu VMs
* Require Dynamips 0.2.23 and bind Dynamips hypervisor on 127.0.0.1
* Delete the built-in appliance directory before installing updated files
* Use a stock BusyBox for the Docker integration
* Overwrite built-in appliance files when starting a more recent version of the server
* Fix reset console. Fixes #1619
* Only use importlib_resources for Python <= 3.9. Fixes #2147
* Support the case where the user field defined in a Docker container is an ID. Fixes #2134
## 2.2.35.1 10/11/2022
* Re-release Web-Ui v2.2.35

View File

@ -29,6 +29,11 @@ In addition of Python dependencies listed in a section below, other software may
* mtools is recommended to support data transfer to/from QEMU VMs using virtual disks.
* i386-libraries of libc and libcrypto are optional (Linux only), they are only needed to run IOU based nodes.
Docker support
**************
Docker support needs the `script` program (from the `bsdutils` or `util-linux` package) to run Docker VMs, and a statically linked busybox during installation (python3 setup.py install / pip3 install / package creation).
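A quick way to sanity-check these prerequisites from Python before installing; this is only a sketch (it is not part of the project) and it mirrors the static-busybox test that setup.py performs later in this commit:

import shutil
import subprocess

def check_docker_prerequisites():
    # "script" comes from the bsdutils/util-linux package and is needed to run Docker VMs
    if shutil.which("script") is None:
        raise SystemExit("the 'script' program (bsdutils or util-linux) is missing")
    # a statically linked busybox is needed at installation time;
    # "ldd" exits with a non-zero status for static binaries
    busybox = (shutil.which("busybox-static")
               or shutil.which("busybox.static")
               or shutil.which("busybox"))
    if busybox is None or subprocess.call(["ldd", busybox],
                                          stdout=subprocess.DEVNULL,
                                          stderr=subprocess.DEVNULL) == 0:
        raise SystemExit("a statically linked busybox is missing")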
Branches
--------

View File

@ -21,14 +21,14 @@
"images": [
{
"filename": "c3725-adventerprisek9-mz.124-15.T14.image",
"version": "124-25.T14",
"version": "124-15.T14",
"md5sum": "64f8c427ed48fd21bd02cf1ff254c4eb",
"filesize": 97859480
}
],
"versions": [
{
"name": "124-25.T14",
"name": "124-15.T14",
"idlepc": "0x60c09aa0",
"images": {
"image": "c3725-adventerprisek9-mz.124-15.T14.image"

View File

@ -77,6 +77,13 @@
"md5sum": "4cf5b7fd68075b6f7ee0dd41a4029ca0",
"filesize": 2150017536,
"download_url": "https://software.cisco.com/download/"
},
{
"filename": "Cisco_Firepower_Management_Center_Virtual-6.2.2-81.qcow2",
"version": "6.2.2 (81)",
"md5sum": "2f75c9c6c18a6fbb5516f6f451aef3a4",
"filesize": 2112356352,
"download_url": "https://software.cisco.com/download/"
}
],
"versions": [
@ -121,6 +128,12 @@
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual_VMware-6.2.1-342-disk1.vmdk"
}
},
{
"name": "6.2.2 (81)",
"images": {
"hda_disk_image": "Cisco_Firepower_Management_Center_Virtual-6.2.2-81.qcow2"
}
}
]
}

View File

@ -25,6 +25,14 @@
"kvm": "require"
},
"images": [
{
"filename": "cumulus-linux-5.3.1-vx-amd64-qemu.qcow2",
"version": "5.3.1",
"md5sum": "366b4e5afbfb638244fac4dd6cd092fd",
"filesize": 2147479552,
"download_url": "https://www.nvidia.com/en-us/networking/ethernet-switching/cumulus-vx/download/",
"direct_download_url": "https://d2cd9e7ca6hntp.cloudfront.net/public/CumulusLinux-5.3.1/cumulus-linux-5.3.1-vx-amd64-qemu.qcow2"
},
{
"filename": "cumulus-linux-5.1.0-vx-amd64-qemu.qcow2",
"version": "5.1.0",
@ -239,6 +247,12 @@
}
],
"versions": [
{
"name": "5.3.1",
"images": {
"hda_disk_image": "cumulus-linux-5.3.1-vx-amd64-qemu.qcow2"
}
},
{
"name": "5.1.0",
"images": {

View File

@ -24,12 +24,12 @@
},
"images": [
{
"filename": "debian-11-genericcloud-amd64-20220911-1135.qcow2",
"version": "11.5",
"md5sum": "06e481ddd23682af4326226661c13d8f",
"filesize": 254672896,
"filename": "debian-11-genericcloud-amd64-20221219-1234.qcow2",
"version": "11.6",
"md5sum": "bd6ddbccc89e40deb7716b812958238d",
"filesize": 258801664,
"download_url": "https://cloud.debian.org/images/cloud/bullseye/",
"direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20220911-1135/debian-11-genericcloud-amd64-20220911-1135.qcow2"
"direct_download_url": "https://cloud.debian.org/images/cloud/bullseye/20221219-1234/debian-11-genericcloud-amd64-20221219-1234.qcow2"
},
{
"filename": "debian-10-genericcloud-amd64-20220911-1135.qcow2",
@ -49,9 +49,9 @@
],
"versions": [
{
"name": "11.5",
"name": "11.6",
"images": {
"hda_disk_image": "debian-11-genericcloud-amd64-20220911-1135.qcow2",
"hda_disk_image": "debian-11-genericcloud-amd64-20221219-1234.qcow2",
"cdrom_image": "debian-cloud-init-data.iso"
}
},

View File

@ -10,7 +10,7 @@
"status": "stable",
"maintainer": "Andras Dosztal",
"maintainer_email": "developers@gns3.net",
"usage": "You can add records by adding entries to the /etc/hosts file in the following format:\n%IP_ADDRESS% %HOSTNAME%.lab %HOSTNAME%\n\nExample:\n192.168.123.10 router1.lab router1",
"usage": "You can add records by adding entries to the /etc/hosts file in the following format:\n%IP_ADDRESS% %HOSTNAME%.lab %HOSTNAME%\n\nExample:\n192.168.123.10 router1.lab router1\n\nIf you require DNS requests to be serviced from a different subnet than the one that the DNS server resides on then do the following:\n\n1. Edit (nano or vim) /ect/init.d/dnsmasq\n2. Find the line DNSMASQ_OPTS=\"$DNSMASQ_OPTS --local-service\"\n3. Remove the --local-service or comment that line out and add DNSMASQ_OPTS=\"\"\n4. Restart dnsmasq - service dnsmaq restart",
"symbol": "linux_guest.svg",
"docker": {
"adapters": 1,

View File

@ -34,6 +34,13 @@
"filesize": 340631552,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FAZ_VM64_KVM-v7.0.5-build0365-FORTINET.out.kvm.qcow2",
"version": "7.0.5",
"md5sum": "6cbc1f865ed285bb3a73323e222f03b8",
"filesize": 334184448,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FAZ_VM64_KVM-v6-build2288-FORTINET.out.kvm.qcow2",
"version": "6.4.5",
@ -191,6 +198,13 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "7.0.5",
"images": {
"hda_disk_image": "FAZ_VM64_KVM-v7.0.5-build0365-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "6.4.5",
"images": {

View File

@ -27,6 +27,13 @@
"kvm": "allow"
},
"images": [
{
"filename": "FGT_VM64_KVM-v7.2.3.F-build1262-FORTINET.out.kvm.qcow2",
"version": "7.2.3",
"md5sum": "e8f3c5879f0d6fe238dc2665a3508694",
"filesize": 87490560,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v7.2.1.F-build1254-FORTINET.out.kvm.qcow2",
"version": "7.2.1",
@ -34,6 +41,20 @@
"filesize": 86704128,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v7.0.9.M-build0444-FORTINET.out.kvm.qcow2",
"version": "7.0.9",
"md5sum": "0aee912ab11bf9a4b0e3fc1a62dd0e40",
"filesize": 77135872,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v6.M-build2030-FORTINET.out.kvm.qcow2",
"version": "6.4.11",
"md5sum": "bcd7491ddfa31fec4f618b73792456e4",
"filesize": 69861376,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FGT_VM64_KVM-v6-build1828-FORTINET.out.kvm.qcow2",
"version": "6.4.5",
@ -261,6 +282,13 @@
}
],
"versions": [
{
"name": "7.2.3",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v7.2.3.F-build1262-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "7.2.1",
"images": {
@ -268,6 +296,20 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "7.0.9",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v7.0.9.M-build0444-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "6.4.11",
"images": {
"hda_disk_image": "FGT_VM64_KVM-v6.M-build2030-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "6.4.5",
"images": {

View File

@ -34,6 +34,13 @@
"filesize": 242814976,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FMG_VM64_KVM-v7.0.5-build0365-FORTINET.out.kvm.qcow2",
"version": "7.0.5",
"md5sum": "e8b9c992784cea766b52a427a5fe0279",
"filesize": 237535232,
"download_url": "https://support.fortinet.com/Download/FirmwareImages.aspx"
},
{
"filename": "FMG_VM64_KVM-v6-build2288-FORTINET.out.kvm.qcow2",
"version": "6.4.5",
@ -191,6 +198,13 @@
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "7.0.5",
"images": {
"hda_disk_image": "FMG_VM64_KVM-v7.0.5-build0365-FORTINET.out.kvm.qcow2",
"hdb_disk_image": "empty30G.qcow2"
}
},
{
"name": "6.4.5",
"images": {

View File

@ -22,6 +22,14 @@
"kvm": "allow"
},
"images": [
{
"filename": "frr-8.2.2.qcow2",
"version": "8.2.2",
"md5sum": "45cda6b991a1b9e8205a3a0ecc953640",
"filesize": 56609280,
"download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/",
"direct_download_url": "http://downloads.sourceforge.net/project/gns-3/Qemu%20Appliances/frr-8.2.2.qcow2"
},
{
"filename": "frr-8.1.0.qcow2",
"version": "8.1.0",
@ -48,6 +56,12 @@
}
],
"versions": [
{
"name": "8.2.2",
"images": {
"hda_disk_image": "frr-8.2.2.qcow2"
}
},
{
"name": "8.1.0",
"images": {

View File

@ -14,7 +14,7 @@
"usage": "In the web interface login as admin/admin\n\nPersistent configuration:\n- Add \"/var/lib/redis\" as an additional persistent directory.\n- Use \"redis-cli save\" in an auxiliary console to save the configuration.",
"docker": {
"adapters": 1,
"image": "ntop/ntopng:stable",
"image": "ntop/ntopng:latest",
"start_command": "--dns-mode 2 --interface eth0",
"console_type": "http",
"console_http_port": 3000,

View File

@ -54,6 +54,13 @@
"filesize": 338690048,
"download_url": "https://support.vyos.io/en/downloads/files/vyos-1-3-0-generic-iso-image"
},
{
"filename": "vyos-1.2.9-amd64.iso",
"version": "1.2.9",
"md5sum": "586be23b6256173e174c82d8f1f699a1",
"filesize": 430964736,
"download_url": "https://support.vyos.io/en/downloads/files/vyos-1-2-9-generic-iso-image"
},
{
"filename": "vyos-1.2.8-amd64.iso",
"version": "1.2.8",
@ -114,6 +121,13 @@
"cdrom_image": "vyos-1.3.0-amd64.iso"
}
},
{
"name": "1.2.9",
"images": {
"hda_disk_image": "empty8G.qcow2",
"cdrom_image": "vyos-1.2.9-amd64.iso"
}
},
{
"name": "1.2.8",
"images": {

View File

@ -0,0 +1,59 @@
{
"appliance_id": "f3b6a3ac-7be5-4bb0-b204-da3712fb646c",
"name": "Windows-11-Dev-Env",
"category": "guest",
"description": "Windows 11 Developer Environment Virtual Machine.",
"vendor_name": "Microsoft",
"vendor_url": "https://www.microsoft.com",
"documentation_url": "https://developer.microsoft.com/en-us/windows/downloads/virtual-machines/",
"product_name": "Windows 11 Development Environment",
"product_url": "https://developer.microsoft.com/en-us/windows/downloads/virtual-machines/",
"registry_version": 4,
"status": "experimental",
"availability": "free",
"maintainer": "Ean Towne",
"maintainer_email": "eantowne@gmail.com",
"usage": "Uses SPICE not VNC\nHighly recommended to install the SPICE-agent from: https://www.spice-space.org/download/windows/spice-guest-tools/spice-guest-tools-latest.exe to be able to change resolution and increase performance.\nThis is an evaluation virtual machine (90 days) and includes:\n* Window 11 Enterprise (Evaluation)\n* Visual Studio 2022 Community Edition with UWP .NET Desktop, Azure, and Windows App SDK for C# workloads enabled\n* Windows Subsystem for Linux 2 enabled with Ubuntu installed\n* Windows Terminal installed\n* Developer mode enabled",
"symbol": "microsoft.svg",
"first_port_name": "Network Adapter 1",
"port_name_format": "Network Adapter {0}",
"qemu": {
"adapter_type": "e1000",
"adapters": 1,
"ram": 4096,
"cpus": 4,
"hda_disk_interface": "sata",
"arch": "x86_64",
"console_type": "spice",
"boot_priority": "c",
"kvm": "require"
},
"images": [
{
"filename": "WinDev2212Eval-disk1.vmdk",
"version": "2212",
"md5sum": "c79f393a067b92e01a513a118d455ac8",
"filesize": 24620493824,
"download_url": "https://aka.ms/windev_VM_vmware",
"compression": "zip"
},
{
"filename": "OVMF-20160813.fd",
"version": "16.08.13",
"md5sum": "8ff0ef1ec56345db5b6bda1a8630e3c6",
"filesize": 2097152,
"download_url": "",
"direct_download_url": "https://sourceforge.net/projects/gns-3/files/Qemu%20Appliances/OVMF-20160813.fd.zip/download",
"compression": "zip"
}
],
"versions": [
{
"images": {
"bios_image": "OVMF-20160813.fd",
"hda_disk_image": "WinDev2212Eval-disk1.vmdk"
},
"name": "2212"
}
]
}

View File

@ -77,6 +77,8 @@ class BaseNode:
self._allocate_aux = allocate_aux
self._wrap_console = wrap_console
self._wrapper_telnet_server = None
self._wrap_console_reader = None
self._wrap_console_writer = None
self._internal_console_port = None
self._custom_adapters = []
self._ubridge_require_privileged_access = False
@ -338,7 +340,6 @@ class BaseNode:
if self._wrap_console:
self._manager.port_manager.release_tcp_port(self._internal_console_port, self._project)
self._internal_console_port = None
if self._aux:
self._manager.port_manager.release_tcp_port(self._aux, self._project)
self._aux = None
@ -376,17 +377,29 @@ class BaseNode:
remaining_trial = 60
while True:
try:
(reader, writer) = await asyncio.open_connection(host="127.0.0.1", port=self._internal_console_port)
(self._wrap_console_reader, self._wrap_console_writer) = await asyncio.open_connection(
host="127.0.0.1",
port=self._internal_console_port
)
break
except (OSError, ConnectionRefusedError) as e:
if remaining_trial <= 0:
raise e
await asyncio.sleep(0.1)
remaining_trial -= 1
await AsyncioTelnetServer.write_client_intro(writer, echo=True)
server = AsyncioTelnetServer(reader=reader, writer=writer, binary=True, echo=True)
await AsyncioTelnetServer.write_client_intro(self._wrap_console_writer, echo=True)
server = AsyncioTelnetServer(
reader=self._wrap_console_reader,
writer=self._wrap_console_writer,
binary=True,
echo=True
)
# warning: this will raise OSError exception if there is a problem...
self._wrapper_telnet_server = await asyncio.start_server(server.run, self._manager.port_manager.console_host, self.console)
self._wrapper_telnet_server = await asyncio.start_server(
server.run,
self._manager.port_manager.console_host,
self.console
)
async def stop_wrap_console(self):
"""
@ -394,12 +407,15 @@ class BaseNode:
"""
if self._wrapper_telnet_server:
self._wrap_console_writer.close()
await self._wrap_console_writer.wait_closed()
self._wrapper_telnet_server.close()
await self._wrapper_telnet_server.wait_closed()
self._wrapper_telnet_server = None
async def reset_console(self):
async def reset_wrap_console(self):
"""
Reset console
Reset the wrap console (restarts the Telnet proxy)
"""
await self.stop_wrap_console()
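The hunk is truncated after the stop call. Based on the methods shown in this file, the reset most likely just tears the Telnet proxy down and brings it back up, and concrete node classes expose it through reset_console; a hedged sketch of that pattern (start_wrap_console is assumed to be the existing counterpart of stop_wrap_console, and the reset_console shape matches the QEMU and VPCS hunks further down):

async def reset_wrap_console(self):
    await self.stop_wrap_console()     # close the writer and the Telnet server
    await self.start_wrap_console()    # reopen the connection and restart the proxy

# in a concrete node class:
async def reset_console(self):
    if self.is_running():
        await self.reset_wrap_console()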

View File

@ -520,10 +520,14 @@ class DockerVM(BaseNode):
# https://github.com/GNS3/gns3-gui/issues/1039
try:
process = await asyncio.subprocess.create_subprocess_exec(
"docker", "exec", "-i", self._cid, "/gns3/bin/busybox", "script", "-qfc", "while true; do TERM=vt100 /gns3/bin/busybox sh; done", "/dev/null",
"script",
"-qfc",
f"docker exec -i -t {self._cid} /gns3/bin/busybox sh -c 'while true; do TERM=vt100 /gns3/bin/busybox sh; done'",
"/dev/null",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE)
stdin=asyncio.subprocess.PIPE
)
except OSError as e:
raise DockerError("Could not start auxiliary console process: {}".format(e))
server = AsyncioTelnetServer(reader=process.stdout, writer=process.stdin, binary=True, echo=True)

View File

@ -0,0 +1,15 @@
#!/gns3/bin/busybox sh
SCRIPT="/gns3/etc/udhcpc/default.script"
if [ "$(cat "/proc/$PPID/comm" 2>/dev/null)" = ifup ]; then
# remove "-n" argument
for arg do
shift
[ "$arg" = "-n" ] || set -- "$@" "$arg"
done
# add default parameters
set -- -t 3 -T 2 -A 1 -b "$@"
fi
exec /tmp/gns3/bin/udhcpc -s "$SCRIPT" "$@"

View File

@ -87,5 +87,13 @@ done
ifup -a -f
# continue normal docker startup
eval HOME=$(echo ~${GNS3_USER-root})
case "$GNS3_USER" in
[1-9][0-9]*)
# for when the user field defined in the Docker container is an ID
export GNS3_USER=$(cat /etc/passwd | grep ${GNS3_USER-root} | awk -F: '{print $1}')
;;
*)
;;
esac
eval HOME="$(echo ~${GNS3_USER-root})"
exec su ${GNS3_USER-root} -p -- /gns3/run-cmd.sh "$OLD_PATH" "$@"

View File

@ -278,8 +278,12 @@ class Dynamips(BaseManager):
if not working_dir:
working_dir = tempfile.gettempdir()
# FIXME: hypervisor should always listen to 127.0.0.1
if not sys.platform.startswith("win"):
# Hypervisor should always listen to 127.0.0.1
# See https://github.com/GNS3/dynamips/issues/62
# This was fixed in Dynamips v0.2.23 which hasn't been built for Windows
server_host = "127.0.0.1"
else:
server_config = self.config.get_section_config("Server")
server_host = server_config.get("host")
@ -306,6 +310,8 @@ class Dynamips(BaseManager):
await hypervisor.connect()
if parse_version(hypervisor.version) < parse_version('0.2.11'):
raise DynamipsError("Dynamips version must be >= 0.2.11, detected version is {}".format(hypervisor.version))
if not sys.platform.startswith("win") and parse_version(hypervisor.version) < parse_version('0.2.23'):
raise DynamipsError("Dynamips version must be >= 0.2.23 on Linux/macOS, detected version is {}".format(hypervisor.version))
return hypervisor

View File

@ -94,7 +94,9 @@ class DynamipsHypervisor:
try:
version = await self.send("hypervisor version")
self._version = version[0].split("-", 1)[0]
log.info("Dynamips version {} detected".format(self._version))
except IndexError:
log.warning("Dynamips version could not be detected")
self._version = "Unknown"
# this forces to send the working dir to Dynamips

View File

@ -204,11 +204,9 @@ class Hypervisor(DynamipsHypervisor):
command = [self._path]
command.extend(["-N1"]) # use instance IDs for filenames
command.extend(["-l", "dynamips_i{}_log.txt".format(self._id)]) # log file
# Dynamips cannot listen for hypervisor commands and for console connections on
# 2 different IP addresses.
# See https://github.com/GNS3/dynamips/issues/62
if self._console_host != "0.0.0.0" and self._console_host != "::":
command.extend(["-H", "{}:{}".format(self._host, self._port)])
if not sys.platform.startswith("win"):
command.extend(["-H", "{}:{}".format(self._host, self._port), "--console-binding-addr", self._console_host])
else:
command.extend(["-H", str(self._port)])
return command

View File

@ -977,7 +977,6 @@ class Router(BaseNode):
raise DynamipsError('"{name}" must be stopped to change the console type to {console_type}'.format(name=self._name,
console_type=console_type))
self.console_type = console_type
if self._console and console_type == "telnet":
@ -993,6 +992,13 @@ class Router(BaseNode):
self.aux = aux
await self._hypervisor.send('vm set_aux_tcp_port "{name}" {aux}'.format(name=self._name, aux=aux))
async def reset_console(self):
"""
Reset console
"""
pass # reset console is not supported with Dynamips
async def get_cpu_usage(self, cpu_id=0):
"""
Shows cpu usage in seconds, "cpu_id" is ignored.

View File

@ -16,6 +16,7 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import socket
import ipaddress
from aiohttp.web import HTTPConflict
from gns3server.config import Config
@ -83,7 +84,7 @@ class PortManager:
@console_host.setter
def console_host(self, new_host):
"""
Bind console host to 0.0.0.0 if remote connections are allowed.
Bind console host to 0.0.0.0 or :: if remote connections are allowed.
"""
server_config = Config.instance().get_section_config("Server")
@ -91,6 +92,12 @@ class PortManager:
if remote_console_connections:
log.warning("Remote console connections are allowed")
self._console_host = "0.0.0.0"
try:
ip = ipaddress.ip_address(new_host)
if isinstance(ip, ipaddress.IPv6Address):
self._console_host = "::"
except ValueError:
log.warning("Could not determine IP address type for console host")
else:
self._console_host = new_host
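In isolation the new binding rule amounts to a small helper; a standalone sketch of the same logic (the function name is illustrative, only the ipaddress check comes from the setter above):

import ipaddress

def wildcard_bind_address(configured_host):
    # with remote console connections enabled, bind to the wildcard address
    # that matches the address family of the configured host
    try:
        ip = ipaddress.ip_address(configured_host)
    except ValueError:
        return "0.0.0.0"   # hostname or unparsable value: keep the IPv4 wildcard
    return "::" if isinstance(ip, ipaddress.IPv6Address) else "0.0.0.0"

# wildcard_bind_address("192.168.1.10") -> "0.0.0.0"
# wildcard_bind_address("fd00::1")      -> "::"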

View File

@ -78,6 +78,7 @@ class QemuVM(BaseNode):
self._monitor_host = server_config.get("monitor_host", "127.0.0.1")
self._process = None
self._cpulimit_process = None
self._swtpm_process = None
self._monitor = None
self._stdout_file = ""
self._qemu_img_stdout_file = ""
@ -120,6 +121,7 @@ class QemuVM(BaseNode):
self._initrd = ""
self._kernel_image = ""
self._kernel_command_line = ""
self._tpm = False
self._legacy_networking = False
self._replicate_network_connection_state = True
self._create_config_disk = False
@ -686,7 +688,7 @@ class QemuVM(BaseNode):
"""
Sets whether a config disk is automatically created on HDD disk interface (secondary slave)
:param replicate_network_connection_state: boolean
:param create_config_disk: boolean
"""
if create_config_disk:
@ -807,6 +809,30 @@ class QemuVM(BaseNode):
log.info('QEMU VM "{name}" [{id}] has set the number of vCPUs to {cpus}'.format(name=self._name, id=self._id, cpus=cpus))
self._cpus = cpus
@property
def tpm(self):
"""
Returns whether TPM is activated for this QEMU VM.
:returns: boolean
"""
return self._tpm
@tpm.setter
def tpm(self, tpm):
"""
Sets whether TPM is activated for this QEMU VM.
:param tpm: boolean
"""
if tpm:
log.info('QEMU VM "{name}" [{id}] has enabled the Trusted Platform Module (TPM)'.format(name=self._name, id=self._id))
else:
log.info('QEMU VM "{name}" [{id}] has disabled the Trusted Platform Module (TPM)'.format(name=self._name, id=self._id))
self._tpm = tpm
@property
def options(self):
"""
@ -984,11 +1010,8 @@ class QemuVM(BaseNode):
"""
if self._cpulimit_process and self._cpulimit_process.returncode is None:
self._cpulimit_process.kill()
try:
self._process.wait(3)
except subprocess.TimeoutExpired:
log.error("Could not kill cpulimit process {}".format(self._cpulimit_process.pid))
self._cpulimit_process.terminate()
self._cpulimit_process = None
def _set_cpu_throttling(self):
"""
@ -1003,7 +1026,9 @@ class QemuVM(BaseNode):
cpulimit_exec = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), "cpulimit", "cpulimit.exe")
else:
cpulimit_exec = "cpulimit"
subprocess.Popen([cpulimit_exec, "--lazy", "--pid={}".format(self._process.pid), "--limit={}".format(self._cpu_throttling)], cwd=self.working_dir)
command = [cpulimit_exec, "--lazy", "--pid={}".format(self._process.pid), "--limit={}".format(self._cpu_throttling)]
self._cpulimit_process = subprocess.Popen(command, cwd=self.working_dir)
log.info("CPU throttled to {}%".format(self._cpu_throttling))
except FileNotFoundError:
raise QemuError("cpulimit could not be found, please install it or deactivate CPU throttling")
@ -1079,7 +1104,8 @@ class QemuVM(BaseNode):
await self._set_process_priority()
if self._cpu_throttling:
self._set_cpu_throttling()
if self._tpm:
self._start_swtpm()
if "-enable-kvm" in command_string or "-enable-hax" in command_string:
self._hw_virtualization = True
@ -1162,6 +1188,7 @@ class QemuVM(BaseNode):
log.warning('QEMU VM "{}" PID={} is still running'.format(self._name, self._process.pid))
self._process = None
self._stop_cpulimit()
self._stop_swtpm()
if self.on_close != "save_vm_state":
await self._clear_save_vm_stated()
await self._export_config()
@ -1567,6 +1594,14 @@ class QemuVM(BaseNode):
self._process = None
return False
async def reset_console(self):
"""
Reset console
"""
if self.is_running():
await self.reset_wrap_console()
def command(self):
"""
Returns the QEMU command line.
@ -1987,6 +2022,60 @@ class QemuVM(BaseNode):
return options
def _start_swtpm(self):
"""
Start swtpm (TPM emulator)
"""
if sys.platform.startswith("win"):
raise QemuError("swtpm (TPM emulator) is not supported on Windows")
tpm_dir = os.path.join(self.working_dir, "tpm")
os.makedirs(tpm_dir, exist_ok=True)
tpm_sock = os.path.join(self.temporary_directory, "swtpm.sock")
swtpm = shutil.which("swtpm")
if not swtpm:
raise QemuError("Could not find swtpm (TPM emulator)")
try:
command = [
swtpm,
"socket",
"--tpm2",
'--tpmstate', "dir={}".format(tpm_dir),
"--ctrl",
"type=unixio,path={},terminate".format(tpm_sock)
]
command_string = " ".join(shlex_quote(s) for s in command)
log.info("Starting swtpm (TPM emulator) with: {}".format(command_string))
self._swtpm_process = subprocess.Popen(command, cwd=self.working_dir)
log.info("swtpm (TPM emulator) has started")
except (OSError, subprocess.SubprocessError) as e:
raise QemuError("Could not start swtpm (TPM emulator): {}".format(e))
def _stop_swtpm(self):
"""
Stop swtpm (TPM emulator)
"""
if self._swtpm_process and self._swtpm_process.returncode is None:
self._swtpm_process.terminate()
self._swtpm_process = None
def _tpm_options(self):
"""
Return the TPM options for Qemu.
"""
tpm_sock = os.path.join(self.temporary_directory, "swtpm.sock")
options = [
"-chardev",
"socket,id=chrtpm,path={}".format(tpm_sock),
"-tpmdev",
"emulator,id=tpm0,chardev=chrtpm",
"-device",
"tpm-tis,tpmdev=tpm0"
]
return options
async def _network_options(self):
network_options = []
@ -2282,6 +2371,8 @@ class QemuVM(BaseNode):
command.extend((await self._saved_state_option()))
if self._console_type == "telnet":
command.extend((await self._disable_graphics()))
if self._tpm:
command.extend(self._tpm_options())
if additional_options:
try:
command.extend(shlex.split(additional_options))
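Putting the TPM pieces together: start swtpm with a per-VM state directory and a UNIX control socket, then hand that socket to QEMU via -chardev/-tpmdev/-device. A condensed standalone sketch of that flow (binary names, paths and the helper name are illustrative; the option strings are the ones built above):

import os
import shutil
import subprocess

def start_vm_with_tpm(working_dir, qemu_command):
    tpm_dir = os.path.join(working_dir, "tpm")           # persistent TPM state
    os.makedirs(tpm_dir, exist_ok=True)
    tpm_sock = os.path.join(working_dir, "swtpm.sock")   # control socket for QEMU
    swtpm = shutil.which("swtpm")
    if not swtpm:
        raise RuntimeError("swtpm (TPM emulator) not found")
    subprocess.Popen([swtpm, "socket", "--tpm2",
                      "--tpmstate", "dir={}".format(tpm_dir),
                      "--ctrl", "type=unixio,path={},terminate".format(tpm_sock)])
    qemu_command += ["-chardev", "socket,id=chrtpm,path={}".format(tpm_sock),
                     "-tpmdev", "emulator,id=tpm0,chardev=chrtpm",
                     "-device", "tpm-tis,tpmdev=tpm0"]
    return subprocess.Popen(qemu_command)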

View File

@ -344,6 +344,14 @@ class VPCSVM(BaseNode):
return True
return False
async def reset_console(self):
"""
Reset console
"""
if self.is_running():
await self.reset_wrap_console()
@BaseNode.console_type.setter
def console_type(self, new_console_type):
"""

View File

@ -22,9 +22,15 @@ import uuid
import socket
import shutil
import aiohttp
try:
import importlib_resources
except ImportError:
from importlib import resources as importlib_resources
from ..config import Config
from ..utils import parse_version
from .project import Project
from .template import Template
from .appliance import Appliance
@ -65,7 +71,7 @@ class Controller:
async def start(self):
log.info("Controller is starting")
self._load_base_files()
self._install_base_configs()
server_config = Config.instance().get_section_config("Server")
Config.instance().listen_for_config_changes(self._update_config)
host = server_config.get("host", "localhost")
@ -242,7 +248,9 @@ class Controller:
if "iou_license" in controller_settings:
self._iou_license_settings = controller_settings["iou_license"]
if parse_version(__version__) > parse_version(controller_settings.get("version", "")):
self._appliance_manager.install_builtin_appliances()
self._appliance_manager.appliances_etag = controller_settings.get("appliances_etag")
self._appliance_manager.load_appliances()
self._template_manager.load_templates(controller_settings.get("templates"))
@ -270,13 +278,14 @@ class Controller:
except OSError as e:
log.error(str(e))
def _load_base_files(self):
def _install_base_configs(self):
"""
At startup we copy base config files to the user location to allow
them to be customized
"""
dst_path = self.configs_path()
log.info(f"Installing base configs in '{dst_path}'")
try:
if hasattr(sys, "frozen") and sys.platform.startswith("win"):
resource_path = os.path.normpath(os.path.join(os.path.dirname(sys.executable), "configs"))

View File

@ -21,9 +21,14 @@ import json
import uuid
import asyncio
import aiohttp
import importlib_resources
import shutil
try:
import importlib_resources
except ImportError:
from importlib import resources as importlib_resources
from .appliance import Appliance
from ..config import Config
from ..utils.asyncio import locking
@ -76,13 +81,15 @@ class ApplianceManager:
os.makedirs(appliances_path, exist_ok=True)
return appliances_path
def _builtin_appliances_path(self):
def _builtin_appliances_path(self, delete_first=False):
"""
Get the built-in appliance storage directory
"""
config = Config.instance()
appliances_dir = os.path.join(config.config_dir, "appliances")
if delete_first:
shutil.rmtree(appliances_dir, ignore_errors=True)
os.makedirs(appliances_dir, exist_ok=True)
return appliances_dir
@ -91,17 +98,17 @@ class ApplianceManager:
At startup we copy the built-in appliance files.
"""
dst_path = self._builtin_appliances_path()
dst_path = self._builtin_appliances_path(delete_first=True)
log.info(f"Installing built-in appliances in '{dst_path}'")
try:
if hasattr(sys, "frozen") and sys.platform.startswith("win"):
resource_path = os.path.normpath(os.path.join(os.path.dirname(sys.executable), "appliances"))
for filename in os.listdir(resource_path):
if not os.path.exists(os.path.join(dst_path, filename)):
shutil.copy(os.path.join(resource_path, filename), os.path.join(dst_path, filename))
else:
for entry in importlib_resources.files('gns3server.appliances').iterdir():
full_path = os.path.join(dst_path, entry.name)
if entry.is_file() and not os.path.exists(full_path):
if entry.is_file():
log.debug(f"Installing built-in appliance file {entry.name} to {full_path}")
shutil.copy(str(entry), os.path.join(dst_path, entry.name))
except OSError as e:

View File

@ -34,7 +34,7 @@ log = logging.getLogger(__name__)
class Node:
# This properties are used only on controller and are not forwarded to the compute
# These properties are used only on controller and are not forwarded to computes
CONTROLLER_ONLY_PROPERTIES = ["x", "y", "z", "locked", "width", "height", "symbol", "label", "console_host",
"port_name_format", "first_port_name", "port_segment_size", "ports",
"category", "console_auto_start"]

View File

@ -58,7 +58,7 @@ class CrashReport:
Report crash to a third party service
"""
DSN = "https://d2c9c679b20a4eb7ab275c1d9386143b@o19455.ingest.sentry.io/38482"
DSN = "https://3318fddc3b9f4752b7b00929ac2999e3@o19455.ingest.sentry.io/38482"
_instance = None
def __init__(self):

View File

@ -190,6 +190,10 @@ QEMU_CREATE_SCHEMA = {
"description": "Replicate the network connection state for links in Qemu",
"type": ["boolean", "null"],
},
"tpm": {
"description": "Enable the Trusted Platform Module (TPM) in Qemu",
"type": ["boolean", "null"],
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": ["boolean", "null"],
@ -384,6 +388,10 @@ QEMU_UPDATE_SCHEMA = {
"description": "Replicate the network connection state for links in Qemu",
"type": ["boolean", "null"],
},
"tpm": {
"description": "Enable the Trusted Platform Module (TPM) in Qemu",
"type": ["boolean", "null"],
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": ["boolean", "null"],
@ -591,6 +599,10 @@ QEMU_OBJECT_SCHEMA = {
"description": "Replicate the network connection state for links in Qemu",
"type": "boolean",
},
"tpm": {
"description": "Enable the Trusted Platform Module (TPM) in Qemu",
"type": "boolean",
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": ["boolean", "null"],
@ -665,6 +677,7 @@ QEMU_OBJECT_SCHEMA = {
"kernel_command_line",
"legacy_networking",
"replicate_network_connection_state",
"tpm",
"create_config_disk",
"on_close",
"cpu_throttling",

View File

@ -183,6 +183,11 @@ QEMU_TEMPLATE_PROPERTIES = {
"type": "boolean",
"default": True
},
"tpm": {
"description": "Enable the Trusted Platform Module (TPM) in Qemu",
"type": "boolean",
"default": False
},
"create_config_disk": {
"description": "Automatically create a config disk on HDD disk interface (secondary slave)",
"type": "boolean",

View File

@ -46,6 +46,6 @@
gtag('config', 'G-5D6FZL9923');
</script>
<script src="runtime.91a209cf21f6fb848205.js" defer></script><script src="polyfills-es5.865074f5cd9a121111a2.js" nomodule defer></script><script src="polyfills.2f91a039d848e57ff02e.js" defer></script><script src="main.41e1ff185162d1659203.js" defer></script>
<script src="runtime.91a209cf21f6fb848205.js" defer></script><script src="polyfills-es5.865074f5cd9a121111a2.js" nomodule defer></script><script src="polyfills.2f91a039d848e57ff02e.js" defer></script><script src="main.022350cecd1e6d733a93.js" defer></script>
</body></html>

View File

@ -82,6 +82,7 @@ async def subprocess_check_output(*args, cwd=None, env=None, stderr=False):
# and the code of VPCS, dynamips... Will detect it's not the correct binary
return output.decode("utf-8", errors="ignore")
async def wait_for_process_termination(process, timeout=10):
"""
Wait for a process terminate, and raise asyncio.TimeoutError in case of

View File

@ -202,6 +202,7 @@ class AsyncioTelnetServer:
except ConnectionError:
async with self._lock:
network_writer.close()
await network_writer.wait_closed()
if self._reader_process == network_reader:
self._reader_process = None
# Cancel current read from this reader
@ -216,6 +217,8 @@ class AsyncioTelnetServer:
try:
writer.write_eof()
await writer.drain()
writer.close()
await writer.wait_closed()
except (AttributeError, ConnectionError):
continue

View File

@ -19,7 +19,12 @@ import atexit
import logging
import os
import sys
try:
import importlib_resources
except ImportError:
from importlib import resources as importlib_resources
from contextlib import ExitStack
resource_manager = ExitStack()
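The try/except keeps the importlib_resources backport on Python <= 3.9 (matching the requirements.txt change below) and uses the standard library elsewhere. A small usage sketch of this pattern together with the ExitStack above (the appliance filename is made up):

try:
    import importlib_resources                      # backport, Python <= 3.9
except ImportError:
    from importlib import resources as importlib_resources

from contextlib import ExitStack

resource_manager = ExitStack()
ref = importlib_resources.files("gns3server.appliances") / "example.gns3a"
# as_file() yields a real filesystem path (extracted if necessary) and the
# ExitStack keeps that path alive until the stack is closed at exit
path = resource_manager.enter_context(importlib_resources.as_file(ref))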

View File

@ -23,8 +23,8 @@
# or negative for a release candidate or beta (after the base version
# number has been incremented)
__version__ = "2.2.35.1"
__version_info__ = (2, 2, 35, 0)
__version__ = "2.2.36"
__version_info__ = (2, 2, 36, 0)
if "dev" in __version__:
try:

View File

@ -1,4 +1,4 @@
jsonschema>=4.17.0,<4.18; python_version >= '3.7'
jsonschema>=4.17.3,<4.18; python_version >= '3.7'
jsonschema==3.2.0; python_version < '3.7' # v3.2.0 is the last version to support Python 3.6
aiohttp>=3.8.3,<3.9
aiohttp-cors>=0.7.0,<0.8
@ -6,11 +6,11 @@ aiofiles>=22.1.0,<22.2; python_version >= '3.7'
aiofiles==0.8.0; python_version < '3.7' # v0.8.0 is the last version to support Python 3.6
Jinja2>=3.1.2,<3.2; python_version >= '3.7'
Jinja2==3.0.3; python_version < '3.7' # v3.0.3 is the last version to support Python 3.6
sentry-sdk==1.10.1,<1.11
psutil==5.9.2
sentry-sdk==1.12.1,<1.13
psutil==5.9.4
async-timeout>=4.0.2,<4.1
distro>=1.7.0
py-cpuinfo>=9.0.0,<10.0
importlib-resources>=1.3
importlib-resources>=1.3; python_version <= '3.9'
setuptools>=60.8.1; python_version >= '3.7'
setuptools==59.6.0; python_version < '3.7' # v59.6.0 is the last version to support Python 3.6

View File

@ -16,6 +16,10 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import shutil
import subprocess
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
@ -39,6 +43,28 @@ class PyTest(TestCommand):
sys.exit(errcode)
BUSYBOX_PATH = "gns3server/compute/docker/resources/bin/busybox"
def copy_busybox():
if not sys.platform.startswith("linux"):
return
if os.path.isfile(BUSYBOX_PATH):
return
for bb_cmd in ("busybox-static", "busybox.static", "busybox"):
bb_path = shutil.which(bb_cmd)
if bb_path:
if subprocess.call(["ldd", bb_path],
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL):
shutil.copy2(bb_path, BUSYBOX_PATH, follow_symlinks=True)
break
else:
raise SystemExit("No static busybox found")
copy_busybox()
dependencies = open("requirements.txt", "r").read().splitlines()
setup(

View File

@ -1343,7 +1343,15 @@ async def test_start_aux(vm):
with asyncio_patch("asyncio.subprocess.create_subprocess_exec", return_value=MagicMock()) as mock_exec:
await vm._start_aux()
mock_exec.assert_called_with('docker', 'exec', '-i', 'e90e34656842', '/gns3/bin/busybox', 'script', '-qfc', 'while true; do TERM=vt100 /gns3/bin/busybox sh; done', '/dev/null', stderr=asyncio.subprocess.STDOUT, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
mock_exec.assert_called_with(
"script",
"-qfc",
"docker exec -i -t e90e34656842 /gns3/bin/busybox sh -c 'while true; do TERM=vt100 /gns3/bin/busybox sh; done'",
"/dev/null",
stderr=asyncio.subprocess.STDOUT,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE
)
async def test_create_network_interfaces(vm):

View File

@ -173,7 +173,7 @@ async def test_termination_callback(vm):
await vm._termination_callback(0)
assert vm.status == "stopped"
await queue.get(1) #  Ping
await queue.get(1) # Ping
(action, event, kwargs) = await queue.get(1)
assert action == "node.updated"
@ -401,6 +401,17 @@ async def test_spice_option(vm, fake_qemu_img_binary):
assert '-vga qxl' in ' '.join(options)
async def test_tpm_option(vm, tmpdir, fake_qemu_img_binary):
vm.manager.get_qemu_version = AsyncioMagicMock(return_value="3.1.0")
vm._tpm = True
tpm_sock = os.path.join(vm.temporary_directory, "swtpm.sock")
options = await vm._build_command()
assert '-chardev socket,id=chrtpm,path={}'.format(tpm_sock) in ' '.join(options)
assert '-tpmdev emulator,id=tpm0,chardev=chrtpm' in ' '.join(options)
assert '-device tpm-tis,tpmdev=tpm0' in ' '.join(options)
async def test_disk_options_multiple_disk(vm, tmpdir, fake_qemu_img_binary):
vm._hda_disk_image = str(tmpdir / "test0.qcow2")

View File

@ -381,13 +381,13 @@ async def test_get_free_project_name(controller):
assert controller.get_free_project_name("Hello") == "Hello"
async def test_load_base_files(controller, config, tmpdir):
async def test_install_base_configs(controller, config, tmpdir):
config.set_section_config("Server", {"configs_path": str(tmpdir)})
with open(str(tmpdir / 'iou_l2_base_startup-config.txt'), 'w+') as f:
f.write('test')
controller._load_base_files()
controller._install_base_configs()
assert os.path.exists(str(tmpdir / 'iou_l3_base_startup-config.txt'))
# Check that the file has not been overwritten
@ -410,6 +410,7 @@ def test_appliances(controller, tmpdir):
with open(str(tmpdir / "my_appliance2.gns3a"), 'w+') as f:
json.dump(my_appliance, f)
controller.appliance_manager.install_builtin_appliances()
with patch("gns3server.config.Config.get_section_config", return_value={"appliances_path": str(tmpdir)}):
controller.appliance_manager.load_appliances()
assert len(controller.appliance_manager.appliances) > 0