Mirror of https://github.com/GNS3/gns3-server (synced 2024-11-12 19:38:57 +00:00)
Merge branch '2.1' into improved-export-project
Commit fe59c937d5
@@ -63,3 +63,5 @@ license_check = True
 [Qemu]
 ; !! Remember to add the gns3 user to the KVM group, otherwise you will not have read / write permissions to /dev/kvm !!
 enable_kvm = True
+; Require KVM to be installed in order to start VMs
+require_kvm = True
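For context, the new require_kvm flag sits next to enable_kvm in the [Qemu] section; a later hunk in this diff reads it with get_section_config("Qemu").getboolean("require_kvm", True). A minimal sketch of the same lookup using only the standard library, assuming an illustrative config path (the real server goes through its own Config wrapper, not configparser):

import configparser

# Sketch only: read the [Qemu] flags the way a standalone script might.
# The path below is an assumed example, not necessarily where the server looks.
config = configparser.ConfigParser()
config.read("/etc/gns3/gns3_server.conf")

enable_kvm = config.getboolean("Qemu", "enable_kvm", fallback=True)
require_kvm = config.getboolean("Qemu", "require_kvm", fallback=True)
print("enable_kvm:", enable_kvm, "require_kvm:", require_kvm)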
@@ -537,7 +537,7 @@ class BaseManager:
         directory = self.get_images_directory()
         path = os.path.abspath(os.path.join(directory, *os.path.split(filename)))
         if os.path.commonprefix([directory, path]) != directory:
-            raise aiohttp.web.HTTPForbidden(text="Could not write image: {}, {} is forbiden".format(filename, path))
+            raise aiohttp.web.HTTPForbidden(text="Could not write image: {}, {} is forbidden".format(filename, path))
         log.info("Writing image file %s", path)
         try:
             remove_checksum(path)
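Note on the surrounding check: the image filename is joined onto the images directory, normalised with os.path.abspath, and rejected unless the result still has that directory as a prefix, which blocks path traversal out of the images folder. A self-contained sketch of the same idea (the function name and directories are illustrative, not the server's API):

import os

def is_inside(directory: str, filename: str) -> bool:
    """Return True if *filename* resolves to a path under *directory*."""
    directory = os.path.abspath(directory)
    path = os.path.abspath(os.path.join(directory, *os.path.split(filename)))
    return os.path.commonprefix([directory, path]) == directory

# Illustrative checks (paths are made up):
print(is_inside("/opt/gns3/images", "QEMU/ubuntu.qcow2"))   # True
print(is_inside("/opt/gns3/images", "../../etc/passwd"))    # False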
@@ -65,7 +65,7 @@ class IOUVM(BaseNode):
     :param console: TCP console port
     """

-    def __init__(self, name, node_id, project, manager, console=None):
+    def __init__(self, name, node_id, project, manager, path=None, console=None):

         super().__init__(name, node_id, project, manager, console=console)

@@ -73,8 +73,8 @@ class IOUVM(BaseNode):
         self._telnet_server = None
         self._iou_stdout_file = ""
         self._started = False
-        self._path = None
         self._nvram_watcher = None
+        self._path = self.manager.get_abs_image_path(path)

         # IOU settings
         self._ethernet_adapters = []
@@ -137,6 +137,7 @@ class IOUVM(BaseNode):
         """

+        self._path = self.manager.get_abs_image_path(path)
         log.info('IOU "{name}" [{id}]: IOU image updated to "{path}"'.format(name=self._name, id=self._id, path=self._path))

     @property
     def use_default_iou_values(self):
@@ -162,6 +163,28 @@ class IOUVM(BaseNode):
         else:
             log.info('IOU "{name}" [{id}]: does not use the default IOU image values'.format(name=self._name, id=self._id))

+    @asyncio.coroutine
+    def update_default_iou_values(self):
+        """
+        Finds the default RAM and NVRAM values for the IOU image.
+        """
+
+        try:
+            output = yield from gns3server.utils.asyncio.subprocess_check_output(self._path, "-h", cwd=self.working_dir, stderr=True)
+            match = re.search("-n <n>\s+Size of nvram in Kb \(default ([0-9]+)KB\)", output)
+            if match:
+                self.nvram = int(match.group(1))
+            match = re.search("-m <n>\s+Megabytes of router memory \(default ([0-9]+)MB\)", output)
+            if match:
+                self.ram = int(match.group(1))
+        except (ValueError, OSError, subprocess.SubprocessError) as e:
+            log.warning("could not find default RAM and NVRAM values for {}: {}".format(os.path.basename(self._path), e))
+
+    @asyncio.coroutine
+    def create(self):
+
+        yield from self.update_default_iou_values()
+
     def _check_requirements(self):
         """
         Checks the IOU image.
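The new update_default_iou_values() coroutine runs the IOU image with -h and scrapes the help text for the default NVRAM and RAM sizes. A quick illustration of those two regexes against a made-up help excerpt (the sample text is an assumption; real IOU output varies by image):

import re

# Assumed excerpt of what "<iou image> -h" might print; real output varies.
sample_help = (
    "-n <n>        Size of nvram in Kb (default 128KB)\n"
    "-m <n>        Megabytes of router memory (default 256MB)\n"
)

nvram_match = re.search(r"-n <n>\s+Size of nvram in Kb \(default ([0-9]+)KB\)", sample_help)
ram_match = re.search(r"-m <n>\s+Megabytes of router memory \(default ([0-9]+)MB\)", sample_help)

nvram = int(nvram_match.group(1)) if nvram_match else None
ram = int(ram_match.group(1)) if ram_match else None
print("default nvram (KB):", nvram, "- default ram (MB):", ram)  # 128 / 256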
@@ -479,6 +502,9 @@ class IOUVM(BaseNode):
             yield from self._start_ubridge()

         self._create_netmap_config()
+        if self.use_default_iou_values:
+            # make sure we have the default nvram amount to correctly push the configs
+            yield from self.update_default_iou_values()
         self._push_configs_to_nvram()

         # check if there is enough RAM to run
@@ -16,9 +16,14 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.

 import aiohttp
+import psutil
+import platform
 from .project import Project
 from uuid import UUID

+import logging
+log = logging.getLogger(__name__)
+

 class ProjectManager:

@@ -70,6 +75,26 @@ class ProjectManager:
             raise aiohttp.web.HTTPNotFound(text="Project ID {} doesn't exist".format(project_id))
         return self._projects[project_id]

+    def _check_available_disk_space(self, project):
+        """
+        Sends a warning notification if disk space is getting low.
+
+        :param project: project instance
+        """
+
+        try:
+            used_disk_space = psutil.disk_usage(project.path).percent
+        except FileNotFoundError:
+            log.warning('Could not find "{}" when checking for used disk space'.format(project.path))
+            return
+        # send a warning if used disk space is >= 90%
+        if used_disk_space >= 90:
+            message = 'Only {}% or less of disk space detected in "{}" on "{}"'.format(used_disk_space,
+                                                                                       project.path,
+                                                                                       platform.node())
+            log.warning(message)
+            project.emit("log.warning", {"message": message})
+
     def create_project(self, name=None, project_id=None, path=None):
         """
         Create a project and keep a references to it in project manager.
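psutil.disk_usage() reports usage for the filesystem containing a given path, and the new check emits a warning once usage reaches 90%. A hedged sketch of the same threshold logic as a free function (the warn callback is illustrative and the example path is arbitrary; the 90% threshold mirrors the diff):

import platform
import psutil

def check_disk_space(path, warn, threshold=90):
    """Emit a warning via *warn* if the filesystem holding *path* is >= threshold% full."""
    try:
        used = psutil.disk_usage(path).percent
    except FileNotFoundError:
        warn('Could not find "{}" when checking for used disk space'.format(path))
        return
    if used >= threshold:
        warn('Disk holding "{}" on "{}" is {}% full'.format(path, platform.node(), used))

# Illustrative call; in the server the message is also emitted on the project's log stream.
check_disk_space("/tmp", print)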
@@ -80,6 +105,7 @@ class ProjectManager:
         if project_id is not None and project_id in self._projects:
             return self._projects[project_id]
         project = Project(name=name, project_id=project_id, path=path)
+        self._check_available_disk_space(project)
         self._projects[project.id] = project
         return project

@@ -32,6 +32,7 @@ import gns3server
 import subprocess

 from gns3server.utils import parse_version
+from gns3server.utils.asyncio import subprocess_check_output
 from .qemu_error import QemuError
 from ..adapters.ethernet_adapter import EthernetAdapter
 from ..nios.nio_udp import NIOUDP
@@ -74,6 +75,7 @@ class QemuVM(BaseNode):
         self._cpulimit_process = None
         self._monitor = None
         self._stdout_file = ""
+        self._qemu_img_stdout_file = ""
         self._execute_lock = asyncio.Lock()
         self._local_udp_tunnels = {}

@@ -1282,7 +1284,21 @@ class QemuVM(BaseNode):
                 with open(self._stdout_file, "rb") as file:
                     output = file.read().decode("utf-8", errors="replace")
             except OSError as e:
-                log.warn("Could not read {}: {}".format(self._stdout_file, e))
+                log.warning("Could not read {}: {}".format(self._stdout_file, e))
         return output

+    def read_qemu_img_stdout(self):
+        """
+        Reads the standard output of the QEMU-IMG process.
+        """
+
+        output = ""
+        if self._qemu_img_stdout_file:
+            try:
+                with open(self._qemu_img_stdout_file, "rb") as file:
+                    output = file.read().decode("utf-8", errors="replace")
+            except OSError as e:
+                log.warning("Could not read {}: {}".format(self._qemu_img_stdout_file, e))
+        return output
+
     def is_running(self):
@@ -1360,6 +1376,19 @@ class QemuVM(BaseNode):

         return qemu_img_path

+    @asyncio.coroutine
+    def _qemu_img_exec(self, command):
+
+        self._qemu_img_stdout_file = os.path.join(self.working_dir, "qemu-img.log")
+        log.info("logging to {}".format(self._qemu_img_stdout_file))
+        command_string = " ".join(shlex.quote(s) for s in command)
+        log.info("Executing qemu-img with: {}".format(command_string))
+        with open(self._qemu_img_stdout_file, "w", encoding="utf-8") as fd:
+            process = yield from asyncio.create_subprocess_exec(*command, stdout=fd, stderr=subprocess.STDOUT, cwd=self.working_dir)
+            retcode = yield from process.wait()
+        log.info("{} returned with {}".format(self._get_qemu_img(), retcode))
+        return retcode
+
     @asyncio.coroutine
     def _disk_options(self):
         options = []
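The new _qemu_img_exec() helper centralises qemu-img invocations: the command is logged, stdout and stderr are redirected into qemu-img.log in the working directory, and the exit code is returned for the callers below to interpret. A rough sketch of the same pattern outside the class, written with modern async/await rather than the yield-from style the codebase used at the time (file names and the example command are illustrative):

import asyncio
import os
import shlex
import subprocess

async def run_and_log(command, working_dir, log_name="qemu-img.log"):
    """Run *command*, send stdout+stderr to a log file, return the exit code."""
    log_path = os.path.join(working_dir, log_name)
    print("Executing:", " ".join(shlex.quote(s) for s in command))
    with open(log_path, "w", encoding="utf-8") as fd:
        process = await asyncio.create_subprocess_exec(*command, stdout=fd,
                                                       stderr=subprocess.STDOUT,
                                                       cwd=working_dir)
        return await process.wait()

# Illustrative call (assumes a qemu-img binary on PATH):
# retcode = asyncio.run(run_and_log(["qemu-img", "info", "disk.qcow2"], "/tmp"))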
@@ -1381,29 +1410,46 @@ class QemuVM(BaseNode):
                    raise QemuError("{} disk image '{}' linked to '{}' is not accessible".format(disk_name, disk_image, os.path.realpath(disk_image)))
                else:
                    raise QemuError("{} disk image '{}' is not accessible".format(disk_name, disk_image))
+            else:
+                try:
+                    # check for corrupt disk image
+                    retcode = yield from self._qemu_img_exec([qemu_img_path, "check", disk_image])
+                    if retcode == 3:
+                        # image has leaked clusters, but is not corrupted, let's try to fix it
+                        log.warning("Qemu image {} has leaked clusters".format(disk_image))
+                        if (yield from self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", "{}".format(disk_image)])) == 3:
+                            self.project.emit("log.warning", {"message": "Qemu image '{}' has leaked clusters and could not be fixed".format(disk_image)})
+                    elif retcode == 2:
+                        # image is corrupted, let's try to fix it
+                        log.warning("Qemu image {} is corrupted".format(disk_image))
+                        if (yield from self._qemu_img_exec([qemu_img_path, "check", "-r", "all", "{}".format(disk_image)])) == 2:
+                            self.project.emit("log.warning", {"message": "Qemu image '{}' is corrupted and could not be fixed".format(disk_image)})
+                except (OSError, subprocess.SubprocessError) as e:
+                    stdout = self.read_qemu_img_stdout()
+                    raise QemuError("Could not check '{}' disk image: {}\n{}".format(disk_name, e, stdout))
+
            if self.linked_clone:
                disk = os.path.join(self.working_dir, "{}_disk.qcow2".format(disk_name))
                if not os.path.exists(disk):
                    # create the disk
                    try:
                        command = [qemu_img_path, "create", "-o", "backing_file={}".format(disk_image), "-f", "qcow2", disk]
-                        command_string = " ".join(shlex.quote(s) for s in command)
-                        log.info("Executing qemu-img with: {}".format(command_string))
-                        process = yield from asyncio.create_subprocess_exec(*command)
-                        retcode = yield from process.wait()
-                        if retcode is not None and retcode != 0:
-                            raise QemuError("Could not create {} disk image: qemu-img returned with {}".format(disk_name,
-                                                                                                               retcode))
-                        log.info("{} returned with {}".format(qemu_img_path, retcode))
+                        retcode = yield from self._qemu_img_exec(command)
+                        if retcode:
+                            stdout = self.read_qemu_img_stdout()
+                            raise QemuError("Could not create '{}' disk image: qemu-img returned with {}\n{}".format(disk_name,
+                                                                                                                     retcode,
+                                                                                                                     stdout))
                    except (OSError, subprocess.SubprocessError) as e:
-                        raise QemuError("Could not create {} disk image: {}".format(disk_name, e))
+                        stdout = self.read_qemu_img_stdout()
+                        raise QemuError("Could not create '{}' disk image: {}\n{}".format(disk_name, e, stdout))
                else:
-                    # The disk exists we check if the clone work
+                    # The disk exists we check if the clone works
                    try:
                        qcow2 = Qcow2(disk)
                        yield from qcow2.rebase(qemu_img_path, disk_image)
                    except (Qcow2Error, OSError) as e:
-                        raise QemuError("Could not use qcow2 disk image {} for {} {}".format(disk_image, disk_name, e))
+                        raise QemuError("Could not use qcow2 disk image '{}' for {} {}".format(disk_image, disk_name, e))

            else:
                disk = disk_image
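The check relies on the exit codes that qemu-img check reports, as the diff interprets them: 0 for a clean image, 2 for a corrupted image, 3 for leaked clusters, with check -r all or check -r leaks used to attempt a repair. A standalone sketch of that decision flow using subprocess (the helper and paths are illustrative, and it assumes a qemu-img binary on PATH):

import subprocess

def check_and_repair(qemu_img, disk_image):
    """Return True if the image looks usable, attempting a repair when needed."""
    def run(*args):
        # qemu-img's exit code carries the verdict, so do not raise on non-zero.
        return subprocess.run([qemu_img, *args], stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT).returncode

    retcode = run("check", disk_image)
    if retcode == 3:   # leaked clusters, image still usable
        return run("check", "-r", "leaks", disk_image) != 3
    if retcode == 2:   # corrupted image
        return run("check", "-r", "all", disk_image) != 2
    return retcode == 0

# Illustrative call (paths are made up):
# print(check_and_repair("qemu-img", "/tmp/test_disk.qcow2"))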
@@ -1557,7 +1603,9 @@ class QemuVM(BaseNode):
             return []
         if len(os.environ.get("DISPLAY", "")) > 0:
             return []
-        return ["-nographic"]
+        if "-nographic" not in self._options:
+            return ["-nographic"]
+        return []

     def _run_with_kvm(self, qemu_path, options):
         """
@@ -1576,7 +1624,10 @@ class QemuVM(BaseNode):
                return False

            if not os.path.exists("/dev/kvm"):
-                raise QemuError("KVM acceleration cannot be used (/dev/kvm doesn't exist). You can turn off KVM support in the gns3_server.conf by adding enable_kvm = false to the [Qemu] section.")
+                if self.manager.config.get_section_config("Qemu").getboolean("require_kvm", True):
+                    raise QemuError("KVM acceleration cannot be used (/dev/kvm doesn't exist). You can turn off KVM support in the gns3_server.conf by adding enable_kvm = false to the [Qemu] section.")
+                else:
+                    return False
            return True
        return False

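The change makes a missing /dev/kvm fatal only when require_kvm is set (the default); otherwise the VM quietly falls back to running without KVM. A small sketch of that decision, assuming a QemuError exception class and a boolean require_kvm taken from the config (both stand-ins for the server's own objects):

import os

class QemuError(Exception):
    """Stand-in for the server's QemuError."""

def can_use_kvm(require_kvm=True, kvm_device="/dev/kvm"):
    """Return True if KVM can be used, False to fall back, or raise if it is required."""
    if not os.path.exists(kvm_device):
        if require_kvm:
            raise QemuError("KVM acceleration cannot be used ({} doesn't exist); "
                            "set require_kvm = false in the [Qemu] section to allow a fallback".format(kvm_device))
        return False
    return True

# Illustrative use:
# use_kvm = can_use_kvm(require_kvm=False)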
@@ -1588,6 +1639,10 @@ class QemuVM(BaseNode):
        """

        additional_options = self._options.strip()
+        additional_options = additional_options.replace("%vm-name%", self._name)
+        additional_options = additional_options.replace("%vm-id%", self._id)
+        additional_options = additional_options.replace("%project-id%", self.project.id)
+        additional_options = additional_options.replace("%project-path%", self.project.path)
        command = [self.qemu_path]
        command.extend(["-name", self._name])
        command.extend(["-m", "{}M".format(self._ram)])
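These substitutions let users put %vm-name%, %vm-id%, %project-id% and %project-path% placeholders in the extra QEMU options and have them expanded per VM when the command line is built. A hedged illustration of the expansion (the sample values and option string are invented; the server fills them from the node and project objects):

# Sample values are invented for illustration.
placeholders = {
    "%vm-name%": "router-1",
    "%vm-id%": "9f7ab1f4-1111-2222-3333-444455556666",
    "%project-id%": "c9dc56bf-aaaa-bbbb-cccc-ddddeeeeffff",
    "%project-path%": "/opt/gns3/projects/demo",
}

additional_options = "-serial mon:stdio -drive file=%project-path%/%vm-name%.qcow2"
for key, value in placeholders.items():
    additional_options = additional_options.replace(key, value)

print(additional_options)
# -serial mon:stdio -drive file=/opt/gns3/projects/demo/router-1.qcow2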
@@ -846,10 +846,8 @@ class VirtualBoxVM(BaseNode):
             nio = self._local_udp_tunnels[adapter_number][0]

             if nio:
-                if not self._use_any_adapter and attachment not in ("none", "null", "generic"):
-                    raise VirtualBoxError("Attachment ({}) already configured on adapter {}. "
-                                          "Please set it to 'Not attached' to allow GNS3 to use it.".format(attachment,
-                                                                                                            adapter_number + 1))
+                if not self._use_any_adapter and attachment in ("nat", "bridged", "intnet", "hostonly", "natnetwork"):
+                    continue

                 yield from self._modify_vm("--nictrace{} off".format(adapter_number + 1))
                 vbox_adapter_type = "82540EM"
@@ -972,23 +970,40 @@ class VirtualBoxVM(BaseNode):
            raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
                                                                                                            adapter_number=adapter_number))

+        # check if trying to connect to a nat, bridged, host-only or any other special adapter
+        nic_attachments = yield from self._get_nic_attachements(self._maximum_adapters)
+        attachment = nic_attachments[adapter_number]
+        if attachment in ("nat", "bridged", "intnet", "hostonly", "natnetwork"):
+            if not self._use_any_adapter:
+                raise VirtualBoxError("Attachment '{attachment}' is already configured on adapter {adapter_number}. "
+                                      "Please remove it or allow VirtualBox VM '{name}' to use any adapter.".format(attachment=attachment,
+                                                                                                                    adapter_number=adapter_number,
+                                                                                                                    name=self.name))
+            elif self.is_running():
+                # dynamically configure an UDP tunnel attachment if the VM is already running
+                local_nio = self._local_udp_tunnels[adapter_number][0]
+                if local_nio and isinstance(local_nio, NIOUDP):
+                    yield from self._control_vm("nic{} generic UDPTunnel".format(adapter_number + 1))
+                    yield from self._control_vm("nicproperty{} sport={}".format(adapter_number + 1, local_nio.lport))
+                    yield from self._control_vm("nicproperty{} dest={}".format(adapter_number + 1, local_nio.rhost))
+                    yield from self._control_vm("nicproperty{} dport={}".format(adapter_number + 1, local_nio.rport))
+                    yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))
+
        if self.is_running():
            try:
                yield from self.add_ubridge_udp_connection("VBOX-{}-{}".format(self._id, adapter_number),
                                                           self._local_udp_tunnels[adapter_number][1],
                                                           nio)
            except KeyError:
-                raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(
-                    name=self.name,
-                    adapter_number=adapter_number))
+                raise VirtualBoxError("Adapter {adapter_number} doesn't exist on VirtualBox VM '{name}'".format(name=self.name,
+                                                                                                                adapter_number=adapter_number))
            yield from self._control_vm("setlinkstate{} on".format(adapter_number + 1))

        adapter.add_nio(0, nio)
-        log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(
-            name=self.name,
-            id=self.id,
-            nio=nio,
-            adapter_number=adapter_number))
+        log.info("VirtualBox VM '{name}' [{id}]: {nio} added to adapter {adapter_number}".format(name=self.name,
+                                                                                                 id=self.id,
+                                                                                                 nio=nio,
+                                                                                                 adapter_number=adapter_number))

    @asyncio.coroutine
    def adapter_update_nio_binding(self, adapter_number, nio):
@@ -736,13 +736,20 @@ class VMwareVM(BaseNode):

        self._read_vmx_file()
        # check if trying to connect to a nat, bridged or host-only adapter
-        if not self._use_any_adapter and self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
+        if self._get_vmx_setting("ethernet{}.present".format(adapter_number), "TRUE"):
            # check for the connection type
            connection_type = "ethernet{}.connectiontype".format(adapter_number)
            if connection_type in self._vmx_pairs and self._vmx_pairs[connection_type] in ("nat", "bridged", "hostonly"):
-                raise VMwareError("Attachment ({}) already configured on network adapter {}. "
-                                  "Please remove it or allow GNS3 to use any adapter.".format(self._vmx_pairs[connection_type],
-                                                                                              adapter_number))
+                if not self._use_any_adapter:
+                    raise VMwareError("Attachment '{attachment}' is already configured on network adapter {adapter_number}. "
+                                      "Please remove it or allow VMware VM '{name}' to use any adapter.".format(attachment=self._vmx_pairs[connection_type],
+                                                                                                                adapter_number=adapter_number,
+                                                                                                                name=self.name))
+                elif self.is_running():
+                    raise VMwareError("Attachment '{attachment}' is configured on network adapter {adapter_number}. "
+                                      "Please stop VMware VM '{name}' to link to this adapter and allow GNS3 to change the attachment type.".format(attachment=self._vmx_pairs[connection_type],
+                                                                                                                                                     adapter_number=adapter_number,
+                                                                                                                                                     name=self.name))

        adapter.add_nio(0, nio)
        if self._started and self._ubridge_hypervisor:
@@ -587,7 +587,7 @@ class Controller:
    @property
    def projects(self):
        """
-        :returns: The dictionary of computes managed by GNS3
+        :returns: The dictionary of projects managed by GNS3
        """
        return self._projects

@@ -60,6 +60,7 @@ class IOUHandler:
        vm = yield from iou.create_node(request.json.pop("name"),
                                        request.match_info["project_id"],
                                        request.json.get("node_id"),
+                                        path=request.json.get("path"),
                                        console=request.json.get("console"))

        for name, value in request.json.items():
@@ -68,6 +69,8 @@ class IOUHandler:
                continue
            if name == "private_config_content" and (vm.private_config_content and len(vm.private_config_content) > 0):
                continue
+            if request.json.get("use_default_iou_values") and (name == "ram" or name == "nvram"):
+                continue
            setattr(vm, name, value)
        response.set_status(201)
        response.json(vm)
@@ -114,6 +117,11 @@ class IOUHandler:
        for name, value in request.json.items():
            if hasattr(vm, name) and getattr(vm, name) != value:
                setattr(vm, name, value)
+
+        if vm.use_default_iou_values:
+            # update the default IOU values in case the image or use_default_iou_values have changed
+            # this is important to have the correct NVRAM amount in order to correctly push the configs to the NVRAM
+            yield from vm.update_default_iou_values()
        vm.updated()
        response.json(vm)

@@ -212,6 +212,7 @@ report_errors = True

 [Qemu]
 enable_kvm = True
+require_kvm = True
 EOFC

 chown -R gns3:gns3 /etc/gns3
@@ -298,6 +299,7 @@ report_errors = True

 [Qemu]
 enable_kvm = True
+require_kvm = True
 EOFSERVER

 log "Install packages for Open VPN"
@@ -79,7 +79,7 @@ def test_iou_create_with_params(http_compute, project, base_params):
    params["ethernet_adapters"] = 0
    params["l1_keepalives"] = True
    params["startup_config_content"] = "hostname test"
-    params["use_default_iou_values"] = True
+    params["use_default_iou_values"] = False

    response = http_compute.post("/projects/{project_id}/iou/nodes".format(project_id=project.id), params, example=True)
    assert response.status == 201
@@ -91,7 +91,7 @@ def test_iou_create_with_params(http_compute, project, base_params):
    assert response.json["ram"] == 1024
    assert response.json["nvram"] == 512
    assert response.json["l1_keepalives"] is True
-    assert response.json["use_default_iou_values"] is True
+    assert response.json["use_default_iou_values"] is False

    with open(startup_config_file(project, response.json)) as f:
        assert f.read() == "hostname test"