Mirror of https://github.com/GNS3/gns3-server
Support deleting a Qemu disk image from the API
Return the real disk image name in the 'hdX_disk_image_backed' property for Qemu VMs
Commit: e50bed5bee
Parent: a1c76ec4f2
@@ -162,6 +162,22 @@ async def update_qemu_disk_image(
     return Response(status_code=status.HTTP_204_NO_CONTENT)
 
 
+@router.delete(
+    "/{node_id}/disk_image/{disk_name}",
+    status_code=status.HTTP_204_NO_CONTENT
+)
+async def delete_qemu_disk_image(
+    disk_name: str,
+    node: QemuVM = Depends(dep_node)
+) -> Response:
+    """
+    Delete a Qemu disk image.
+    """
+
+    node.delete_disk_image(disk_name)
+    return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
 @router.post("/{node_id}/start", status_code=status.HTTP_204_NO_CONTENT)
 async def start_qemu_node(node: QemuVM = Depends(dep_node)) -> Response:
     """
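For illustration (not part of the commit): a minimal httpx sketch against the new compute route. The server address, the IDs, and the /v3/compute/projects/{project_id}/qemu/nodes prefix are assumptions; only the /{node_id}/disk_image/{disk_name} suffix and the 204/409 status codes come from the route and the tests further down.

import httpx

# Assumed server address and placeholder IDs; the route prefix is an assumption.
base_url = "http://localhost:3080/v3/compute"
project_id = "11111111-1111-1111-1111-111111111111"
node_id = "22222222-2222-2222-2222-222222222222"

url = f"{base_url}/projects/{project_id}/qemu/nodes/{node_id}/disk_image/hdb_disk.qcow2"
response = httpx.delete(url)
print(response.status_code)  # 204 on success, 409 if the disk image does not exist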
@@ -52,12 +52,13 @@ router = APIRouter()
 @router.get("", response_model=List[schemas.Image])
 async def get_images(
     images_repo: ImagesRepository = Depends(get_repository(ImagesRepository)),
+    image_type: Optional[schemas.ImageType] = None
 ) -> List[schemas.Image]:
     """
     Return all images.
     """
 
-    return await images_repo.get_images()
+    return await images_repo.get_images(image_type)
 
 
 @router.post("/upload/{image_path:path}", response_model=schemas.Image, status_code=status.HTTP_201_CREATED)
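Because image_type is declared as an optional parameter with no path placeholder, FastAPI exposes it as a query-string filter. A short sketch (not part of the commit); the /v3/images mount point and the "qemu" value are assumptions:

import httpx

base_url = "http://localhost:3080/v3/images"  # assumed mount point

all_images = httpx.get(base_url).json()                                   # unfiltered, as before
qemu_images = httpx.get(base_url, params={"image_type": "qemu"}).json()   # filtered by image type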
@@ -348,6 +348,21 @@ async def update_disk_image(
     return Response(status_code=status.HTTP_204_NO_CONTENT)
 
 
+@router.delete("/{node_id}/qemu/disk_image/{disk_name}", status_code=status.HTTP_204_NO_CONTENT)
+async def delete_disk_image(
+    disk_name: str,
+    node: Node = Depends(dep_node)
+) -> Response:
+    """
+    Delete a Qemu disk image.
+    """
+
+    if node.node_type != "qemu":
+        raise ControllerBadRequestError("Deleting a disk image is only supported on a Qemu node")
+    await node.delete(f"/disk_image/{disk_name}")
+    return Response(status_code=status.HTTP_204_NO_CONTENT)
+
+
 @router.get("/{node_id}/files/{file_path:path}")
 async def get_file(file_path: str, node: Node = Depends(dep_node)) -> Response:
     """
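The controller route only checks the node type and then forwards the request to the compute endpoint above via node.delete(). A hedged sketch of the controller-side call (not part of the commit); the /v3/projects prefix is an assumption, while the /qemu/disk_image/{disk_name} suffix and the status codes match the route and the tests further down:

import httpx

base_url = "http://localhost:3080/v3"  # assumed controller mount point
project_id = "11111111-1111-1111-1111-111111111111"
node_id = "22222222-2222-2222-2222-222222222222"

url = f"{base_url}/projects/{project_id}/nodes/{node_id}/qemu/disk_image/hda_disk.qcow2"
response = httpx.delete(url)

# Expected: 204 when the compute deleted the file, 400 if the node is not a Qemu node,
# 409 if the disk image does not exist on the compute.
print(response.status_code)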
@@ -163,7 +163,7 @@ class Router(BaseNode):
             "dynamips_id": self._dynamips_id,
             "platform": self._platform,
             "image": self._image,
-            "image_md5sum": md5sum(self._image),
+            "image_md5sum": md5sum(self._image, self._working_directory),
             "ram": self._ram,
             "nvram": self._nvram,
             "mmap": self._mmap,
@@ -231,7 +231,7 @@ class IOUVM(BaseNode):
             "status": self.status,
             "project_id": self.project.id,
             "path": self.path,
-            "md5sum": gns3server.utils.images.md5sum(self.path),
+            "md5sum": gns3server.utils.images.md5sum(self.path, self.working_path),
             "ethernet_adapters": len(self._ethernet_adapters),
             "serial_adapters": len(self._serial_adapters),
             "ram": self._ram,
@@ -1662,6 +1662,22 @@ class QemuVM(BaseNode):
             stdout = self.read_qemu_img_stdout()
             raise QemuError(f"Could not update '{disk_name}' disk image: {e}\n{stdout}")
 
+    def delete_disk_image(self, disk_name):
+        """
+        Delete a Qemu disk
+
+        :param disk_name: disk name
+        """
+
+        disk_path = os.path.join(self.working_dir, disk_name)
+        if not os.path.exists(disk_path):
+            raise QemuError(f"Qemu disk image '{disk_name}' does not exist")
+
+        try:
+            os.remove(disk_path)
+        except OSError as e:
+            raise QemuError(f"Could not delete '{disk_name}' disk image: {e}")
+
     @property
     def started(self):
         """
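A small usage sketch of the new QemuVM.delete_disk_image() method (not part of the commit); the QemuError import path is assumed and the helper itself is hypothetical:

from gns3server.compute.qemu.qemu_error import QemuError  # import path assumed


def remove_extra_disk(vm, disk_name: str) -> bool:
    """Best-effort removal of a disk image from a QemuVM working directory."""
    try:
        vm.delete_disk_image(disk_name)  # vm is assumed to be a QemuVM instance
        return True
    except QemuError as e:
        # Raised when the file is missing from vm.working_dir or os.remove() fails.
        print(f"Could not delete '{disk_name}': {e}")
        return False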
@@ -2042,7 +2058,7 @@ class QemuVM(BaseNode):
         drives = ["a", "b", "c", "d"]
 
         for disk_index, drive in enumerate(drives):
-            # prioritize config disk over harddisk d
+            # prioritize config disk over normal disks
             if drive == "d" and self._create_config_disk:
                 continue
 
@@ -2056,34 +2072,44 @@ class QemuVM(BaseNode):
                 interface = "ide"
                 setattr(self, f"hd{drive}_disk_interface", interface)
 
-            disk_name = "hd" + drive
+            disk_name = f"hd{drive}"
             if not os.path.isfile(disk_image) or not os.path.exists(disk_image):
                 if os.path.islink(disk_image):
                     raise QemuError(
-                        f"{disk_name} disk image '{disk_image}' linked to '{os.path.realpath(disk_image)}' is not accessible"
+                        f"'{disk_name}' disk image linked to "
+                        f"'{os.path.realpath(disk_image)}' is not accessible"
                     )
                 else:
-                    raise QemuError(f"{disk_name} disk image '{disk_image}' is not accessible")
+                    raise QemuError(f"'{disk_image}' is not accessible")
             else:
                 try:
                     # check for corrupt disk image
                     retcode = await self._qemu_img_exec([qemu_img_path, "check", disk_image])
-                    # ignore retcode == 1, one reason is that the image is encrypted and
-                    # there is no encrypt.key-secret available
                     if retcode == 3:
                         # image has leaked clusters, but is not corrupted, let's try to fix it
-                        log.warning(f"Qemu image {disk_image} has leaked clusters")
-                        if await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", "{}".format(disk_image)]) == 3:
-                            self.project.emit("log.warning", {"message": "Qemu image '{}' has leaked clusters and could not be fixed".format(disk_image)})
+                        log.warning(f"Disk image '{disk_image}' has leaked clusters")
+                        if await self._qemu_img_exec([qemu_img_path, "check", "-r", "leaks", f"{disk_image}"]) == 3:
+                            self.project.emit(
+                                "log.warning",
+                                {"message": f"Disk image '{disk_image}' has leaked clusters and could not be fixed"}
+                            )
                     elif retcode == 2:
                         # image is corrupted, let's try to fix it
-                        log.warning(f"Qemu image {disk_image} is corrupted")
-                        if await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", "{}".format(disk_image)]) == 2:
-                            self.project.emit("log.warning", {"message": "Qemu image '{}' is corrupted and could not be fixed".format(disk_image)})
+                        log.warning(f"Disk image '{disk_image}' is corrupted")
+                        if await self._qemu_img_exec([qemu_img_path, "check", "-r", "all", f"{disk_image}"]) == 2:
+                            self.project.emit(
+                                "log.warning",
+                                {"message": f"Disk image '{disk_image}' is corrupted and could not be fixed"}
+                            )
+                    # ignore retcode == 1. One reason is that the image is encrypted and there is no encrypt.key-secret available
                 except (OSError, subprocess.SubprocessError) as e:
                     stdout = self.read_qemu_img_stdout()
                     raise QemuError(f"Could not check '{disk_name}' disk image: {e}\n{stdout}")
 
-            if self.linked_clone:
+            if self.linked_clone and os.path.dirname(disk_image) != self.working_dir:
                 #cloned_disk_image = os.path.splitext(os.path.basename(disk_image))
                 disk = os.path.join(self.working_dir, f"{disk_name}_disk.qcow2")
                 if not os.path.exists(disk):
                     # create the disk
@@ -2091,9 +2117,9 @@ class QemuVM(BaseNode):
                 else:
                     backing_file_format = await self._find_disk_file_format(disk_image)
                     if not backing_file_format:
-                        raise QemuError("Could not detect format for disk image: {}".format(disk_image))
+                        raise QemuError(f"Could not detect format for disk image '{disk_image}'")
                     # Rebase the image. This is in case the base image moved to a different directory,
                     # which will be the case if we imported a portable project. This uses
                     # get_abs_image_path(hdX_disk_image) and ignores the old base path embedded
                     # in the qcow2 file itself.
                     try:
@@ -2470,20 +2496,30 @@ class QemuVM(BaseNode):
                 answer[field] = getattr(self, field)
             except AttributeError:
                 pass
-        answer["hda_disk_image"] = self.manager.get_relative_image_path(self._hda_disk_image, self.working_dir)
-        answer["hda_disk_image_md5sum"] = md5sum(self._hda_disk_image)
-        answer["hdb_disk_image"] = self.manager.get_relative_image_path(self._hdb_disk_image, self.working_dir)
-        answer["hdb_disk_image_md5sum"] = md5sum(self._hdb_disk_image)
-        answer["hdc_disk_image"] = self.manager.get_relative_image_path(self._hdc_disk_image, self.working_dir)
-        answer["hdc_disk_image_md5sum"] = md5sum(self._hdc_disk_image)
-        answer["hdd_disk_image"] = self.manager.get_relative_image_path(self._hdd_disk_image, self.working_dir)
-        answer["hdd_disk_image_md5sum"] = md5sum(self._hdd_disk_image)
+
+        for drive in ["a", "b", "c", "d"]:
+            disk_image = getattr(self, f"_hd{drive}_disk_image")
+            if not disk_image:
+                continue
+            answer[f"hd{drive}_disk_image"] = self.manager.get_relative_image_path(disk_image, self.working_dir)
+            answer[f"hd{drive}_disk_image_md5sum"] = md5sum(disk_image, self.working_dir)
+
+            local_disk = os.path.join(self.working_dir, f"hd{drive}_disk.qcow2")
+            if os.path.exists(local_disk):
+                try:
+                    qcow2 = Qcow2(local_disk)
+                    if qcow2.backing_file:
+                        answer[f"hd{drive}_disk_image_backed"] = os.path.basename(local_disk)
+                except (Qcow2Error, OSError) as e:
+                    log.error(f"Could not read qcow2 disk image '{local_disk}': {e}")
+                    continue
+
         answer["cdrom_image"] = self.manager.get_relative_image_path(self._cdrom_image, self.working_dir)
-        answer["cdrom_image_md5sum"] = md5sum(self._cdrom_image)
+        answer["cdrom_image_md5sum"] = md5sum(self._cdrom_image, self.working_dir)
         answer["bios_image"] = self.manager.get_relative_image_path(self._bios_image, self.working_dir)
-        answer["bios_image_md5sum"] = md5sum(self._bios_image)
+        answer["bios_image_md5sum"] = md5sum(self._bios_image, self.working_dir)
         answer["initrd"] = self.manager.get_relative_image_path(self._initrd, self.working_dir)
-        answer["initrd_md5sum"] = md5sum(self._initrd)
+        answer["initrd_md5sum"] = md5sum(self._initrd, self.working_dir)
         answer["kernel_image"] = self.manager.get_relative_image_path(self._kernel_image, self.working_dir)
-        answer["kernel_image_md5sum"] = md5sum(self._kernel_image)
+        answer["kernel_image_md5sum"] = md5sum(self._kernel_image, self.working_dir)
         return answer
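The hd{drive}_disk_image_backed value is simply the name of the per-node overlay (hd{drive}_disk.qcow2) whenever that overlay has a backing file. A minimal standalone sketch of the same check (not part of the commit); the Qcow2 import path is assumed:

import os

from gns3server.compute.qemu.utils.qcow2 import Qcow2, Qcow2Error  # import path assumed


def backed_disk_images(working_dir: str) -> dict:
    """Map hd{drive}_disk_image_backed keys to overlay file names, mirroring the loop above."""
    answer = {}
    for drive in ("a", "b", "c", "d"):
        local_disk = os.path.join(working_dir, f"hd{drive}_disk.qcow2")
        if not os.path.exists(local_disk):
            continue
        try:
            if Qcow2(local_disk).backing_file:
                answer[f"hd{drive}_disk_image_backed"] = os.path.basename(local_disk)
        except (Qcow2Error, OSError):
            continue
    return answer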
@@ -624,7 +624,6 @@ class Compute:
         """
         Return the list of images available for this type on the compute node.
         """
-        images = []
 
         res = await self.http_query("GET", f"/{type}/images", timeout=None)
         images = res.json
@@ -59,12 +59,15 @@ class ImagesRepository(BaseRepository):
         result = await self._db_session.execute(query)
         return result.scalars().first()
 
-    async def get_images(self) -> List[models.Image]:
+    async def get_images(self, image_type=None) -> List[models.Image]:
         """
         Get all images.
         """
 
-        query = select(models.Image)
+        if image_type:
+            query = select(models.Image).where(models.Image.image_type == image_type)
+        else:
+            query = select(models.Image)
         result = await self._db_session.execute(query)
         return result.scalars().all()
 
@@ -166,15 +166,19 @@ class QemuBase(BaseModel):
     aux: Optional[int] = Field(None, gt=0, le=65535, description="Auxiliary console TCP port")
     aux_type: Optional[QemuConsoleType] = Field(None, description="Auxiliary console type")
     hda_disk_image: Optional[str] = Field(None, description="QEMU hda disk image path")
+    hda_disk_image_backed: Optional[str] = Field(None, description="QEMU hda backed disk image path")
     hda_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hda disk image checksum")
     hda_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hda interface")
     hdb_disk_image: Optional[str] = Field(None, description="QEMU hdb disk image path")
+    hdb_disk_image_backed: Optional[str] = Field(None, description="QEMU hdb backed disk image path")
     hdb_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdb disk image checksum")
     hdb_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdb interface")
     hdc_disk_image: Optional[str] = Field(None, description="QEMU hdc disk image path")
+    hdc_disk_image_backed: Optional[str] = Field(None, description="QEMU hdc backed disk image path")
     hdc_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdc disk image checksum")
     hdc_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdc interface")
     hdd_disk_image: Optional[str] = Field(None, description="QEMU hdd disk image path")
+    hdd_disk_image_backed: Optional[str] = Field(None, description="QEMU hdd backed disk image path")
     hdd_disk_image_md5sum: Optional[str] = Field(None, description="QEMU hdd disk image checksum")
     hdd_disk_interface: Optional[QemuDiskInterfaceType] = Field(None, description="QEMU hdd interface")
     cdrom_image: Optional[str] = Field(None, description="QEMU cdrom image path")
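The four new *_backed fields are optional, so existing clients keep working. A hypothetical excerpt (not part of the commit) of a Qemu node answer once hda_disk.qcow2 exists and has a backing file; all values are placeholders:

node_answer_excerpt = {
    "hda_disk_image": "empty8G.qcow2",                            # configured image
    "hda_disk_image_backed": "hda_disk.qcow2",                    # new: overlay that backs it
    "hda_disk_image_md5sum": "0123456789abcdef0123456789abcdef",  # placeholder checksum
    "hda_disk_interface": "ide",
}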
@@ -158,11 +158,12 @@ def images_directories(type):
     return [force_unix_path(p) for p in paths if os.path.exists(p)]
 
 
-def md5sum(path, stopped_event=None):
+def md5sum(path, working_dir=None, stopped_event=None):
     """
     Return the md5sum of an image and cache it on disk
 
     :param path: Path to the image
+    :param working_dir: where to store .md5sum files
     :param stopped_event: In case you execute this function on thread and would like to have possibility
     to cancel operation pass the `threading.Event`
     :returns: Digest of the image
@@ -171,8 +172,13 @@ def md5sum(path, stopped_event=None):
     if path is None or len(path) == 0 or not os.path.exists(path):
         return None
 
+    if working_dir:
+        md5sum_file = os.path.join(working_dir, os.path.basename(path) + ".md5sum")
+    else:
+        md5sum_file = path + ".md5sum"
+
     try:
-        with open(path + ".md5sum") as f:
+        with open(md5sum_file) as f:
             md5 = f.read().strip()
             if len(md5) == 32:
                 return md5
@@ -197,7 +203,7 @@ def md5sum(path, stopped_event=None):
         return None
 
     try:
-        with open(f"{path}.md5sum", "w+") as f:
+        with open(md5sum_file, "w+") as f:
             f.write(digest)
     except OSError as e:
         log.error("Can't write digest of %s: %s", path, str(e))
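With the new working_dir argument, the .md5sum cache is written into that directory, named after the image file, instead of next to the image itself, which keeps shared or read-only image directories untouched. A short sketch (not part of the commit); paths are placeholders:

from gns3server.utils.images import md5sum

image_path = "/opt/gns3/images/QEMU/empty8G.qcow2"               # placeholder image path
working_dir = "/opt/gns3/projects/demo/project-files/qemu/vm1"   # placeholder node working dir

digest = md5sum(image_path)               # cache written to <image_path>.md5sum, as before
digest = md5sum(image_path, working_dir)  # cache written to <working_dir>/empty8G.qcow2.md5sum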
@@ -671,6 +671,45 @@ async def test_qemu_update_disk_image(
     )
 
 
+async def test_qemu_delete_disk_image(
+        app: FastAPI,
+        compute_client: AsyncClient,
+        compute_project: Project,
+        qemu_vm: dict,
+) -> None:
+
+    node = compute_project.get_node(qemu_vm["node_id"])
+    shutil.copy("tests/resources/empty8G.qcow2", os.path.join(node.working_dir, "disk.qcow2"))
+
+    response = await compute_client.delete(
+        app.url_path_for(
+            "compute:delete_qemu_disk_image",
+            project_id=qemu_vm["project_id"],
+            node_id=qemu_vm["node_id"],
+            disk_name="disk.qcow2"
+        )
+    )
+    assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+async def test_qemu_delete_disk_image_missing_image(
+        app: FastAPI,
+        compute_client: AsyncClient,
+        compute_project: Project,
+        qemu_vm: dict,
+) -> None:
+
+    response = await compute_client.delete(
+        app.url_path_for(
+            "compute:delete_qemu_disk_image",
+            project_id=qemu_vm["project_id"],
+            node_id=qemu_vm["node_id"],
+            disk_name="unknown_image.qcow2"
+        )
+    )
+    assert response.status_code == status.HTTP_409_CONFLICT
+
+
 @pytest.mark.asyncio
 async def test_qemu_start_capture(app: FastAPI, compute_client: AsyncClient, qemu_vm: dict):
 
@@ -346,6 +346,38 @@ async def test_qemu_disk_image_update_wrong_node_type(
     assert response.status_code == status.HTTP_400_BAD_REQUEST
 
 
+async def test_qemu_disk_image_delete(
+        app: FastAPI,
+        client: AsyncClient,
+        project: Project,
+        compute: Compute,
+        node: Node
+) -> None:
+
+    response = MagicMock()
+    compute.delete = AsyncioMagicMock(return_value=response)
+
+    node._node_type = "qemu"  # force Qemu node type
+    response = await client.delete(
+        app.url_path_for("delete_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2")
+    )
+    assert response.status_code == status.HTTP_204_NO_CONTENT
+
+
+async def test_qemu_disk_image_delete_wrong_node_type(
+        app: FastAPI,
+        client: AsyncClient,
+        project: Project,
+        compute: Compute,
+        node: Node
+) -> None:
+
+    response = await client.delete(
+        app.url_path_for("delete_disk_image", project_id=project.id, node_id=node.id, disk_name="hda_disk.qcow2")
+    )
+    assert response.status_code == status.HTTP_400_BAD_REQUEST
+
+
 async def test_get_file(app: FastAPI, client: AsyncClient, project: Project, compute: Compute, node: Node) -> None:
 
     response = MagicMock()
@@ -94,20 +94,18 @@ async def test_vm(compute_project, manager, fake_qemu_binary):
 
 
 @pytest.mark.asyncio
-async def test_vm_create(tmpdir, compute_project, manager, fake_qemu_binary):
-
-    fake_img = str(tmpdir / 'hello')
-
-    with open(fake_img, 'w+') as f:
-        f.write('hello')
+async def test_vm_create(compute_project, manager, fake_qemu_binary):
 
     vm = QemuVM("test", "00010203-0405-0607-0809-0a0b0c0d0e0f", compute_project, manager, qemu_path=fake_qemu_binary)
+    fake_img = os.path.join(vm.working_dir, 'hello')
+    with open(fake_img, 'w+') as f:
+        f.write('hello')
     vm._hda_disk_image = fake_img
 
     await vm.create()
 
     # tests if `create` created md5sums
-    assert os.path.exists(str(tmpdir / 'hello.md5sum'))
+    assert os.path.exists(os.path.join(vm.working_dir, 'hello.md5sum'))
 
 
 @pytest.mark.asyncio