[pdm-devel] [PATCH datacenter-manager 1/9] server: factor qemu/lxc code into own modules

Dominik Csapak d.csapak at proxmox.com
Mon Jan 13 16:45:42 CET 2025


so the modules don't get overly big

Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
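Notes for reviewers: the handlers move as-is; mod.rs keeps the shared helpers
(check_guest_*, connect_to_remote, find_node_for_vm, new_remote_upid) and the
new submodules pull them in via `use super::...`. As a rough sketch (assuming
the hunks below apply cleanly), the wiring left behind in mod.rs is just:

    mod lxc;
    mod qemu;

    #[sortable]
    const REMOTE_SUBDIRS: SubdirMap = &sorted!([
        ("lxc", &lxc::ROUTER),   // was LXC_ROUTER, now lives in lxc.rs
        ("nodes", &NODES_ROUTER),
        ("qemu", &qemu::ROUTER), // was QEMU_ROUTER, now lives in qemu.rs
        ("resources", &RESOURCES_ROUTER),
        ("tasks", &tasks::ROUTER),
    ]);
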
 server/src/api/pve/lxc.rs  |  507 ++++++++++++++++++
 server/src/api/pve/mod.rs  | 1029 +-----------------------------------
 server/src/api/pve/qemu.rs |  552 +++++++++++++++++++
 3 files changed, 1066 insertions(+), 1022 deletions(-)
 create mode 100644 server/src/api/pve/lxc.rs
 create mode 100644 server/src/api/pve/qemu.rs

diff --git a/server/src/api/pve/lxc.rs b/server/src/api/pve/lxc.rs
new file mode 100644
index 0000000..b16d268
--- /dev/null
+++ b/server/src/api/pve/lxc.rs
@@ -0,0 +1,507 @@
+use anyhow::{bail, format_err, Error};
+use http::uri::Authority;
+
+use proxmox_access_control::CachedUserInfo;
+use proxmox_router::{
+    http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
+};
+use proxmox_schema::api;
+use proxmox_sortable_macro::sortable;
+
+use pdm_api_types::remotes::REMOTE_ID_SCHEMA;
+use pdm_api_types::{
+    Authid, ConfigurationState, RemoteUpid, NODE_SCHEMA, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_MANAGE,
+    PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
+};
+
+use crate::api::pve::get_remote;
+
+use super::{
+    check_guest_delete_perms, check_guest_list_permissions, check_guest_permissions,
+    connect_to_remote, new_remote_upid,
+};
+
+use super::find_node_for_vm;
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_LXC)
+    .match_all("vmid", &LXC_VM_ROUTER);
+
+const LXC_VM_ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(LXC_VM_SUBDIRS))
+    .subdirs(LXC_VM_SUBDIRS);
+#[sortable]
+const LXC_VM_SUBDIRS: SubdirMap = &sorted!([
+    ("config", &Router::new().get(&API_METHOD_LXC_GET_CONFIG)),
+    ("rrddata", &super::rrddata::LXC_RRD_ROUTER),
+    ("start", &Router::new().post(&API_METHOD_LXC_START)),
+    ("status", &Router::new().get(&API_METHOD_LXC_GET_STATUS)),
+    ("stop", &Router::new().post(&API_METHOD_LXC_STOP)),
+    ("shutdown", &Router::new().post(&API_METHOD_LXC_SHUTDOWN)),
+    ("migrate", &Router::new().post(&API_METHOD_LXC_MIGRATE)),
+    (
+        "remote-migrate",
+        &Router::new().post(&API_METHOD_LXC_REMOTE_MIGRATE)
+    ),
+]);
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        type: Array,
+        description: "Get a list of containers.",
+        items: { type: pve_api_types::LxcEntry },
+    },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false),
+    },
+)]
+/// Query the remote's list of lxc containers. If no node is provided, all nodes are queried.
+pub async fn list_lxc(
+    remote: String,
+    node: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<pve_api_types::LxcEntry>, Error> {
+    // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody
+    // and fine-grained checks once those are implemented for all API calls..
+    let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?;
+
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let list = if let Some(node) = node {
+        pve.list_lxc(&node).await?
+    } else {
+        let mut list = Vec::new();
+        for node in pve.list_nodes().await? {
+            list.extend(pve.list_lxc(&node.node).await?);
+        }
+        list
+    };
+
+    if top_level_allowed {
+        return Ok(list);
+    }
+
+    Ok(list
+        .into_iter()
+        .filter(|entry| {
+            check_guest_permissions(
+                &auth_id,
+                &user_info,
+                &remote,
+                PRIV_RESOURCE_AUDIT,
+                entry.vmid,
+            )
+        })
+        .collect())
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+            state: { type: ConfigurationState },
+            snapshot: {
+                schema: SNAPSHOT_NAME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: { type: pve_api_types::LxcConfig },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
+    },
+)]
+/// Get the configuration of an lxc container from a remote. If a node is provided, the container
+/// must be on that node, otherwise the node is determined automatically.
+pub async fn lxc_get_config(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+    state: ConfigurationState,
+    snapshot: Option<String>,
+) -> Result<pve_api_types::LxcConfig, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    Ok(pve
+        .lxc_get_config(&node, vmid, state.current(), snapshot)
+        .await?)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: pve_api_types::LxcStatus },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
+    },
+)]
+/// Get the status of an LXC guest from a remote. If a node is provided, the guest must be on that
+/// node, otherwise the node is determined automatically.
+pub async fn lxc_get_status(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<pve_api_types::LxcStatus, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    Ok(pve.lxc_get_status(&node, vmid).await?)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
+    },
+)]
+/// Start a remote lxc container.
+pub async fn lxc_start(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<RemoteUpid, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    let upid = pve.start_lxc_async(&node, vmid, Default::default()).await?;
+
+    new_remote_upid(remote, upid)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
+    },
+)]
+/// Stop a remote lxc container.
+pub async fn lxc_stop(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<RemoteUpid, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    let upid = pve.stop_lxc_async(&node, vmid, Default::default()).await?;
+
+    new_remote_upid(remote, upid)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
+    },
+)]
+/// Perform a shutdown of a remote lxc container.
+pub async fn lxc_shutdown(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<RemoteUpid, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    let upid = pve
+        .shutdown_lxc_async(&node, vmid, Default::default())
+        .await?;
+
+    new_remote_upid(remote, upid)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            target: { schema: NODE_SCHEMA },
+            vmid: { schema: VMID_SCHEMA },
+            online: {
+                type: bool,
+                description: "Attempt an online migration if the container is running.",
+                optional: true,
+            },
+            restart: {
+                type: bool,
+                description: "Perform a restart-migration if the container is running.",
+                optional: true,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+                optional: true,
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            },
+            timeout: {
+                description: "Shutdown timeout for restart-migrations.",
+                optional: true,
+            },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::And(&[
+            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
+        ]),
+    },
+)]
+/// Perform an in-cluster migration of an lxc container.
+#[allow(clippy::too_many_arguments)]
+pub async fn lxc_migrate(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+    bwlimit: Option<u64>,
+    restart: Option<bool>,
+    online: Option<bool>,
+    target: String,
+    target_storage: Option<String>,
+    timeout: Option<i64>,
+) -> Result<RemoteUpid, Error> {
+    let bwlimit = bwlimit.map(|n| n as f64);
+
+    log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}");
+
+    let (remotes, _) = pdm_config::remotes::config()?;
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    if node == target {
+        bail!("refusing migration to the same node");
+    }
+
+    let params = pve_api_types::MigrateLxc {
+        bwlimit,
+        online,
+        restart,
+        target,
+        target_storage,
+        timeout,
+    };
+    let upid = pve.migrate_lxc(&node, vmid, params).await?;
+
+    new_remote_upid(remote, upid)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            target: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+            "target-vmid": {
+                optional: true,
+                schema: VMID_SCHEMA,
+            },
+            delete: {
+                description: "Delete the original VM and related data after successful migration.",
+                optional: true,
+                default: false,
+            },
+            online: {
+                type: bool,
+                description: "Perform an online migration if the vm is running.",
+                optional: true,
+                default: false,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+            },
+            "target-bridge": {
+                description: "Mapping of source bridges to remote bridges.",
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            },
+            restart: {
+                description: "Perform a restart-migration.",
+                optional: true,
+            },
+            timeout: {
+                description: "Add a shutdown timeout for the restart-migration.",
+                optional: true,
+            },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission:
+            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
+        description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid",
+    },
+)]
+/// Perform a remote migration of an lxc container.
+#[allow(clippy::too_many_arguments)]
+pub async fn lxc_remote_migrate(
+    remote: String, // this is the source
+    target: String, // this is the destination remote name
+    node: Option<String>,
+    vmid: u32,
+    target_vmid: Option<u32>,
+    delete: bool,
+    online: bool,
+    target_storage: String,
+    target_bridge: String,
+    bwlimit: Option<u64>,
+    restart: Option<bool>,
+    timeout: Option<i64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<RemoteUpid, Error> {
+    let user_info = CachedUserInfo::new()?;
+    let auth_id: Authid = rpcenv
+        .get_auth_id()
+        .ok_or_else(|| format_err!("no authid available"))?
+        .parse()?;
+    let target_privs = user_info.lookup_privs(
+        &auth_id,
+        &[
+            "resource",
+            &target,
+            "guest",
+            &target_vmid.unwrap_or(vmid).to_string(),
+        ],
+    );
+    if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
+        http_bail!(
+            UNAUTHORIZED,
+            "missing PRIV_RESOURCE_MIGRATE on target remote+vmid"
+        );
+    }
+    if delete {
+        check_guest_delete_perms(rpcenv, &remote, vmid)?;
+    }
+
+    let source = remote; // let's stick to "source" and "target" naming
+
+    log::info!("remote migration requested");
+
+    if source == target {
+        bail!("source and destination clusters must be different");
+    }
+
+    let (remotes, _) = pdm_config::remotes::config()?;
+    let target = get_remote(&remotes, &target)?;
+    let source_conn = connect_to_remote(&remotes, &source)?;
+
+    let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?;
+
+    // FIXME: For now we'll only try with the first node but we should probably try others, too, in
+    // case some are offline?
+
+    let target_node = target
+        .nodes
+        .first()
+        .ok_or_else(|| format_err!("no nodes configured for target cluster"))?;
+    let target_host_port: Authority = target_node.hostname.parse()?;
+    let mut target_endpoint = format!(
+        "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}",
+        host = target_host_port.host(),
+        authid = target.authid,
+        secret = target.token,
+        port = target_host_port.port_u16().unwrap_or(8006),
+    );
+    if let Some(fp) = target_node.fingerprint.as_deref() {
+        target_endpoint.reserve(fp.len() + ",fingerprint=".len());
+        target_endpoint.push_str(",fingerprint=");
+        target_endpoint.push_str(fp);
+    }
+
+    log::info!("forwarding remote migration requested");
+    let params = pve_api_types::RemoteMigrateLxc {
+        target_bridge,
+        target_storage,
+        delete: Some(delete),
+        online: Some(online),
+        target_vmid,
+        target_endpoint,
+        bwlimit: bwlimit.map(|limit| limit as f64),
+        restart,
+        timeout,
+    };
+    log::info!("migrating vm {vmid} of node {node:?}");
+    let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?;
+
+    new_remote_upid(source, upid)
+}
diff --git a/server/src/api/pve/mod.rs b/server/src/api/pve/mod.rs
index ae44722..48e16b2 100644
--- a/server/src/api/pve/mod.rs
+++ b/server/src/api/pve/mod.rs
@@ -3,7 +3,6 @@
 use std::sync::Arc;
 
 use anyhow::{bail, format_err, Error};
-use http::uri::Authority;
 
 use proxmox_access_control::CachedUserInfo;
 use proxmox_router::{
@@ -17,22 +16,20 @@ use proxmox_sortable_macro::sortable;
 use pdm_api_types::remotes::{NodeUrl, Remote, RemoteType, REMOTE_ID_SCHEMA};
 use pdm_api_types::resource::PveResource;
 use pdm_api_types::{
-    Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, HOST_OPTIONAL_PORT_FORMAT, NODE_SCHEMA,
-    PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_DELETE, PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE,
-    PRIV_SYS_MODIFY, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
+    Authid, RemoteUpid, HOST_OPTIONAL_PORT_FORMAT, PRIV_RESOURCE_AUDIT, PRIV_RESOURCE_DELETE,
+    PRIV_SYS_MODIFY,
 };
 
 use pve_api_types::client::PveClient;
-use pve_api_types::{
-    ClusterResourceKind, ClusterResourceType, ListRealm, PveUpid, QemuMigratePreconditions,
-    StartQemuMigrationType,
-};
+use pve_api_types::{ClusterResourceKind, ClusterResourceType, ListRealm, PveUpid};
 
 use super::resources::{map_pve_lxc, map_pve_node, map_pve_qemu, map_pve_storage};
 
 use crate::{connection, task_cache};
 
+mod lxc;
 mod node;
+mod qemu;
 mod rrddata;
 pub mod tasks;
 
@@ -58,66 +55,17 @@ const MAIN_ROUTER: Router = Router::new()
 
 #[sortable]
 const REMOTE_SUBDIRS: SubdirMap = &sorted!([
-    ("lxc", &LXC_ROUTER),
+    ("lxc", &lxc::ROUTER),
     ("nodes", &NODES_ROUTER),
-    ("qemu", &QEMU_ROUTER),
+    ("qemu", &qemu::ROUTER),
     ("resources", &RESOURCES_ROUTER),
     ("tasks", &tasks::ROUTER),
 ]);
 
-const LXC_ROUTER: Router = Router::new()
-    .get(&API_METHOD_LIST_LXC)
-    .match_all("vmid", &LXC_VM_ROUTER);
-
-const LXC_VM_ROUTER: Router = Router::new()
-    .get(&list_subdirs_api_method!(LXC_VM_SUBDIRS))
-    .subdirs(LXC_VM_SUBDIRS);
-#[sortable]
-const LXC_VM_SUBDIRS: SubdirMap = &sorted!([
-    ("config", &Router::new().get(&API_METHOD_LXC_GET_CONFIG)),
-    ("rrddata", &rrddata::LXC_RRD_ROUTER),
-    ("start", &Router::new().post(&API_METHOD_LXC_START)),
-    ("status", &Router::new().get(&API_METHOD_LXC_GET_STATUS)),
-    ("stop", &Router::new().post(&API_METHOD_LXC_STOP)),
-    ("shutdown", &Router::new().post(&API_METHOD_LXC_SHUTDOWN)),
-    ("migrate", &Router::new().post(&API_METHOD_LXC_MIGRATE)),
-    (
-        "remote-migrate",
-        &Router::new().post(&API_METHOD_LXC_REMOTE_MIGRATE)
-    ),
-]);
-
 const NODES_ROUTER: Router = Router::new()
     .get(&API_METHOD_LIST_NODES)
     .match_all("node", &node::ROUTER);
 
-const QEMU_ROUTER: Router = Router::new()
-    .get(&API_METHOD_LIST_QEMU)
-    .match_all("vmid", &QEMU_VM_ROUTER);
-
-const QEMU_VM_ROUTER: Router = Router::new()
-    .get(&list_subdirs_api_method!(QEMU_VM_SUBDIRS))
-    .subdirs(QEMU_VM_SUBDIRS);
-#[sortable]
-const QEMU_VM_SUBDIRS: SubdirMap = &sorted!([
-    ("config", &Router::new().get(&API_METHOD_QEMU_GET_CONFIG)),
-    ("rrddata", &rrddata::QEMU_RRD_ROUTER),
-    ("start", &Router::new().post(&API_METHOD_QEMU_START)),
-    ("status", &Router::new().get(&API_METHOD_QEMU_GET_STATUS)),
-    ("stop", &Router::new().post(&API_METHOD_QEMU_STOP)),
-    ("shutdown", &Router::new().post(&API_METHOD_QEMU_SHUTDOWN)),
-    (
-        "migrate",
-        &Router::new()
-            .get(&API_METHOD_QEMU_MIGRATE_PRECONDITIONS)
-            .post(&API_METHOD_QEMU_MIGRATE)
-    ),
-    (
-        "remote-migrate",
-        &Router::new().post(&API_METHOD_QEMU_REMOTE_MIGRATE)
-    ),
-]);
-
 const RESOURCES_ROUTER: Router = Router::new().get(&API_METHOD_CLUSTER_RESOURCES);
 
 // converts a remote + PveUpid into a RemoteUpid and starts tracking it
@@ -274,128 +222,6 @@ fn check_guest_permissions(
     auth_privs & privilege != 0
 }
 
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-        },
-    },
-    returns: {
-        type: Array,
-        description: "Get a list of VMs",
-        items: { type: pve_api_types::VmEntry },
-    },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false),
-    },
-)]
-/// Query the remote's list of qemu VMs. If no node is provided, the all nodes are queried.
-pub async fn list_qemu(
-    remote: String,
-    node: Option<String>,
-    rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Vec<pve_api_types::VmEntry>, Error> {
-    // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody
-    // and fine-grained checks once those are implemented for all API calls..
-    let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?;
-
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let list = if let Some(node) = node {
-        pve.list_qemu(&node, None).await?
-    } else {
-        let mut list = Vec::new();
-        for node in pve.list_nodes().await? {
-            list.extend(pve.list_qemu(&node.node, None).await?);
-        }
-        list
-    };
-
-    if top_level_allowed {
-        return Ok(list);
-    }
-
-    Ok(list
-        .into_iter()
-        .filter(|entry| {
-            check_guest_permissions(
-                &auth_id,
-                &user_info,
-                &remote,
-                PRIV_RESOURCE_AUDIT,
-                entry.vmid,
-            )
-        })
-        .collect())
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-        },
-    },
-    returns: {
-        type: Array,
-        description: "Get a list of containers.",
-        items: { type: pve_api_types::VmEntry },
-    },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false),
-    },
-)]
-/// Query the remote's list of lxc containers. If no node is provided, the all nodes are queried.
-pub async fn list_lxc(
-    remote: String,
-    node: Option<String>,
-    rpcenv: &mut dyn RpcEnvironment,
-) -> Result<Vec<pve_api_types::LxcEntry>, Error> {
-    // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody
-    // and fine-grained checks once those are implemented for all API calls..
-    let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?;
-
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let list = if let Some(node) = node {
-        pve.list_lxc(&node).await?
-    } else {
-        let mut list = Vec::new();
-        for node in pve.list_nodes().await? {
-            list.extend(pve.list_lxc(&node.node).await?);
-        }
-        list
-    };
-
-    if top_level_allowed {
-        return Ok(list);
-    }
-
-    Ok(list
-        .into_iter()
-        .filter(|entry| {
-            check_guest_permissions(
-                &auth_id,
-                &user_info,
-                &remote,
-                PRIV_RESOURCE_AUDIT,
-                entry.vmid,
-            )
-        })
-        .collect())
-}
-
 async fn find_node_for_vm(
     node: Option<String>,
     vmid: u32,
@@ -414,183 +240,6 @@ async fn find_node_for_vm(
     })
 }
 
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-            state: { type: ConfigurationState },
-            snapshot: {
-                schema: SNAPSHOT_NAME_SCHEMA,
-                optional: true,
-            },
-        },
-    },
-    returns: { type: pve_api_types::QemuConfig },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
-    },
-)]
-/// Get the configuration of a qemu VM from a remote. If a node is provided, the VM must be on that
-/// node, otherwise the node is determined automatically.
-pub async fn qemu_get_config(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-    state: ConfigurationState,
-    snapshot: Option<String>,
-) -> Result<pve_api_types::QemuConfig, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    Ok(pve
-        .qemu_get_config(&node, vmid, state.current(), snapshot)
-        .await?)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: pve_api_types::QemuStatus },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
-    },
-)]
-/// Get the status of a qemu VM from a remote. If a node is provided, the VM must be on that
-/// node, otherwise the node is determined automatically.
-pub async fn qemu_get_status(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<pve_api_types::QemuStatus, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    Ok(pve.qemu_get_status(&node, vmid).await?)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
-    },
-)]
-/// Start a remote qemu vm.
-pub async fn qemu_start(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<RemoteUpid, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    let upid = pve
-        .start_qemu_async(&node, vmid, Default::default())
-        .await?;
-
-    new_remote_upid(remote, upid)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
-    },
-)]
-/// Stop a remote qemu vm.
-pub async fn qemu_stop(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<RemoteUpid, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    let upid = pve.stop_qemu_async(&node, vmid, Default::default()).await?;
-
-    new_remote_upid(remote, upid)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
-    },
-)]
-/// Perform a shutdown of a remote qemu vm.
-pub async fn qemu_shutdown(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<RemoteUpid, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    let upid = pve
-        .shutdown_qemu_async(&node, vmid, Default::default())
-        .await?;
-
-    //(remote, upid.to_string()).try_into()
-    new_remote_upid(remote, upid)
-}
-
 fn check_guest_delete_perms(
     rpcenv: &mut dyn RpcEnvironment,
     remote: &str,
@@ -609,670 +258,6 @@ fn check_guest_delete_perms(
     )
 }
 
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            target: { schema: NODE_SCHEMA },
-            vmid: { schema: VMID_SCHEMA },
-            online: {
-                type: bool,
-                description: "Perform an online migration if the vm is running.",
-                optional: true,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-                optional: true,
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            },
-            "migration-network": {
-                description: "CIDR of the (sub) network that is used for migration.",
-                type: String,
-                format: &CIDR_FORMAT,
-                optional: true,
-            },
-            "migration-type": {
-                type: StartQemuMigrationType,
-                optional: true,
-            },
-            force: {
-                description: "Allow to migrate VMs with local devices.",
-                optional: true,
-                default: false,
-            },
-            "with-local-disks": {
-                description: "Enable live storage migration for local disks.",
-                optional: true,
-            },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::And(&[
-            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
-        ]),
-    },
-)]
-/// Perform an in-cluster migration of a VM.
-#[allow(clippy::too_many_arguments)]
-pub async fn qemu_migrate(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-    bwlimit: Option<u64>,
-    force: Option<bool>,
-    migration_network: Option<String>,
-    migration_type: Option<StartQemuMigrationType>,
-    online: Option<bool>,
-    target: String,
-    target_storage: Option<String>,
-    with_local_disks: Option<bool>,
-) -> Result<RemoteUpid, Error> {
-    log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}");
-
-    let (remotes, _) = pdm_config::remotes::config()?;
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    if node == target {
-        bail!("refusing migration to the same node");
-    }
-
-    let params = pve_api_types::MigrateQemu {
-        bwlimit,
-        force,
-        migration_network,
-        migration_type,
-        online,
-        target,
-        targetstorage: target_storage,
-        with_local_disks,
-    };
-    let upid = pve.migrate_qemu(&node, vmid, params).await?;
-    //(remote, upid.to_string()).try_into()
-    new_remote_upid(remote, upid)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            target: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        }
-    },
-    access: {
-        permission: &Permission::And(&[
-            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
-        ]),
-    },
-)]
-/// Qemu (local) migrate preconditions
-async fn qemu_migrate_preconditions(
-    remote: String,
-    node: Option<String>,
-    target: Option<String>,
-    vmid: u32,
-) -> Result<QemuMigratePreconditions, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    let res = pve.qemu_migrate_preconditions(&node, vmid, target).await?;
-    Ok(res)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            target: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-            "target-vmid": {
-                optional: true,
-                schema: VMID_SCHEMA,
-            },
-            delete: {
-                description: "Delete the original VM and related data after successful migration.",
-                optional: true,
-                default: false,
-            },
-            online: {
-                type: bool,
-                description: "Perform an online migration if the vm is running.",
-                optional: true,
-                default: false,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-            },
-            "target-bridge": {
-                description: "Mapping of source bridges to remote bridges.",
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            }
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission:
-            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
-        description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid",
-    },
-)]
-/// Perform a remote migration of a VM.
-#[allow(clippy::too_many_arguments)]
-pub async fn qemu_remote_migrate(
-    remote: String, // this is the source
-    target: String, // this is the destination remote name
-    node: Option<String>,
-    vmid: u32,
-    target_vmid: Option<u32>,
-    delete: bool,
-    online: bool,
-    target_storage: String,
-    target_bridge: String,
-    bwlimit: Option<u64>,
-    rpcenv: &mut dyn RpcEnvironment,
-) -> Result<RemoteUpid, Error> {
-    let user_info = CachedUserInfo::new()?;
-    let auth_id: Authid = rpcenv
-        .get_auth_id()
-        .ok_or_else(|| format_err!("no authid available"))?
-        .parse()?;
-    let target_privs = user_info.lookup_privs(
-        &auth_id,
-        &[
-            "resource",
-            &target,
-            "guest",
-            &target_vmid.unwrap_or(vmid).to_string(),
-        ],
-    );
-    if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
-        http_bail!(
-            UNAUTHORIZED,
-            "missing PRIV_RESOURCE_MIGRATE on target remote+vmid"
-        );
-    }
-
-    if delete {
-        check_guest_delete_perms(rpcenv, &remote, vmid)?;
-    }
-
-    let source = remote; // let's stick to "source" and "target" naming
-
-    log::info!("remote migration requested");
-
-    if source == target {
-        bail!("source and destination clusters must be different");
-    }
-
-    let (remotes, _) = pdm_config::remotes::config()?;
-    let target = get_remote(&remotes, &target)?;
-    let source_conn = connect_to_remote(&remotes, &source)?;
-
-    let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?;
-
-    // FIXME: For now we'll only try with the first node but we should probably try others, too, in
-    // case some are offline?
-
-    let target_node = target
-        .nodes
-        .first()
-        .ok_or_else(|| format_err!("no nodes configured for target cluster"))?;
-    let target_host_port: Authority = target_node.hostname.parse()?;
-    let mut target_endpoint = format!(
-        "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}",
-        host = target_host_port.host(),
-        authid = target.authid,
-        secret = target.token,
-        port = target_host_port.port_u16().unwrap_or(8006),
-    );
-    if let Some(fp) = target_node.fingerprint.as_deref() {
-        target_endpoint.reserve(fp.len() + ",fingerprint=".len());
-        target_endpoint.push_str(",fingerprint=");
-        target_endpoint.push_str(fp);
-    }
-
-    log::info!("forwarding remote migration requested");
-    let params = pve_api_types::RemoteMigrateQemu {
-        target_bridge,
-        target_storage,
-        delete: Some(delete),
-        online: Some(online),
-        target_vmid,
-        target_endpoint,
-        bwlimit,
-    };
-    log::info!("migrating vm {vmid} of node {node:?}");
-    let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?;
-
-    (source, upid.to_string()).try_into()
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-            state: { type: ConfigurationState },
-            snapshot: {
-                schema: SNAPSHOT_NAME_SCHEMA,
-                optional: true,
-            },
-        },
-    },
-    returns: { type: pve_api_types::LxcConfig },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
-    },
-)]
-/// Get the configuration of an lxc container from a remote. If a node is provided, the container
-/// must be on that node, otherwise the node is determined automatically.
-pub async fn lxc_get_config(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-    state: ConfigurationState,
-    snapshot: Option<String>,
-) -> Result<pve_api_types::LxcConfig, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    Ok(pve
-        .lxc_get_config(&node, vmid, state.current(), snapshot)
-        .await?)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: pve_api_types::QemuStatus },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
-    },
-)]
-/// Get the status of an LXC guest from a remote. If a node is provided, the guest must be on that
-/// node, otherwise the node is determined automatically.
-pub async fn lxc_get_status(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<pve_api_types::LxcStatus, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    Ok(pve.lxc_get_status(&node, vmid).await?)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
-    },
-)]
-/// Start a remote lxc container.
-pub async fn lxc_start(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<RemoteUpid, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    let upid = pve.start_lxc_async(&node, vmid, Default::default()).await?;
-
-    new_remote_upid(remote, upid)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
-    },
-)]
-/// Stop a remote lxc container.
-pub async fn lxc_stop(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<RemoteUpid, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    let upid = pve.stop_lxc_async(&node, vmid, Default::default()).await?;
-
-    new_remote_upid(remote, upid)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
-    },
-)]
-/// Perform a shutdown of a remote lxc container.
-pub async fn lxc_shutdown(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-) -> Result<RemoteUpid, Error> {
-    let (remotes, _) = pdm_config::remotes::config()?;
-
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    let upid = pve
-        .shutdown_lxc_async(&node, vmid, Default::default())
-        .await?;
-
-    new_remote_upid(remote, upid)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            target: { schema: NODE_SCHEMA },
-            vmid: { schema: VMID_SCHEMA },
-            online: {
-                type: bool,
-                description: "Attempt an online migration if the container is running.",
-                optional: true,
-            },
-            restart: {
-                type: bool,
-                description: "Perform a restart-migration if the container is running.",
-                optional: true,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-                optional: true,
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            },
-            timeout: {
-                description: "Shutdown timeout for restart-migrations.",
-                optional: true,
-            },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission: &Permission::And(&[
-            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
-        ]),
-    },
-)]
-/// Perform an in-cluster migration of a VM.
-#[allow(clippy::too_many_arguments)]
-pub async fn lxc_migrate(
-    remote: String,
-    node: Option<String>,
-    vmid: u32,
-    bwlimit: Option<u64>,
-    restart: Option<bool>,
-    online: Option<bool>,
-    target: String,
-    target_storage: Option<String>,
-    timeout: Option<i64>,
-) -> Result<RemoteUpid, Error> {
-    let bwlimit = bwlimit.map(|n| n as f64);
-
-    log::info!("in-cluster migration requested for remote {remote:?} ct {vmid} to node {target:?}");
-
-    let (remotes, _) = pdm_config::remotes::config()?;
-    let pve = connect_to_remote(&remotes, &remote)?;
-
-    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
-
-    if node == target {
-        bail!("refusing migration to the same node");
-    }
-
-    let params = pve_api_types::MigrateLxc {
-        bwlimit,
-        online,
-        restart,
-        target,
-        target_storage,
-        timeout,
-    };
-    let upid = pve.migrate_lxc(&node, vmid, params).await?;
-
-    new_remote_upid(remote, upid)
-}
-
-#[api(
-    input: {
-        properties: {
-            remote: { schema: REMOTE_ID_SCHEMA },
-            target: { schema: REMOTE_ID_SCHEMA },
-            node: {
-                schema: NODE_SCHEMA,
-                optional: true,
-            },
-            vmid: { schema: VMID_SCHEMA },
-            "target-vmid": {
-                optional: true,
-                schema: VMID_SCHEMA,
-            },
-            delete: {
-                description: "Delete the original VM and related data after successful migration.",
-                optional: true,
-                default: false,
-            },
-            online: {
-                type: bool,
-                description: "Perform an online migration if the vm is running.",
-                optional: true,
-                default: false,
-            },
-            "target-storage": {
-                description: "Mapping of source storages to target storages.",
-            },
-            "target-bridge": {
-                description: "Mapping of source bridges to remote bridges.",
-            },
-            bwlimit: {
-                description: "Override I/O bandwidth limit (in KiB/s).",
-                optional: true,
-            },
-            restart: {
-                description: "Perform a restart-migration.",
-                optional: true,
-            },
-            timeout: {
-                description: "Add a shutdown timeout for the restart-migration.",
-                optional: true,
-            },
-        },
-    },
-    returns: { type: RemoteUpid },
-    access: {
-        permission:
-            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
-        description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid",
-    },
-)]
-/// Perform a remote migration of an lxc container.
-#[allow(clippy::too_many_arguments)]
-pub async fn lxc_remote_migrate(
-    remote: String, // this is the source
-    target: String, // this is the destination remote name
-    node: Option<String>,
-    vmid: u32,
-    target_vmid: Option<u32>,
-    delete: bool,
-    online: bool,
-    target_storage: String,
-    target_bridge: String,
-    bwlimit: Option<u64>,
-    restart: Option<bool>,
-    timeout: Option<i64>,
-    rpcenv: &mut dyn RpcEnvironment,
-) -> Result<RemoteUpid, Error> {
-    let user_info = CachedUserInfo::new()?;
-    let auth_id: Authid = rpcenv
-        .get_auth_id()
-        .ok_or_else(|| format_err!("no authid available"))?
-        .parse()?;
-    let target_privs = user_info.lookup_privs(
-        &auth_id,
-        &[
-            "resource",
-            &target,
-            "guest",
-            &target_vmid.unwrap_or(vmid).to_string(),
-        ],
-    );
-    if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
-        http_bail!(
-            UNAUTHORIZED,
-            "missing PRIV_RESOURCE_MIGRATE on target remote+vmid"
-        );
-    }
-    if delete {
-        check_guest_delete_perms(rpcenv, &remote, vmid)?;
-    }
-
-    let source = remote; // let's stick to "source" and "target" naming
-
-    log::info!("remote migration requested");
-
-    if source == target {
-        bail!("source and destination clusters must be different");
-    }
-
-    let (remotes, _) = pdm_config::remotes::config()?;
-    let target = get_remote(&remotes, &target)?;
-    let source_conn = connect_to_remote(&remotes, &source)?;
-
-    let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?;
-
-    // FIXME: For now we'll only try with the first node but we should probably try others, too, in
-    // case some are offline?
-
-    let target_node = target
-        .nodes
-        .first()
-        .ok_or_else(|| format_err!("no nodes configured for target cluster"))?;
-    let target_host_port: Authority = target_node.hostname.parse()?;
-    let mut target_endpoint = format!(
-        "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}",
-        host = target_host_port.host(),
-        authid = target.authid,
-        secret = target.token,
-        port = target_host_port.port_u16().unwrap_or(8006),
-    );
-    if let Some(fp) = target_node.fingerprint.as_deref() {
-        target_endpoint.reserve(fp.len() + ",fingerprint=".len());
-        target_endpoint.push_str(",fingerprint=");
-        target_endpoint.push_str(fp);
-    }
-
-    log::info!("forwarding remote migration requested");
-    let params = pve_api_types::RemoteMigrateLxc {
-        target_bridge,
-        target_storage,
-        delete: Some(delete),
-        online: Some(online),
-        target_vmid,
-        target_endpoint,
-        bwlimit: bwlimit.map(|limit| limit as f64),
-        restart,
-        timeout,
-    };
-    log::info!("migrating vm {vmid} of node {node:?}");
-    let upid = source_conn.remote_migrate_lxc(&node, vmid, params).await?;
-
-    new_remote_upid(source, upid)
-}
-
 #[api(
     input: {
         properties: {
diff --git a/server/src/api/pve/qemu.rs b/server/src/api/pve/qemu.rs
new file mode 100644
index 0000000..9a67c10
--- /dev/null
+++ b/server/src/api/pve/qemu.rs
@@ -0,0 +1,552 @@
+use anyhow::{bail, format_err, Error};
+use http::uri::Authority;
+
+use proxmox_access_control::CachedUserInfo;
+use proxmox_router::{
+    http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
+};
+use proxmox_schema::api;
+use proxmox_sortable_macro::sortable;
+
+use pdm_api_types::remotes::REMOTE_ID_SCHEMA;
+use pdm_api_types::{
+    Authid, ConfigurationState, RemoteUpid, CIDR_FORMAT, NODE_SCHEMA, PRIV_RESOURCE_AUDIT,
+    PRIV_RESOURCE_MANAGE, PRIV_RESOURCE_MIGRATE, SNAPSHOT_NAME_SCHEMA, VMID_SCHEMA,
+};
+
+use pve_api_types::{QemuMigratePreconditions, StartQemuMigrationType};
+
+use crate::api::pve::get_remote;
+
+use super::{
+    check_guest_delete_perms, check_guest_list_permissions, check_guest_permissions,
+    connect_to_remote, find_node_for_vm, new_remote_upid,
+};
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_QEMU)
+    .match_all("vmid", &QEMU_VM_ROUTER);
+
+const QEMU_VM_ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(QEMU_VM_SUBDIRS))
+    .subdirs(QEMU_VM_SUBDIRS);
+#[sortable]
+const QEMU_VM_SUBDIRS: SubdirMap = &sorted!([
+    ("config", &Router::new().get(&API_METHOD_QEMU_GET_CONFIG)),
+    ("rrddata", &super::rrddata::QEMU_RRD_ROUTER),
+    ("start", &Router::new().post(&API_METHOD_QEMU_START)),
+    ("status", &Router::new().get(&API_METHOD_QEMU_GET_STATUS)),
+    ("stop", &Router::new().post(&API_METHOD_QEMU_STOP)),
+    ("shutdown", &Router::new().post(&API_METHOD_QEMU_SHUTDOWN)),
+    (
+        "migrate",
+        &Router::new()
+            .get(&API_METHOD_QEMU_MIGRATE_PRECONDITIONS)
+            .post(&API_METHOD_QEMU_MIGRATE)
+    ),
+    (
+        "remote-migrate",
+        &Router::new().post(&API_METHOD_QEMU_REMOTE_MIGRATE)
+    ),
+]);
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        type: Array,
+        description: "Get a list of VMs",
+        items: { type: pve_api_types::VmEntry },
+    },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}"], PRIV_RESOURCE_AUDIT, false),
+    },
+)]
+/// Query the remote's list of qemu VMs. If no node is provided, all nodes are queried.
+pub async fn list_qemu(
+    remote: String,
+    node: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<pve_api_types::VmEntry>, Error> {
+    // FIXME: top_level_allowed is always true because of schema check above, replace with Anybody
+    // and fine-grained checks once those are implemented for all API calls..
+    let (auth_id, user_info, top_level_allowed) = check_guest_list_permissions(&remote, rpcenv)?;
+
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let list = if let Some(node) = node {
+        pve.list_qemu(&node, None).await?
+    } else {
+        let mut list = Vec::new();
+        for node in pve.list_nodes().await? {
+            list.extend(pve.list_qemu(&node.node, None).await?);
+        }
+        list
+    };
+
+    if top_level_allowed {
+        return Ok(list);
+    }
+
+    Ok(list
+        .into_iter()
+        .filter(|entry| {
+            check_guest_permissions(
+                &auth_id,
+                &user_info,
+                &remote,
+                PRIV_RESOURCE_AUDIT,
+                entry.vmid,
+            )
+        })
+        .collect())
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+            state: { type: ConfigurationState },
+            snapshot: {
+                schema: SNAPSHOT_NAME_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: { type: pve_api_types::QemuConfig },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
+    },
+)]
+/// Get the configuration of a qemu VM from a remote. If a node is provided, the VM must be on that
+/// node, otherwise the node is determined automatically.
+pub async fn qemu_get_config(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+    state: ConfigurationState,
+    snapshot: Option<String>,
+) -> Result<pve_api_types::QemuConfig, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    Ok(pve
+        .qemu_get_config(&node, vmid, state.current(), snapshot)
+        .await?)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: pve_api_types::QemuStatus },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_AUDIT, false),
+    },
+)]
+/// Get the status of a qemu VM from a remote. If a node is provided, the VM must be on that
+/// node, otherwise the node is determined automatically.
+pub async fn qemu_get_status(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<pve_api_types::QemuStatus, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    Ok(pve.qemu_get_status(&node, vmid).await?)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
+    },
+)]
+/// Start a remote qemu VM.
+pub async fn qemu_start(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<RemoteUpid, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
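+    // Starting is asynchronous on the PVE side: the call returns a task UPID
+    // that the caller can poll for progress.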
+    let upid = pve
+        .start_qemu_async(&node, vmid, Default::default())
+        .await?;
+
+    new_remote_upid(remote, upid)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
+    },
+)]
+/// Stop a remote qemu VM.
+pub async fn qemu_stop(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<RemoteUpid, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    let upid = pve.stop_qemu_async(&node, vmid, Default::default()).await?;
+
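+    // Combine the remote name and the PVE task UPID into a RemoteUpid so the
+    // task can be tracked across the remote boundary.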
+    (remote, upid.to_string()).try_into()
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MANAGE, false),
+    },
+)]
+/// Perform a shutdown of a remote qemu VM.
+pub async fn qemu_shutdown(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+) -> Result<RemoteUpid, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
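+    // Shutdown is the graceful variant (via ACPI or the guest agent), as
+    // opposed to the hard stop above.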
+    let upid = pve
+        .shutdown_qemu_async(&node, vmid, Default::default())
+        .await?;
+
+    (remote, upid.to_string()).try_into()
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            target: { schema: NODE_SCHEMA },
+            vmid: { schema: VMID_SCHEMA },
+            online: {
+                type: bool,
+                description: "Perform an online migration if the vm is running.",
+                optional: true,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+                optional: true,
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            },
+            "migration-network": {
+                description: "CIDR of the (sub) network that is used for migration.",
+                type: String,
+                format: &CIDR_FORMAT,
+                optional: true,
+            },
+            "migration-type": {
+                type: StartQemuMigrationType,
+                optional: true,
+            },
+            force: {
+                description: "Allow to migrate VMs with local devices.",
+                optional: true,
+                default: false,
+            },
+            "with-local-disks": {
+                description: "Enable live storage migration for local disks.",
+                optional: true,
+            },
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
+    },
+)]
+/// Perform an in-cluster migration of a VM.
+#[allow(clippy::too_many_arguments)]
+pub async fn qemu_migrate(
+    remote: String,
+    node: Option<String>,
+    vmid: u32,
+    bwlimit: Option<u64>,
+    force: Option<bool>,
+    migration_network: Option<String>,
+    migration_type: Option<StartQemuMigrationType>,
+    online: Option<bool>,
+    target: String,
+    target_storage: Option<String>,
+    with_local_disks: Option<bool>,
+) -> Result<RemoteUpid, Error> {
+    log::info!("in-cluster migration requested for remote {remote:?} vm {vmid} to node {target:?}");
+
+    let (remotes, _) = pdm_config::remotes::config()?;
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
+    if node == target {
+        bail!("refusing migration to the same node");
+    }
+
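+    // Forward the migration tunables unchanged; `targetstorage` keeps PVE's
+    // parameter spelling, while this API exposes it as "target-storage".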
+    let params = pve_api_types::MigrateQemu {
+        bwlimit,
+        force,
+        migration_network,
+        migration_type,
+        online,
+        target,
+        targetstorage: target_storage,
+        with_local_disks,
+    };
+    let upid = pve.migrate_qemu(&node, vmid, params).await?;
+    new_remote_upid(remote, upid)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            target: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+        }
+    },
+    access: {
+        permission: &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
+    },
+)]
+/// Get the preconditions for an in-cluster (local) migration of a qemu VM.
+async fn qemu_migrate_preconditions(
+    remote: String,
+    node: Option<String>,
+    target: Option<String>,
+    vmid: u32,
+) -> Result<QemuMigratePreconditions, Error> {
+    let (remotes, _) = pdm_config::remotes::config()?;
+    let pve = connect_to_remote(&remotes, &remote)?;
+
+    let node = find_node_for_vm(node, vmid, pve.as_ref()).await?;
+
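+    // Forward the query as-is; PVE reports whether a migration of this VM to
+    // `target` is currently possible and what it would entail.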
+    Ok(pve.qemu_migrate_preconditions(&node, vmid, target).await?)
+}
+
+#[api(
+    input: {
+        properties: {
+            remote: { schema: REMOTE_ID_SCHEMA },
+            target: { schema: REMOTE_ID_SCHEMA },
+            node: {
+                schema: NODE_SCHEMA,
+                optional: true,
+            },
+            vmid: { schema: VMID_SCHEMA },
+            "target-vmid": {
+                optional: true,
+                schema: VMID_SCHEMA,
+            },
+            delete: {
+                description: "Delete the original VM and related data after successful migration.",
+                optional: true,
+                default: false,
+            },
+            online: {
+                type: bool,
+                description: "Perform an online migration if the vm is running.",
+                optional: true,
+                default: false,
+            },
+            "target-storage": {
+                description: "Mapping of source storages to target storages.",
+            },
+            "target-bridge": {
+                description: "Mapping of source bridges to remote bridges.",
+            },
+            bwlimit: {
+                description: "Override I/O bandwidth limit (in KiB/s).",
+                optional: true,
+            }
+        },
+    },
+    returns: { type: RemoteUpid },
+    access: {
+        permission:
+            &Permission::Privilege(&["resource", "{remote}", "guest", "{vmid}"], PRIV_RESOURCE_MIGRATE, false),
+        description: "requires PRIV_RESOURCE_MIGRATE on /resource/{remote}/guest/{vmid} for source and target remove and vmid",
+    },
+)]
+/// Perform a remote migration of a VM.
+#[allow(clippy::too_many_arguments)]
+pub async fn qemu_remote_migrate(
+    remote: String, // the source remote name
+    target: String, // the destination remote name
+    node: Option<String>,
+    vmid: u32,
+    target_vmid: Option<u32>,
+    delete: bool,
+    online: bool,
+    target_storage: String,
+    target_bridge: String,
+    bwlimit: Option<u64>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<RemoteUpid, Error> {
+    let user_info = CachedUserInfo::new()?;
+    let auth_id: Authid = rpcenv
+        .get_auth_id()
+        .ok_or_else(|| format_err!("no authid available"))?
+        .parse()?;
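+    // The access schema above only covers the *source* remote and vmid; the
+    // target side must be checked manually against the (possibly different)
+    // target remote and vmid.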
+    let target_privs = user_info.lookup_privs(
+        &auth_id,
+        &[
+            "resource",
+            &target,
+            "guest",
+            &target_vmid.unwrap_or(vmid).to_string(),
+        ],
+    );
+    if target_privs & PRIV_RESOURCE_MIGRATE == 0 {
+        http_bail!(
+            UNAUTHORIZED,
+            "missing PRIV_RESOURCE_MIGRATE on target remote+vmid"
+        );
+    }
+
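+    // Deleting the source VM afterwards additionally requires delete
+    // permissions on the source guest.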
+    if delete {
+        check_guest_delete_perms(rpcenv, &remote, vmid)?;
+    }
+
+    let source = remote; // let's stick to "source" and "target" naming
+
+    log::info!("remote migration requested");
+
+    if source == target {
+        bail!("source and destination clusters must be different");
+    }
+
+    let (remotes, _) = pdm_config::remotes::config()?;
+    let target = get_remote(&remotes, &target)?;
+    let source_conn = connect_to_remote(&remotes, &source)?;
+
+    let node = find_node_for_vm(node, vmid, source_conn.as_ref()).await?;
+
+    // FIXME: For now we'll only try with the first node but we should probably try others, too, in
+    // case some are offline?
+
+    let target_node = target
+        .nodes
+        .first()
+        .ok_or_else(|| format_err!("no nodes configured for target cluster"))?;
+    let target_host_port: Authority = target_node.hostname.parse()?;
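+    // Build the endpoint spec expected by the PVE remote-migration API:
+    // "host=..,port=..,apitoken=PVEAPIToken=<authid>=<secret>[,fingerprint=..]".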
+    let mut target_endpoint = format!(
+        "host={host},port={port},apitoken=PVEAPIToken={authid}={secret}",
+        host = target_host_port.host(),
+        authid = target.authid,
+        secret = target.token,
+        port = target_host_port.port_u16().unwrap_or(8006),
+    );
+    if let Some(fp) = target_node.fingerprint.as_deref() {
+        target_endpoint.reserve(fp.len() + ",fingerprint=".len());
+        target_endpoint.push_str(",fingerprint=");
+        target_endpoint.push_str(fp);
+    }
+
+    log::info!("forwarding remote migration requested");
+    let params = pve_api_types::RemoteMigrateQemu {
+        target_bridge,
+        target_storage,
+        delete: Some(delete),
+        online: Some(online),
+        target_vmid,
+        target_endpoint,
+        bwlimit,
+    };
+    log::info!("migrating vm {vmid} of node {node:?}");
+    let upid = source_conn.remote_migrate_qemu(&node, vmid, params).await?;
+
+    (source, upid.to_string()).try_into()
+}
-- 
2.39.5