[pbs-devel] [PATCH proxmox-backup v2] api: make prune-group a real workertask

Gabriel Goller g.goller at proxmox.com
Thu Jan 25 12:48:54 CET 2024


`prune-group` is currently not a real worker task, i.e. it behaves like
one, but starts neither a thread nor a task to do its work.

Change it to spawn a tokio task, so that snapshots can be deleted
asynchronously. The `dry-run` option still behaves the same way and
returns early.

This paves the way for the new logging infra (which uses `task_local` to
define a logger) and improves performance for bigger backup groups.

Signed-off-by: Gabriel Goller <g.goller at proxmox.com>
---

Changes since v1:
 - use a feature flag to activate the new behavior, so we don't break the API
 - convert the result to a struct and print it in the task log
 - enable the feature flag in the frontend

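Note for API consumers: when `use-task` is set, the endpoint returns the
UPID of the spawned task instead of the per-snapshot result list; the
list is printed to the task log instead. A rough sketch of the two
response shapes a client now has to handle (illustrative types only,
not part of the patch):

    /// Stand-in for the per-snapshot entry defined in the patch below.
    struct PruneResult {
        backup_id: String,
        keep: bool,
    }

    enum PruneResponse {
        /// `use-task` is true: the UPID of the spawned worker task;
        /// the result list goes to the task log instead.
        TaskStarted(String),
        /// `use-task` is false (the default) or a dry-run: the
        /// per-snapshot result list, returned directly as before.
        Results(Vec<PruneResult>),
    }
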
 src/api2/admin/datastore.rs | 146 ++++++++++++++++++++++--------------
 www/datastore/Prune.js      |   1 +
 2 files changed, 92 insertions(+), 55 deletions(-)
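
For context on the logging infra mentioned in the commit message: once
prune runs in a real task, a logger can be scoped per task via
`tokio::task_local!`. A minimal sketch of that pattern with illustrative
names (not the actual infra):

    use std::cell::RefCell;

    tokio::task_local! {
        // Illustrative per-task log buffer; the real infra will differ.
        static TASK_LOG: RefCell<Vec<String>>;
    }

    /// Run a future with its own task-scoped log buffer and return
    /// the collected lines alongside the future's output.
    async fn with_task_log<F>(fut: F) -> (F::Output, Vec<String>)
    where
        F: std::future::Future,
    {
        TASK_LOG
            .scope(RefCell::new(Vec::new()), async {
                let out = fut.await;
                let log = TASK_LOG.with(|l| l.borrow().clone());
                (out, log)
            })
            .await
    }

    /// Log from anywhere inside the task, without threading a worker
    /// handle through every function.
    fn log_line(msg: impl Into<String>) {
        let _ = TASK_LOG.try_with(|l| l.borrow_mut().push(msg.into()));
    }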

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index a95031e7..f2a94448 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -944,6 +944,12 @@ pub fn verify(
                 type: BackupNamespace,
                 optional: true,
             },
+            "use-task": {
+                type: bool,
+                default: false,
+                optional: true,
+                description: "Spins up an asynchronous task that does the work.",
+            },
         },
     },
     returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
@@ -960,6 +966,7 @@ pub fn prune(
     keep_options: KeepOptions,
     store: String,
     ns: Option<BackupNamespace>,
+    use_task: bool,
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
@@ -978,7 +985,20 @@ pub fn prune(
     let worker_id = format!("{}:{}:{}", store, ns, group);
     let group = datastore.backup_group(ns.clone(), group);
 
-    let mut prune_result = Vec::new();
+    #[derive(Debug, serde::Serialize)]
+    struct PruneResult {
+        #[serde(rename = "backup-type")]
+        backup_type: BackupType,
+        #[serde(rename = "backup-id")]
+        backup_id: String,
+        #[serde(rename = "backup-time")]
+        backup_time: i64,
+        keep: bool,
+        protected: bool,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        ns: Option<BackupNamespace>,
+    }
+    let mut prune_result: Vec<PruneResult> = Vec::new();
 
     let list = group.list_backups()?;
 
@@ -992,77 +1012,93 @@ pub fn prune(
         for (info, mark) in prune_info {
             let keep = keep_all || mark.keep();
 
-            let mut result = json!({
-                "backup-type": info.backup_dir.backup_type(),
-                "backup-id": info.backup_dir.backup_id(),
-                "backup-time": info.backup_dir.backup_time(),
-                "keep": keep,
-                "protected": mark.protected(),
-            });
+            let mut result = PruneResult {
+                backup_type: info.backup_dir.backup_type(),
+                backup_id: info.backup_dir.backup_id().to_owned(),
+                backup_time: info.backup_dir.backup_time(),
+                keep,
+                protected: mark.protected(),
+                ns: None,
+            };
             let prune_ns = info.backup_dir.backup_ns();
             if !prune_ns.is_root() {
-                result["ns"] = serde_json::to_value(prune_ns)?;
+                result.ns = Some(prune_ns.to_owned());
             }
             prune_result.push(result);
         }
         return Ok(json!(prune_result));
     }
-
-    // We use a WorkerTask just to have a task log, but run synchrounously
-    let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
-
-    if keep_all {
-        task_log!(worker, "No prune selection - keeping all files.");
-    } else {
-        let mut opts = Vec::new();
-        if !ns.is_root() {
-            opts.push(format!("--ns {ns}"));
+    let prune_group = move |worker: Arc<WorkerTask>| {
+        if keep_all {
+            task_log!(worker, "No prune selection - keeping all files.");
+        } else {
+            let mut opts = Vec::new();
+            if !ns.is_root() {
+                opts.push(format!("--ns {ns}"));
+            }
+            crate::server::cli_keep_options(&mut opts, &keep_options);
+
+            task_log!(worker, "retention options: {}", opts.join(" "));
+            task_log!(
+                worker,
+                "Starting prune on {} group \"{}\"",
+                print_store_and_ns(&store, &ns),
+                group.group(),
+            );
         }
-        crate::server::cli_keep_options(&mut opts, &keep_options);
-
-        task_log!(worker, "retention options: {}", opts.join(" "));
-        task_log!(
-            worker,
-            "Starting prune on {} group \"{}\"",
-            print_store_and_ns(&store, &ns),
-            group.group(),
-        );
-    }
 
-    for (info, mark) in prune_info {
-        let keep = keep_all || mark.keep();
+        for (info, mark) in prune_info {
+            let keep = keep_all || mark.keep();
 
-        let backup_time = info.backup_dir.backup_time();
-        let timestamp = info.backup_dir.backup_time_string();
-        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
+            let backup_time = info.backup_dir.backup_time();
+            let timestamp = info.backup_dir.backup_time_string();
+            let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
 
-        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
+            let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark);
 
-        task_log!(worker, "{}", msg);
+            task_log!(worker, "{}", msg);
 
-        prune_result.push(json!({
-            "backup-type": group.ty,
-            "backup-id": group.id,
-            "backup-time": backup_time,
-            "keep": keep,
-            "protected": mark.protected(),
-        }));
+            prune_result.push(PruneResult {
+                backup_type: group.ty,
+                backup_id: group.id.clone(),
+                backup_time,
+                keep,
+                protected: mark.protected(),
+                ns: None,
+            });
 
-        if !(dry_run || keep) {
-            if let Err(err) = info.backup_dir.destroy(false) {
-                task_warn!(
-                    worker,
-                    "failed to remove dir {:?}: {}",
-                    info.backup_dir.relative_path(),
-                    err,
-                );
+            if !(dry_run || keep) {
+                if let Err(err) = info.backup_dir.destroy(false) {
+                    task_warn!(
+                        worker,
+                        "failed to remove dir {:?}: {}",
+                        info.backup_dir.relative_path(),
+                        err,
+                    );
+                }
             }
         }
+        prune_result
+    };
+    if use_task {
+        let upid = WorkerTask::spawn(
+            "prune",
+            Some(worker_id),
+            auth_id.to_string(),
+            true,
+            move |worker| async move {
+                let result = prune_group(worker.clone());
+                task_log!(worker, "{:#?}", result);
+                Ok(())
+            },
+        )?;
+        Ok(json!(upid))
+    } else {
+        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
+        let result = prune_group(worker.clone());
+        worker.log_result(&Ok(()));
+        Ok(json!(result))
     }
-
-    worker.log_result(&Ok(()));
-
-    Ok(json!(prune_result))
 }
 
 #[api(
diff --git a/www/datastore/Prune.js b/www/datastore/Prune.js
index 81f6927b..5752907e 100644
--- a/www/datastore/Prune.js
+++ b/www/datastore/Prune.js
@@ -52,6 +52,7 @@ Ext.define('PBS.Datastore.PruneInputPanel', {
 	if (me.ns && me.ns !== '') {
 	    values.ns = me.ns;
 	}
+	values["use-task"] = true;
 	return values;
     },
 
-- 
2.43.0