[pbs-devel] [PATCH proxmox-backup 1/1] api: ui: add transfer-last parameter to pull and sync-job

Stefan Hanreich s.hanreich at proxmox.com
Fri Oct 14 12:30:31 CEST 2022


partially fixes #3701

Specifying this parameter limits the number of backups that get
synced via the pull command or sync job: only the N most recent
snapshots of each backup group are pulled/synced, all older
snapshots are skipped.

Additionally, a field to configure this value is added to the sync job UI.
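
As a rough standalone sketch of the intended semantics (not part of the
patch itself): with 5 snapshots in a group, ordered oldest to newest as
in pull_group, and transfer-last set to 2, only the two newest snapshots
are transferred:

    fn main() {
        let group_snapshots: u64 = 5;
        let transfer_last: u64 = 2;

        // keep a snapshot only if at most `transfer_last` snapshots
        // remain from its position on (the inverse of the skip check
        // added to pull_group below)
        let pulled: Vec<u64> = (0..group_snapshots)
            .filter(|pos| group_snapshots - pos <= transfer_last)
            .collect();

        assert_eq!(pulled, vec![3u64, 4]); // indices of the two newest
    }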

Signed-off-by: Stefan Hanreich <s.hanreich at proxmox.com>
---
 pbs-api-types/src/jobs.rs         | 12 ++++++++++++
 src/api2/config/sync.rs           |  9 +++++++++
 src/api2/pull.rs                  | 10 +++++++++-
 src/bin/proxmox-backup-manager.rs | 13 +++++++++++--
 src/server/pull.rs                | 16 ++++++++++++++--
 www/config/SyncView.js            |  9 ++++++++-
 www/window/SyncJobEdit.js         | 13 +++++++++++++
 7 files changed, 76 insertions(+), 6 deletions(-)
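
A quick standalone check (again not part of the patch) that the
unwrap_or default keeps the current behaviour when transfer-last is
unset: transfer_amount then equals the group size, so the new skip
condition in pull_group can never trigger:

    fn main() {
        let group_snapshots: u64 = 5;
        let transfer_last: Option<u64> = None;

        // unset transfer-last falls back to the full list length,
        // mirroring params.transfer_last.unwrap_or(list.len() as u64)
        let transfer_amount = transfer_last.unwrap_or(group_snapshots);

        let skipped = (0..group_snapshots)
            .filter(|pos| group_snapshots - pos > transfer_amount)
            .count();

        assert_eq!(skipped, 0); // nothing gets excluded
    }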

diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index 7f029af7..4ac84372 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -433,6 +433,12 @@ pub const GROUP_FILTER_SCHEMA: Schema = StringSchema::new(
 pub const GROUP_FILTER_LIST_SCHEMA: Schema =
     ArraySchema::new("List of group filters.", &GROUP_FILTER_SCHEMA).schema();
 
+pub const TRANSFER_LAST_SCHEMA: Schema = IntegerSchema::new(
+    "The maximum amount of snapshots to be transferred (per group)."
+)
+    .minimum(1)
+    .schema();
+
 #[api(
     properties: {
         id: {
@@ -482,6 +488,10 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
             schema: GROUP_FILTER_LIST_SCHEMA,
             optional: true,
         },
+        "transfer-last": {
+            schema: TRANSFER_LAST_SCHEMA,
+            optional: true,
+        },
     }
 )]
 #[derive(Serialize, Deserialize, Clone, Updater)]
@@ -511,6 +521,8 @@ pub struct SyncJobConfig {
     pub group_filter: Option<Vec<GroupFilter>>,
     #[serde(flatten)]
     pub limit: RateLimitConfig,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub transfer_last: Option<u64>,
 }
 
 impl SyncJobConfig {
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index 6f39b239..0250b290 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -216,6 +216,8 @@ pub enum DeletableProperty {
     remote_ns,
     /// Delete the max_depth property,
     max_depth,
+    /// Delete the transfer_last property,
+    transfer_last,
 }
 
 #[api(
@@ -310,6 +312,9 @@ pub fn update_sync_job(
                 DeletableProperty::max_depth => {
                     data.max_depth = None;
                 }
+                DeletableProperty::transfer_last => {
+                    data.transfer_last = None;
+                }
             }
         }
     }
@@ -344,6 +349,9 @@ pub fn update_sync_job(
     if let Some(group_filter) = update.group_filter {
         data.group_filter = Some(group_filter);
     }
+    if let Some(transfer_last) = update.transfer_last {
+        data.transfer_last = Some(transfer_last);
+    }
 
     if update.limit.rate_in.is_some() {
         data.limit.rate_in = update.limit.rate_in;
@@ -508,6 +516,7 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator
         group_filter: None,
         schedule: None,
         limit: pbs_api_types::RateLimitConfig::default(), // no limit
+        transfer_last: None,
     };
 
     // should work without ACLs
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index 193f28fe..20b0ecf3 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -12,6 +12,7 @@ use pbs_api_types::{
     Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
     GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
     PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
+    TRANSFER_LAST_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
 use proxmox_rest_server::WorkerTask;
@@ -78,6 +79,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
             sync_job.max_depth,
             sync_job.group_filter.clone(),
             sync_job.limit.clone(),
+            sync_job.transfer_last,
         )
     }
 }
@@ -203,7 +205,11 @@ pub fn do_sync_job(
             limit: {
                 type: RateLimitConfig,
                 flatten: true,
-            }
+            },
+            "transfer-last": {
+                schema: TRANSFER_LAST_SCHEMA,
+                optional: true,
+            },
         },
     },
     access: {
@@ -227,6 +233,7 @@ async fn pull(
     max_depth: Option<usize>,
     group_filter: Option<Vec<GroupFilter>>,
     limit: RateLimitConfig,
+    transfer_last: Option<u64>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -259,6 +266,7 @@ async fn pull(
         max_depth,
         group_filter,
         limit,
+        transfer_last,
     )?;
     let client = pull_params.client().await?;
 
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index 58e7e33a..fb956aa9 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -13,8 +13,8 @@ use pbs_api_types::percent_encoding::percent_encode_component;
 use pbs_api_types::{
     BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
     GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA,
-    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, UPID_SCHEMA,
-    VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA,
+    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::{display_task_log, view_task_result};
 use pbs_config::sync;
@@ -272,6 +272,10 @@ fn task_mgmt_cli() -> CommandLineInterface {
                 schema: OUTPUT_FORMAT,
                 optional: true,
             },
+            "transfer-last": {
+                schema: TRANSFER_LAST_SCHEMA,
+                optional: true,
+            },
         }
    }
 )]
@@ -287,6 +291,7 @@ async fn pull_datastore(
     max_depth: Option<usize>,
     group_filter: Option<Vec<GroupFilter>>,
     limit: RateLimitConfig,
+    transfer_last: Option<u64>,
     param: Value,
 ) -> Result<Value, Error> {
     let output_format = get_output_format(&param);
@@ -320,6 +325,10 @@ async fn pull_datastore(
         args["remove-vanished"] = Value::from(remove_vanished);
     }
 
+    if let Some(transfer_last) = transfer_last {
+        args["transfer-last"] = json!(transfer_last);
+    }
+
     let result = client.post("api2/json/pull", Some(args)).await?;
 
     view_task_result(&client, result, &output_format).await?;
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 77caf327..9be82669 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -60,6 +60,8 @@ pub(crate) struct PullParameters {
     group_filter: Option<Vec<GroupFilter>>,
     /// Rate limits for all transfers from `remote`
     limit: RateLimitConfig,
+    /// How many snapshots should be transferred at most (taking the newest N snapshots)
+    transfer_last: Option<u64>,
 }
 
 impl PullParameters {
@@ -79,6 +81,7 @@ impl PullParameters {
         max_depth: Option<usize>,
         group_filter: Option<Vec<GroupFilter>>,
         limit: RateLimitConfig,
+        transfer_last: Option<u64>,
     ) -> Result<Self, Error> {
         let store = DataStore::lookup_datastore(store, Some(Operation::Write))?;
 
@@ -110,6 +113,7 @@ impl PullParameters {
             max_depth,
             group_filter,
             limit,
+            transfer_last,
         })
     }
 
@@ -574,7 +578,7 @@ impl std::fmt::Display for SkipInfo {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         write!(
             f,
-            "skipped: {} snapshot(s) ({}) older than the newest local snapshot",
+            "skipped: {} snapshot(s) ({}) - older than the newest local snapshot or excluded by transfer-last",
             self.count,
             self.affected().map_err(|_| std::fmt::Error)?
         )
@@ -647,6 +651,8 @@ async fn pull_group(
         count: 0,
     };
 
+    let transfer_amount = params.transfer_last.unwrap_or(list.len() as u64);
+
     for (pos, item) in list.into_iter().enumerate() {
         let snapshot = item.backup;
 
@@ -669,6 +675,12 @@ async fn pull_group(
             }
         }
 
+        let i = pos as u64;
+        if progress.group_snapshots - i > transfer_amount {
+            skip_info.update(snapshot.time);
+            continue;
+        }
+
         // get updated auth_info (new tickets)
         let auth_info = client.login().await?;
 
@@ -697,7 +709,7 @@ async fn pull_group(
 
         let result = pull_snapshot_from(worker, reader, &snapshot, downloaded_chunks.clone()).await;
 
-        progress.done_snapshots = pos as u64 + 1;
+        progress.done_snapshots = i + 1;
         task_log!(worker, "percentage done: {}", progress);
 
         result?; // stop on error
diff --git a/www/config/SyncView.js b/www/config/SyncView.js
index a90e9a70..bf9072cb 100644
--- a/www/config/SyncView.js
+++ b/www/config/SyncView.js
@@ -3,7 +3,7 @@ Ext.define('pbs-sync-jobs-status', {
     fields: [
 	'id', 'owner', 'remote', 'remote-store', 'remote-ns', 'store', 'ns',
 	'schedule', 'group-filter', 'next-run', 'last-run-upid', 'last-run-state',
-	'last-run-endtime',
+	'last-run-endtime', 'transfer-last',
 	{
 	    name: 'duration',
 	    calculate: function(data) {
@@ -241,6 +241,13 @@ Ext.define('PBS.config.SyncJobView', {
 	    renderer: v => v ? Ext.String.htmlEncode(v) : gettext('All'),
 	    width: 80,
 	},
+	{
+	    header: gettext('Transfer Last'),
+	    dataIndex: 'transfer-last',
+	    flex: 1,
+	    sortable: true,
+	    hidden: true,
+	},
 	{
 	    header: gettext('Schedule'),
 	    dataIndex: 'schedule',
diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index 948ad5da..64447ebc 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -131,6 +131,19 @@ Ext.define('PBS.window.SyncJobEdit', {
 			submitAutoScaledSizeUnit: true,
 			// NOTE: handle deleteEmpty in onGetValues due to bandwidth field having a cbind too
 		    },
+		    {
+			fieldLabel: gettext('Transfer Last'),
+			xtype: 'pbsPruneKeepInput',
+			name: 'transfer-last',
+			emptyText: gettext('all'),
+			autoEl: {
+			    tag: 'div',
+			    'data-qtip': gettext('The maximum number of snapshots to be transferred (per group)'),
+			},
+			cbind: {
+			    deleteEmpty: '{!isCreate}',
+			},
+		    },
 		],
 
 		column2: [
-- 
2.30.2