[pbs-devel] [PATCH proxmox-backup 1/3] Add KeepOptions to Sync Job settings
Stefan Hanreich
s.hanreich at proxmox.com
Fri Nov 4 15:30:52 CET 2022
This enables users to specify prune options when creating/editing a sync
job. After a Sync Job runs successfully, a prune job with the specified
parameters gets started that prunes the synced backup groups. This
behaves exactly the same as a normal prune job.
Signed-off-by: Stefan Hanreich <s.hanreich at proxmox.com>
---
pbs-api-types/src/jobs.rs | 7 +++-
src/api2/config/sync.rs | 66 +++++++++++++++++++++++++++--
src/api2/pull.rs | 11 ++++-
src/bin/proxmox-backup-manager.rs | 20 +++++++--
src/server/pull.rs | 70 +++++++++++++++++++++++++++----
5 files changed, 156 insertions(+), 18 deletions(-)
diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index 7f029af7..c778039c 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -474,6 +474,9 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
limit: {
type: RateLimitConfig,
},
+ keep: {
+ type: KeepOptions,
+ },
schedule: {
optional: true,
schema: SYNC_SCHEDULE_SCHEMA,
@@ -511,6 +514,8 @@ pub struct SyncJobConfig {
pub group_filter: Option<Vec<GroupFilter>>,
#[serde(flatten)]
pub limit: RateLimitConfig,
+ #[serde(flatten)]
+ pub keep: KeepOptions,
}
impl SyncJobConfig {
@@ -572,7 +577,7 @@ pub struct SyncJobStatus {
},
}
)]
-#[derive(Serialize, Deserialize, Default, Updater)]
+#[derive(Serialize, Deserialize, Default, Updater, Clone)]
#[serde(rename_all = "kebab-case")]
/// Common pruning options
pub struct KeepOptions {
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index 6f39b239..82231715 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -7,9 +7,10 @@ use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
- Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
- PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
- PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
+ Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA,
+ PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+ PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT, PRIV_REMOTE_READ,
+ PROXMOX_CONFIG_DIGEST_SCHEMA
};
use pbs_config::sync;
@@ -216,6 +217,18 @@ pub enum DeletableProperty {
remote_ns,
/// Delete the max_depth property,
max_depth,
+ /// Delete keep_last prune option.
+ keep_last,
+ /// Delete keep_hourly prune option.
+ keep_hourly,
+ /// Delete keep_daily prune option.
+ keep_daily,
+ /// Delete keep_weekly prune option.
+ keep_weekly,
+ /// Delete keep_monthly prune option.
+ keep_monthly,
+ /// Delete keep_yearly prune option.
+ keep_yearly
}
#[api(
@@ -310,6 +323,24 @@ pub fn update_sync_job(
DeletableProperty::max_depth => {
data.max_depth = None;
}
+ DeletableProperty::keep_last => {
+ data.keep.keep_last = None;
+ }
+ DeletableProperty::keep_hourly => {
+ data.keep.keep_hourly = None;
+ }
+ DeletableProperty::keep_daily => {
+ data.keep.keep_daily = None;
+ }
+ DeletableProperty::keep_weekly => {
+ data.keep.keep_weekly = None;
+ }
+ DeletableProperty::keep_monthly => {
+ data.keep.keep_monthly = None;
+ }
+ DeletableProperty::keep_yearly => {
+ data.keep.keep_yearly = None;
+ }
}
}
}
@@ -381,6 +412,25 @@ pub fn update_sync_job(
}
}
+ if update.keep.keep_last.is_some() {
+ data.keep.keep_last = update.keep.keep_last;
+ }
+ if update.keep.keep_hourly.is_some() {
+ data.keep.keep_hourly = update.keep.keep_hourly;
+ }
+ if update.keep.keep_daily.is_some() {
+ data.keep.keep_daily = update.keep.keep_daily;
+ }
+ if update.keep.keep_weekly.is_some() {
+ data.keep.keep_weekly = update.keep.keep_weekly;
+ }
+ if update.keep.keep_monthly.is_some() {
+ data.keep.keep_monthly = update.keep.keep_monthly;
+ }
+ if update.keep.keep_yearly.is_some() {
+ data.keep.keep_yearly = update.keep.keep_yearly;
+ }
+
if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
bail!("permission check failed");
}
@@ -463,6 +513,8 @@ pub const ROUTER: Router = Router::new()
#[test]
fn sync_job_access_test() -> Result<(), Error> {
+ use pbs_api_types::KeepOptions;
+
let (user_cfg, _) = pbs_config::user::test_cfg_from_str(
r###"
user: noperm at pbs
@@ -508,6 +560,14 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator
group_filter: None,
schedule: None,
limit: pbs_api_types::RateLimitConfig::default(), // no limit
+ keep: KeepOptions {
+ keep_last: None,
+ keep_hourly: None,
+ keep_daily: None,
+ keep_weekly: None,
+ keep_monthly: None,
+ keep_yearly: None
+ },
};
// should work without ACLs
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index 193f28fe..f39e0b11 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -9,7 +9,7 @@ use proxmox_schema::api;
use proxmox_sys::task_log;
use pbs_api_types::{
- Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
+ Authid, BackupNamespace, GroupFilter, RateLimitConfig, KeepOptions, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
};
@@ -78,6 +78,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
sync_job.max_depth,
sync_job.group_filter.clone(),
sync_job.limit.clone(),
+ sync_job.keep.clone(),
)
}
}
@@ -203,7 +204,11 @@ pub fn do_sync_job(
limit: {
type: RateLimitConfig,
flatten: true,
- }
+ },
+ "keep-options": {
+ type: KeepOptions,
+ flatten: true,
+ },
},
},
access: {
@@ -227,6 +232,7 @@ async fn pull(
max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
+ keep_options: KeepOptions,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -259,6 +265,7 @@ async fn pull(
max_depth,
group_filter,
limit,
+ keep_options,
)?;
let client = pull_params.client().await?;
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index b9bfd701..ffd7960e 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -11,7 +11,7 @@ use proxmox_sys::fs::CreateOptions;
use pbs_api_types::percent_encoding::percent_encode_component;
use pbs_api_types::{
- BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
+ BackupNamespace, GroupFilter, KeepOptions, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA,
REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, UPID_SCHEMA,
VERIFICATION_OUTDATED_AFTER_SCHEMA,
@@ -268,6 +268,10 @@ fn task_mgmt_cli() -> CommandLineInterface {
type: RateLimitConfig,
flatten: true,
},
+ "keep-options": {
+ type: KeepOptions,
+ flatten: true,
+ },
"output-format": {
schema: OUTPUT_FORMAT,
optional: true,
@@ -287,6 +291,7 @@ async fn pull_datastore(
max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
+ keep_options: KeepOptions,
param: Value,
) -> Result<Value, Error> {
let output_format = get_output_format(¶m);
@@ -324,10 +329,17 @@ async fn pull_datastore(
.as_object_mut()
.ok_or_else(|| format_err!("limit is not an Object"))?;
- args
+ let mut keep_json = json!(keep_options);
+ let keep_map = keep_json
.as_object_mut()
- .unwrap()
- .append(limit_map);
+ .ok_or_else(|| format_err!("keep_options is not an Object"))?;
+
+ let args_map = args
+ .as_object_mut()
+ .unwrap();
+
+ args_map.append(limit_map);
+ args_map.append(keep_map);
let result = client.post("api2/json/pull", Some(args)).await?;
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 77caf327..20fda909 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -13,11 +13,11 @@ use pbs_config::CachedUserInfo;
use serde_json::json;
use proxmox_router::HttpError;
-use proxmox_sys::task_log;
+use proxmox_sys::{task_log, task_warn};
use pbs_api_types::{
- print_store_and_ns, Authid, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem,
- Operation, RateLimitConfig, Remote, SnapshotListItem, MAX_NAMESPACE_DEPTH,
+ print_store_and_ns, Authid, BackupNamespace, GroupFilter, GroupListItem, KeepOptions,
+ NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, MAX_NAMESPACE_DEPTH,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
};
@@ -34,6 +34,7 @@ use pbs_datastore::manifest::{
use pbs_datastore::{check_backup_owner, DataStore, StoreProgress};
use pbs_tools::sha::sha256;
use proxmox_rest_server::WorkerTask;
+use pbs_datastore::prune::compute_prune_info;
use crate::backup::{check_ns_modification_privs, check_ns_privs};
use crate::tools::parallel_handler::ParallelHandler;
@@ -60,6 +61,8 @@ pub(crate) struct PullParameters {
group_filter: Option<Vec<GroupFilter>>,
/// Rate limits for all transfers from `remote`
limit: RateLimitConfig,
+ /// Options for pruning after pulling
+ keep_options: KeepOptions,
}
impl PullParameters {
@@ -79,6 +82,7 @@ impl PullParameters {
max_depth: Option<usize>,
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
+ keep_options: KeepOptions,
) -> Result<Self, Error> {
let store = DataStore::lookup_datastore(store, Some(Operation::Write))?;
@@ -110,6 +114,7 @@ impl PullParameters {
max_depth,
group_filter,
limit,
+ keep_options,
})
}
@@ -1067,7 +1072,7 @@ pub(crate) async fn pull_ns(
let mut progress = StoreProgress::new(list.len() as u64);
- for (done, group) in list.into_iter().enumerate() {
+ for (done, group) in list.iter().enumerate() {
progress.done_groups = done as u64;
progress.done_snapshots = 0;
progress.group_snapshots = 0;
@@ -1082,7 +1087,7 @@ pub(crate) async fn pull_ns(
task_log!(
worker,
"sync group {} failed - group lock failed: {}",
- &group,
+ group,
err
);
errors = true; // do not stop here, instead continue
@@ -1096,7 +1101,7 @@ pub(crate) async fn pull_ns(
task_log!(
worker,
"sync group {} failed - owner check failed ({} != {})",
- &group,
+ group,
params.owner,
owner
);
@@ -1105,13 +1110,13 @@ pub(crate) async fn pull_ns(
worker,
client,
params,
- &group,
+ group,
source_ns.clone(),
&mut progress,
)
.await
{
- task_log!(worker, "sync group {} failed - {}", &group, err,);
+ task_log!(worker, "sync group {} failed - {}", group, err,);
errors = true; // do not stop here, instead continue
}
}
@@ -1157,5 +1162,54 @@ pub(crate) async fn pull_ns(
};
}
+ if params.keep_options.keeps_something() {
+ let result: Result<(), Error> = proxmox_lang::try_block!({
+ task_log!(worker, "running prune job");
+
+ for local_group in list.into_iter() {
+ let owner = params.store.get_owner(&target_ns, &local_group)?;
+ if check_backup_owner(&owner, ¶ms.owner).is_err() {
+ continue;
+ }
+
+ if let Some(ref group_filter) = ¶ms.group_filter {
+ if !apply_filters(&local_group, group_filter) {
+ continue;
+ }
+ }
+
+ task_log!(worker, "pruning backup group {}", &local_group);
+
+ let backup_group = params.store.backup_group(target_ns.clone(), local_group);
+ let prune_info = compute_prune_info(backup_group.list_backups()?, ¶ms.keep_options)?;
+
+ for (info, mark) in prune_info {
+ if mark.keep() {
+ task_log!(worker, "keep {}", info.backup_dir.dir());
+ continue;
+ }
+
+ task_log!(worker, "remove {}", info.backup_dir.dir());
+
+ if let Err(err) = info.backup_dir.destroy(false) {
+ task_warn!(
+ worker,
+ "failed to remove dir {:?}: {}",
+ info.backup_dir.relative_path(),
+ err,
+ );
+ errors = true;
+ }
+ }
+ }
+ Ok(())
+ });
+
+ if let Err(err) = result {
+ task_log!(worker, "error during pruning: {}", err);
+ errors = true;
+ };
+ }
+
Ok((progress, errors))
}
--
2.30.2
More information about the pbs-devel
mailing list