[pbs-devel] [PATCH proxmox-backup v3 28/41] datastore: prune groups/snapshots from S3 object store backend
Christian Ebner
c.ebner at proxmox.com
Mon Jun 16 16:21:43 CEST 2025
When pruning a backup group or a backup snapshot for a datastore with
S3 object store backend, remove the associated objects based on
their common prefix.
In order to exclude protected contents, add filtering based on the
presence of the protected marker.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
pbs-datastore/src/backup_info.rs | 53 +++++++++++++++++++++++++++++---
pbs-datastore/src/datastore.rs | 44 +++++++++++++++++++++++---
src/api2/admin/datastore.rs | 24 ++++++++++-----
3 files changed, 103 insertions(+), 18 deletions(-)
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 37a605b94..9252a4d3e 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -9,6 +9,7 @@ use std::time::Duration;
use anyhow::{bail, format_err, Context, Error};
use const_format::concatcp;
+use pbs_s3_client::{S3PathPrefix, S3_CONTENT_PREFIX};
use proxmox_sys::fs::{lock_dir_noblock, lock_dir_noblock_shared, replace_file, CreateOptions};
use proxmox_systemd::escape_unit;
@@ -18,11 +19,12 @@ use pbs_api_types::{
};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
+use crate::datastore::GROUP_NOTES_FILE_NAME;
use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME};
-use crate::{DataBlob, DataStore};
+use crate::{DataBlob, DataStore, DatastoreBackend};
pub const DATASTORE_LOCKS_DIR: &str = "/run/proxmox-backup/locks";
-const PROTECTED_MARKER_FILENAME: &str = ".protected";
+pub const PROTECTED_MARKER_FILENAME: &str = ".protected";
proxmox_schema::const_regex! {
pub BACKUP_FILES_AND_PROTECTED_REGEX = concatcp!(r"^(.*\.([fd]idx|blob)|\", PROTECTED_MARKER_FILENAME, ")$");
@@ -218,7 +220,7 @@ impl BackupGroup {
///
/// Returns `BackupGroupDeleteStats`, containing the number of deleted snapshots
/// and number of protected snaphsots, which therefore were not removed.
- pub fn destroy(&self) -> Result<BackupGroupDeleteStats, Error> {
+ pub fn destroy(&self, backend: &DatastoreBackend) -> Result<BackupGroupDeleteStats, Error> {
let _guard = self
.lock()
.with_context(|| format!("while destroying group '{self:?}'"))?;
@@ -232,10 +234,30 @@ impl BackupGroup {
delete_stats.increment_protected_snapshots();
continue;
}
- snap.destroy(false)?;
+ // for S3 as well, only clean up the local state here; the actual S3
+ // objects are removed below, reducing the number of required API calls.
+ snap.destroy(false, &DatastoreBackend::Filesystem)?;
delete_stats.increment_removed_snapshots();
}
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let path = self.relative_group_path();
+ let group_prefix = path
+ .to_str()
+ .ok_or_else(|| format_err!("invalid group path prefix"))?;
+ let prefix = format!("{S3_CONTENT_PREFIX}/{group_prefix}");
+ let delete_objects_error = proxmox_async::runtime::block_on(
+ s3_client.delete_objects_by_prefix_with_suffix_filter(
+ &S3PathPrefix::Some(prefix),
+ PROTECTED_MARKER_FILENAME,
+ &["owner", GROUP_NOTES_FILE_NAME],
+ ),
+ )?;
+ if delete_objects_error {
+ bail!("deleting objects failed");
+ }
+ }
+
// Note: make sure the old locking mechanism isn't used as `remove_dir_all` is not safe in
// that case
if delete_stats.all_removed() && !*OLD_LOCKING {
@@ -588,7 +610,7 @@ impl BackupDir {
/// Destroy the whole snapshot, bails if it's protected
///
/// Setting `force` to true skips locking and thus ignores if the backup is currently in use.
- pub fn destroy(&self, force: bool) -> Result<(), Error> {
+ pub fn destroy(&self, force: bool, backend: &DatastoreBackend) -> Result<(), Error> {
let (_guard, _manifest_guard);
if !force {
_guard = self
@@ -601,6 +623,20 @@ impl BackupDir {
bail!("cannot remove protected snapshot"); // use special error type?
}
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let path = self.relative_path();
+ let snapshot_prefix = path
+ .to_str()
+ .ok_or_else(|| format_err!("invalid snapshot path"))?;
+ let prefix = format!("{S3_CONTENT_PREFIX}/{snapshot_prefix}");
+ let delete_objects_error = proxmox_async::runtime::block_on(
+ s3_client.delete_objects_by_prefix(&S3PathPrefix::Some(prefix)),
+ )?;
+ if delete_objects_error {
+ bail!("deleting objects failed");
+ }
+ }
+
let full_path = self.full_path();
log::info!("removing backup snapshot {:?}", full_path);
std::fs::remove_dir_all(&full_path).map_err(|err| {
@@ -630,6 +666,13 @@ impl BackupDir {
// do to rectify the situation.
if guard.is_ok() && group.list_backups()?.is_empty() && !*OLD_LOCKING {
group.remove_group_dir()?;
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let path = group.relative_group_path().join("owner");
+ let owner_key = path
+ .to_str()
+ .ok_or_else(|| format_err!("invalid group path prefix"))?;
+ proxmox_async::runtime::block_on(s3_client.delete_object(owner_key.into()))?;
+ }
} else if let Err(err) = guard {
log::debug!("{err:#}");
}
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index cde47b064..c304eea21 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -29,8 +29,11 @@ use pbs_api_types::{
S3ClientConfig, S3ClientSecretsConfig, UPID,
};
use pbs_config::BackupLockGuard;
+use pbs_s3_client::{S3PathPrefix, S3_CONTENT_PREFIX};
-use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, OLD_LOCKING};
+use crate::backup_info::{
+ BackupDir, BackupGroup, BackupInfo, OLD_LOCKING, PROTECTED_MARKER_FILENAME,
+};
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
@@ -42,7 +45,7 @@ use crate::DataBlob;
static DATASTORE_MAP: LazyLock<Mutex<HashMap<String, Arc<DataStoreImpl>>>> =
LazyLock::new(|| Mutex::new(HashMap::new()));
-const GROUP_NOTES_FILE_NAME: &str = "notes";
+pub const GROUP_NOTES_FILE_NAME: &str = "notes";
const NAMESPACE_MARKER_FILENAME: &str = ".namespace";
/// checks if auth_id is owner, or, if owner is a token, if
@@ -660,7 +663,9 @@ impl DataStore {
let mut stats = BackupGroupDeleteStats::default();
for group in self.iter_backup_groups(ns.to_owned())? {
- let delete_stats = group?.destroy()?;
+ let group = group?;
+ let backend = self.backend()?;
+ let delete_stats = group.destroy(&backend)?;
stats.add(&delete_stats);
removed_all_groups = removed_all_groups && delete_stats.all_removed();
}
@@ -694,6 +699,8 @@ impl DataStore {
let store = self.name();
let mut removed_all_requested = true;
let mut stats = BackupGroupDeleteStats::default();
+ let backend = self.backend()?;
+
if delete_groups {
log::info!("removing whole namespace recursively below {store}:/{ns}",);
for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
@@ -701,6 +708,24 @@ impl DataStore {
stats.add(&delete_stats);
removed_all_requested = removed_all_requested && removed_ns_groups;
}
+
+ if let DatastoreBackend::S3(s3_client) = &backend {
+ let ns_dir = ns.path();
+ let ns_prefix = ns_dir
+ .to_str()
+ .ok_or_else(|| format_err!("invalid namespace path prefix"))?;
+ let prefix = format!("{S3_CONTENT_PREFIX}/{ns_prefix}");
+ let delete_objects_error = proxmox_async::runtime::block_on(
+ s3_client.delete_objects_by_prefix_with_suffix_filter(
+ &S3PathPrefix::Some(prefix),
+ PROTECTED_MARKER_FILENAME,
+ &["owner", GROUP_NOTES_FILE_NAME],
+ ),
+ )?;
+ if delete_objects_error {
+ bail!("deleting objects failed");
+ }
+ }
} else {
log::info!("pruning empty namespace recursively below {store}:/{ns}");
}
@@ -736,6 +761,15 @@ impl DataStore {
log::warn!("failed to remove namespace {ns} - {err}")
}
}
+ if let DatastoreBackend::S3(s3_client) = &backend {
+ // Only remove the namespace marker if the namespace was empty;
+ // then this is the same as the namespace being removed.
+ let ns_dir = ns.path().join(NAMESPACE_MARKER_FILENAME);
+ let ns_key = ns_dir
+ .to_str()
+ .ok_or_else(|| format_err!("invalid namespace path"))?;
+ proxmox_async::runtime::block_on(s3_client.delete_object(ns_key.into()))?;
+ }
}
}
@@ -753,7 +787,7 @@ impl DataStore {
) -> Result<BackupGroupDeleteStats, Error> {
let backup_group = self.backup_group(ns.clone(), backup_group.clone());
- backup_group.destroy()
+ backup_group.destroy(&self.backend()?)
}
/// Remove a backup directory including all content
@@ -765,7 +799,7 @@ impl DataStore {
) -> Result<(), Error> {
let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;
- backup_dir.destroy(force)
+ backup_dir.destroy(force, &self.backend()?)
}
/// Returns the time of the last successful backup
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 4b032e817..ed5ee8a0c 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -420,7 +420,7 @@ pub async fn delete_snapshot(
let snapshot = datastore.backup_dir(ns, backup_dir)?;
- snapshot.destroy(false)?;
+ snapshot.destroy(false, &datastore.backend()?)?;
Ok(Value::Null)
})
@@ -1086,13 +1086,21 @@ pub fn prune(
});
if !keep {
- if let Err(err) = backup_dir.destroy(false) {
- warn!(
- "failed to remove dir {:?}: {}",
- backup_dir.relative_path(),
- err,
- );
- }
+ match datastore.backend() {
+ Ok(backend) => {
+ if let Err(err) = backup_dir.destroy(false, &backend) {
+ warn!(
+ "failed to remove dir {:?}: {}",
+ backup_dir.relative_path(),
+ err,
+ );
+ }
+ }
+ Err(err) => warn!(
+ "failed to remove dir {:?}: {err}",
+ backup_dir.relative_path()
+ ),
+ };
}
}
prune_result
--
2.39.5
More information about the pbs-devel
mailing list