[pbs-devel] [PATCH proxmox-backup 2/3] GC: S3: factor out batch object deletion
Fabian Grünbichler
f.gruenbichler at proxmox.com
Fri Nov 21 10:06:00 CET 2025
Since the same deletion code appears twice, move it into a closure so both call sites share one implementation.
Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
---
pbs-datastore/src/datastore.rs | 46 +++++++++++++++-------------------
1 file changed, 20 insertions(+), 26 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 1afcef53a..f89fd28b0 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1660,6 +1660,24 @@ impl DataStore {
.context("failed to list chunk in s3 object store")?;
let mut delete_list = Vec::with_capacity(S3_DELETE_BATCH_LIMIT);
+
+ let s3_delete_batch = |delete_list: &mut Vec<(S3ObjectKey, BackupLockGuard)>,
+ s3_client: &Arc<S3Client>|
+ -> Result<(), Error> {
+ let delete_objects_result = proxmox_async::runtime::block_on(
+ s3_client.delete_objects(
+ &delete_list
+ .iter()
+ .map(|(key, _)| key.clone())
+ .collect::<Vec<S3ObjectKey>>(),
+ ),
+ )?;
+ if let Some(_err) = delete_objects_result.error {
+ bail!("failed to delete some objects");
+ }
+ delete_list.clear();
+ Ok(())
+ };
loop {
for content in list_bucket_result.contents {
let (chunk_path, digest, bad) =
@@ -1724,37 +1742,13 @@ impl DataStore {
// limit pending deletes to avoid holding too many chunk flocks
if delete_list.len() > S3_DELETE_BATCH_LIMIT {
- let delete_objects_result = proxmox_async::runtime::block_on(
- s3_client.delete_objects(
- &delete_list
- .iter()
- .map(|(key, _)| key.clone())
- .collect::<Vec<S3ObjectKey>>(),
- ),
- )?;
- if let Some(_err) = delete_objects_result.error {
- bail!("failed to delete some objects");
- }
- // release all chunk guards
- delete_list.clear();
+ s3_delete_batch(&mut delete_list, s3_client)?;
}
}
// delete the last batch of objects, if there are any remaining
if !delete_list.is_empty() {
- let delete_objects_result = proxmox_async::runtime::block_on(
- s3_client.delete_objects(
- &delete_list
- .iter()
- .map(|(key, _)| key.clone())
- .collect::<Vec<S3ObjectKey>>(),
- ),
- )?;
- if let Some(_err) = delete_objects_result.error {
- bail!("failed to delete some objects");
- }
- // release all chunk guards
- delete_list.clear();
+ s3_delete_batch(&mut delete_list, s3_client)?;
}
// Process next batch of chunks if there is more
--
2.47.3
More information about the pbs-devel
mailing list