[pbs-devel] [PATCH proxmox-backup v6 13/21] GC: cleanup chunk markers from cache in phase 3 on s3 backends

Christian Ebner c.ebner at proxmox.com
Fri Nov 14 14:18:53 CET 2025


Pass along the in-memory cache when sweeping unused chunks in phase 3
of garbage collection for datastores with s3 backend.
If a dangling marker file is detected — which only happens when the
chunk was removed from the object store by some unexpected interaction
(e.g. manually deleted from the bucket) — that marker must be removed
to restore a consistent state (snapshots referencing the chunk remain
corrupt, however).

Clear such a chunk from both the in-memory cache and the local
datastore cache, so it can be re-uploaded by future backup or sync jobs.

Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
 pbs-datastore/src/chunk_store.rs | 22 +++++++++++++++++++++-
 pbs-datastore/src/datastore.rs   |  2 ++
 2 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index f5a77276d..20f71efef 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -5,6 +5,7 @@ use std::sync::{Arc, Mutex};
 use std::time::Duration;
 
 use anyhow::{bail, format_err, Context, Error};
+use hex::FromHex;
 use tracing::{info, warn};
 
 use pbs_api_types::{DatastoreFSyncLevel, GarbageCollectionStatus};
@@ -22,7 +23,7 @@ use crate::data_blob::DataChunkBuilder;
 use crate::file_formats::{
     COMPRESSED_BLOB_MAGIC_1_0, ENCRYPTED_BLOB_MAGIC_1_0, UNCOMPRESSED_BLOB_MAGIC_1_0,
 };
-use crate::DataBlob;
+use crate::{DataBlob, LocalDatastoreLruCache};
 
 /// File system based chunk store
 pub struct ChunkStore {
@@ -383,6 +384,7 @@ impl ChunkStore {
         min_atime: i64,
         status: &mut GarbageCollectionStatus,
         worker: &dyn WorkerTaskContext,
+        cache: Option<&LocalDatastoreLruCache>,
     ) -> Result<(), Error> {
         // unwrap: only `None` in unit tests
         assert!(self.locker.is_some());
@@ -436,6 +438,24 @@ impl ChunkStore {
                         bad,
                         status,
                         || {
+                            // non-bad S3 chunks need to be removed via cache
+                            if let Some(cache) = cache {
+                                if !bad {
+                                    let digest = <[u8; 32]>::from_hex(filename.to_bytes())?;
+
+                                    // unless there is a concurrent upload pending,
+                                    // must never block due to required locking order
+                                    if let Ok(_guard) =
+                                        self.lock_chunk(&digest, Duration::from_secs(0))
+                                    {
+                                        cache.remove(&digest)?;
+                                    }
+
+                                    return Ok(());
+                                }
+                            }
+
+                            // bad or local chunks
                             unlinkat(Some(dirfd), filename, UnlinkatFlags::NoRemoveDir).map_err(
                                 |err| {
                                     format_err!(
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index ff8517e45..083a83f7d 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1755,6 +1755,7 @@ impl DataStore {
                 min_atime,
                 &mut tmp_gc_status,
                 worker,
+                self.cache(),
             )?;
         } else {
             self.inner.chunk_store.sweep_unused_chunks(
@@ -1762,6 +1763,7 @@ impl DataStore {
                 min_atime,
                 &mut gc_status,
                 worker,
+                None,
             )?;
         }
 
-- 
2.47.3





More information about the pbs-devel mailing list