[pbs-devel] [PATCH proxmox-backup v4 07/14] datastore: add locking to protect against races on chunk insert for s3

Christian Ebner c.ebner at proxmox.com
Mon Nov 10 12:56:20 CET 2025


Acquire the per-chunk file lock to get exclusive access to the chunk
on insert, making it possible to guarantee that S3 backend
operations and the local caches reflect a consistent state, without
interference from other operations such as chunk renaming during
verification or garbage collection.

Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
 pbs-datastore/src/datastore.rs | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 3beba940c..10acc91a0 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1923,6 +1923,10 @@ impl DataStore {
         match backend {
             DatastoreBackend::Filesystem => self.inner.chunk_store.insert_chunk(chunk, digest),
             DatastoreBackend::S3(s3_client) => {
+                let _chunk_guard = self
+                    .inner
+                    .chunk_store
+                    .lock_chunk(digest, CHUNK_LOCK_TIMEOUT)?;
                 let chunk_data: Bytes = chunk.raw_data().to_vec().into();
                 let chunk_size = chunk_data.len() as u64;
                 let object_key = crate::s3::object_key_from_digest(digest)?;
@@ -1943,6 +1947,10 @@ impl DataStore {
     ) -> Result<(bool, u64), Error> {
         let chunk_data = chunk.raw_data();
         let chunk_size = chunk_data.len() as u64;
+        let _chunk_guard = self
+            .inner
+            .chunk_store
+            .lock_chunk(digest, CHUNK_LOCK_TIMEOUT)?;
 
         // Avoid re-upload to S3 if the chunk is either present in the in-memory LRU cache
         // or the chunk marker file exists on filesystem. The latter means the chunk has
-- 
2.47.3





More information about the pbs-devel mailing list