[pbs-devel] [PATCH proxmox-backup v3 7/8] api: chunk upload: fix race with garbage collection for no-cache on s3
Christian Ebner
c.ebner at proxmox.com
Wed Oct 15 18:40:07 CEST 2025
Chunks uploaded to the s3 backend with the no-cache flag set are never
inserted into the local datastore cache. The presence of the chunk
marker file is, however, required so that garbage collection does not
clean up the chunks. While the marker files are created during phase 1
of garbage collection for indexed chunks, chunks of an in-progress
backup with the no-cache flag set are not yet referenced by any index
and therefore get no marker.
Therefore, mark chunks as in-progress while they are being uploaded,
just like in the regular cached mode, but replace the marker with the
zero-sized chunk marker file once the upload has finished, to avoid
incorrect garbage collection cleanup.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
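Note (illustration only): the intended marker lifecycle for a no-cache
upload looks roughly like the sketch below. The paths, the `.upload`
suffix and the helper `upload_to_s3` are placeholders, not the actual
proxmox-backup identifiers:

    use std::fs::{self, File};
    use std::io;
    use std::path::Path;

    /// Sketch of the no-cache upload marker lifecycle (hypothetical
    /// helpers and file naming, not the actual patch code).
    fn upload_no_cache(chunk_dir: &Path, digest_hex: &str, data: &[u8]) -> io::Result<()> {
        let marker = chunk_dir.join(format!("{digest_hex}.upload"));
        let chunk = chunk_dir.join(digest_hex);

        // 1. Mark the chunk as in-progress before uploading, so a concurrent
        //    garbage collection run knows not to remove it.
        File::create(&marker)?;

        // 2. Upload the chunk payload to the s3 backend (stand-in call).
        upload_to_s3(digest_hex, data)?;

        // 3. Replace the in-progress marker with the zero-sized chunk marker;
        //    the rename leaves no window in which neither file exists.
        fs::rename(&marker, &chunk)?;
        Ok(())
    }

    fn upload_to_s3(_digest_hex: &str, _data: &[u8]) -> io::Result<()> {
        Ok(()) // stand-in for the actual s3 client upload
    }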
pbs-datastore/src/chunk_store.rs | 26 ++++++++++++++++++++++++++
pbs-datastore/src/datastore.rs | 7 +++++++
src/api2/backup/upload_chunk.rs | 4 ++++
3 files changed, 37 insertions(+)
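The subtle part is the fallback in persist_backend_upload_marker below:
the rename can fail when a concurrent upload of the same chunk through
the cached code path has already inserted the chunk proper. A minimal
standalone sketch of that rename-or-cleanup pattern, with hypothetical
paths (this is not the patch code itself):

    use std::fs;
    use std::io;
    use std::path::Path;

    /// Promote an upload marker to a chunk marker, tolerating a concurrent
    /// insert of the real chunk (hypothetical file layout).
    fn promote_marker(marker: &Path, chunk: &Path) -> io::Result<()> {
        match fs::rename(marker, chunk) {
            Ok(()) => Ok(()),
            Err(err) => {
                // The rename fails e.g. when the marker is already gone. If
                // the chunk exists, a concurrent writer won the race and any
                // leftover marker is safe to discard.
                if chunk.exists() {
                    let _ = fs::remove_file(marker); // ignore ENOENT here
                    Ok(())
                } else {
                    Err(err)
                }
            }
        }
    }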
diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index 2693a1c11..1e71b2970 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -646,6 +646,32 @@ impl ChunkStore {
         Ok(atime)
     }
 
+    /// Transform the backend upload marker into a chunk marker.
+    ///
+    /// If the chunk marker is already present, its atime will be updated instead.
+    pub(crate) fn persist_backend_upload_marker(&self, digest: &[u8; 32]) -> Result<(), Error> {
+        if self.datastore_backend_type == DatastoreBackendType::Filesystem {
+            bail!("cannot create backend upload marker, not a cache store");
+        }
+        let (marker_path, _digest_str) = self.chunk_backed_upload_marker_path(digest);
+        let (chunk_path, digest_str) = self.chunk_path(digest);
+        let _lock = self.mutex.lock();
+
+        if let Err(err) = std::fs::rename(&marker_path, chunk_path) {
+            // Check that the chunk has been inserted concurrently; it is then
+            // safe to clean up the upload marker nevertheless.
+            if self.cond_touch_chunk(digest, false)? {
+                std::fs::remove_file(&marker_path)?;
+                return Ok(());
+            }
+
+            return Err(format_err!(
+                "persisting backend upload marker failed for {digest_str} - {err}"
+            ));
+        }
+        Ok(())
+    }
+
     pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
         self.insert_chunk_impl(chunk, digest, |_, _| Ok(()))
     }
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index aa34ab037..69c87c336 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1871,6 +1871,13 @@ impl DataStore {
         self.inner.chunk_store.touch_backend_upload_marker(digest)
     }
 
+    /// Persist the backend upload marker as a zero-sized chunk marker.
+    ///
+    /// Marks the chunk as present in the local store cache without inserting its payload.
+    pub fn persist_backend_upload_marker(&self, digest: &[u8; 32]) -> Result<(), Error> {
+        self.inner.chunk_store.persist_backend_upload_marker(digest)
+    }
+
     pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
         let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
         std::fs::metadata(chunk_path).map_err(Error::from)
diff --git a/src/api2/backup/upload_chunk.rs b/src/api2/backup/upload_chunk.rs
index 0640f3652..bc64054a8 100644
--- a/src/api2/backup/upload_chunk.rs
+++ b/src/api2/backup/upload_chunk.rs
@@ -263,10 +263,14 @@ async fn upload_to_backend(
             if env.no_cache {
                 let object_key = pbs_datastore::s3::object_key_from_digest(&digest)?;
+                if !datastore.touch_backend_upload_marker(&digest)? {
+                    return Ok((digest, size, encoded_size, true));
+                }
                 let is_duplicate = s3_client
                     .upload_replace_on_final_retry(object_key, data)
                     .await
                     .map_err(|err| format_err!("failed to upload chunk to s3 backend - {err:#}"))?;
+                datastore.persist_backend_upload_marker(&digest)?;
                 return Ok((digest, size, encoded_size, is_duplicate));
             }
--
2.47.3