[pbs-devel] [PATCH proxmox-backup 09/17] datastore: refactor rename_corrupt_chunk error handling
Christian Ebner
c.ebner at proxmox.com
Mon Nov 3 12:31:12 CET 2025
As part of the verification process, the helper was not intended to
return errors on failure, but rather to just log information and errors.
Refactor the code so that the helper method optionally returns the
new chunk path after it has been renamed, `None` if the source path
could not be found, and an error otherwise.
However, keep logging both the error and the success message at info
level at the call site, so as not to interfere with the task log.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
pbs-datastore/src/datastore.rs | 85 ++++++++++++++--------------------
src/backup/verify.rs | 12 ++++-
2 files changed, 44 insertions(+), 53 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index ebfbf5229..397c37e56 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -2568,13 +2568,15 @@ impl DataStore {
Ok(())
}
- pub fn rename_corrupt_chunk(&self, digest: &[u8; 32]) {
+ /// Renames a corrupt chunk, returning the new path if the chunk was renamed successfully.
+ /// Returns with `Ok(None)` if the chunk source was not found.
+ pub fn rename_corrupt_chunk(&self, digest: &[u8; 32]) -> Result<Option<PathBuf>, Error> {
let (path, digest_str) = self.chunk_path(digest);
let mut counter = 0;
let mut new_path = path.clone();
loop {
- new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
+ new_path.set_file_name(format!("{digest_str}.{counter}.bad"));
if new_path.exists() && counter < 9 {
counter += 1;
} else {
@@ -2582,59 +2584,40 @@ impl DataStore {
}
}
- let backend = match self.backend() {
- Ok(backend) => backend,
- Err(err) => {
- info!(
- "failed to get backend while trying to rename bad chunk: {digest_str} - {err}"
- );
- return;
- }
- };
+ let backend = self.backend().map_err(|err| {
+ format_err!(
+ "failed to get backend while trying to rename bad chunk: {digest_str} - {err}"
+ )
+ })?;
if let DatastoreBackend::S3(s3_client) = backend {
- let suffix = format!(".{}.bad", counter);
- let target_key = match crate::s3::object_key_from_digest_with_suffix(digest, &suffix) {
- Ok(target_key) => target_key,
- Err(err) => {
- info!("could not generate target key for corrupt chunk {path:?} - {err}");
- return;
- }
- };
- let object_key = match crate::s3::object_key_from_digest(digest) {
- Ok(object_key) => object_key,
- Err(err) => {
- info!("could not generate object key for corrupt chunk {path:?} - {err}");
- return;
- }
- };
- if proxmox_async::runtime::block_on(
- s3_client.copy_object(object_key.clone(), target_key),
- )
- .is_ok()
- {
- if proxmox_async::runtime::block_on(s3_client.delete_object(object_key)).is_err() {
- info!("failed to delete corrupt chunk on s3 backend: {digest_str}");
- }
- } else {
- info!("failed to copy corrupt chunk on s3 backend: {digest_str}");
- // Early return to leave the potentially locally cached chunk in the same state as
- // on the object store. Verification might have failed because of connection issue
- // after all.
- return;
- }
+ let suffix = format!(".{counter}.bad");
+ let target_key = crate::s3::object_key_from_digest_with_suffix(digest, &suffix)
+ .map_err(|err| {
+ format_err!("could not generate target key for corrupt chunk {path:?} - {err}")
+ })?;
+ let object_key = crate::s3::object_key_from_digest(digest).map_err(|err| {
+ format_err!("could not generate object key for corrupt chunk {path:?} - {err}")
+ })?;
+
+ proxmox_async::runtime::block_on(s3_client.copy_object(object_key.clone(), target_key))
+ .map_err(|err| {
+ format_err!("failed to copy corrupt chunk on s3 backend: {digest_str} - {err}")
+ })?;
+
+ proxmox_async::runtime::block_on(s3_client.delete_object(object_key)).map_err(
+ |err| {
+ format_err!(
+ "failed to delete corrupt chunk on s3 backend: {digest_str} - {err}"
+ )
+ },
+ )?;
}
match std::fs::rename(&path, &new_path) {
- Ok(_) => {
- info!("corrupt chunk renamed to {:?}", &new_path);
- }
- Err(err) => {
- match err.kind() {
- std::io::ErrorKind::NotFound => { /* ignored */ }
- _ => info!("could not rename corrupt chunk {:?} - {err}", &path),
- }
- }
- };
+ Ok(_) => Ok(Some(new_path)),
+ Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(None),
+ Err(err) => bail!("could not rename corrupt chunk {path:?} - {err}"),
+ }
}
}
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 7fac46e18..31c03891a 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -118,7 +118,11 @@ impl VerifyWorker {
corrupt_chunks2.lock().unwrap().insert(digest);
info!("{err}");
errors2.fetch_add(1, Ordering::SeqCst);
- datastore2.rename_corrupt_chunk(&digest);
+ match datastore2.rename_corrupt_chunk(&digest) {
+ Ok(Some(new_path)) => info!("corrupt chunk renamed to {new_path:?}"),
+ Err(err) => info!("{err}"),
+ _ => (),
+ }
} else {
verified_chunks2.lock().unwrap().insert(digest);
}
@@ -265,7 +269,11 @@ impl VerifyWorker {
corrupt_chunks.insert(digest);
error!(message);
errors.fetch_add(1, Ordering::SeqCst);
- self.datastore.rename_corrupt_chunk(&digest);
+ match self.datastore.rename_corrupt_chunk(&digest) {
+ Ok(Some(new_path)) => info!("corrupt chunk renamed to {new_path:?}"),
+ Err(err) => info!("{err}"),
+ _ => (),
+ }
}
fn verify_fixed_index(&self, backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
--
2.47.3
More information about the pbs-devel
mailing list