[pbs-devel] [PATCH proxmox-backup v2 1/4] api: verify: move chunk loading into parallel handler

Nicolas Frey n.frey at proxmox.com
Thu Nov 6 17:13:13 CET 2025


This way, the chunks will be loaded in parallel in addition to being
checked in parallel.

Depending on the underlying storage, this can speed up reading chunks
from disk, especially when the storage benefits from a higher IO depth
and the CPU is faster than the storage.

This introduces a new state struct `IndexVerifyState` so that only a
single `Arc` needs to be passed around and cloned.

Originally-by: Dominik Csapak <d.csapak at proxmox.com>
Signed-off-by: Nicolas Frey <n.frey at proxmox.com>
---
 src/backup/verify.rs | 134 +++++++++++++++++++++++++------------------
 1 file changed, 78 insertions(+), 56 deletions(-)
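
For context, the pipeline this patch sets up looks roughly as follows: a
"read chunks" pool loads chunks and forwards them over a channel to the
"verify chunk decoder" pool, while the shared counters live behind a
single Arc. The sketch below only reproduces that shape with plain std
threads and mpsc channels; it does not use the crate's
ParallelHandler/SendHandle API, and Chunk, State, load_chunk and
verify_chunk are made-up placeholders rather than PBS types.

    use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
    use std::sync::{mpsc, Arc, Mutex};
    use std::thread;

    struct Chunk { digest: u32, data: Vec<u8> }
    struct State { read_bytes: AtomicU64, errors: AtomicUsize }

    fn load_chunk(digest: u32) -> Chunk {
        Chunk { digest, data: vec![0u8; 4096] } // stand-in for loading from disk/S3
    }

    fn verify_chunk(chunk: &Chunk) -> bool {
        chunk.digest % 97 != 0 // stand-in for DataBlob::verify_unencrypted
    }

    // Spawn `threads` workers that pull items from a shared receiver until all
    // senders are dropped; each worker gets its own clone of the handler.
    fn spawn_pool<T, F>(threads: usize, rx: mpsc::Receiver<T>, work: F) -> Vec<thread::JoinHandle<()>>
    where
        T: Send + 'static,
        F: Fn(T) + Clone + Send + 'static,
    {
        let rx = Arc::new(Mutex::new(rx));
        (0..threads)
            .map(|_| {
                let rx = Arc::clone(&rx);
                let work = work.clone();
                thread::spawn(move || loop {
                    let item = match rx.lock().unwrap().recv() {
                        Ok(item) => item,
                        Err(_) => break, // channel closed, no more work
                    };
                    work(item);
                })
            })
            .collect()
    }

    fn main() {
        let state = Arc::new(State { read_bytes: AtomicU64::new(0), errors: AtomicUsize::new(0) });

        // decoder pool: verifies chunks in parallel
        let (decode_tx, decode_rx) = mpsc::channel::<Chunk>();
        let decoders = spawn_pool(4, decode_rx, {
            let state = Arc::clone(&state);
            move |chunk: Chunk| {
                if !verify_chunk(&chunk) {
                    state.errors.fetch_add(1, Ordering::SeqCst);
                }
            }
        });

        // reader pool: loads chunks in parallel and feeds the decoder pool
        let (read_tx, read_rx) = mpsc::channel::<u32>();
        let readers = spawn_pool(4, read_rx, {
            let state = Arc::clone(&state);
            move |digest: u32| {
                let chunk = load_chunk(digest);
                state.read_bytes.fetch_add(chunk.data.len() as u64, Ordering::SeqCst);
                decode_tx.send(chunk).unwrap();
            }
        });

        for digest in 0..1_000 {
            read_tx.send(digest).unwrap();
        }
        drop(read_tx); // readers drain the queue, exit, and drop their decode_tx clones
        for t in readers { t.join().unwrap(); }
        for t in decoders { t.join().unwrap(); }

        println!(
            "read {} bytes, {} verification errors",
            state.read_bytes.load(Ordering::SeqCst),
            state.errors.load(Ordering::SeqCst)
        );
    }

The extra reader stage means several chunk loads can be in flight at
once, which is where the IO-depth-related speedup described above comes
from; the decoder stage is unchanged apart from taking its shared
counters from a single state struct.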

diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 31c03891..910a3ed5 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -1,6 +1,6 @@
 use pbs_config::BackupLockGuard;
 use std::collections::HashSet;
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
 use std::sync::{Arc, Mutex};
 use std::time::Instant;
 
@@ -20,7 +20,7 @@ use pbs_datastore::index::{ChunkReadInfo, IndexFile};
 use pbs_datastore::manifest::{BackupManifest, FileInfo};
 use pbs_datastore::{DataBlob, DataStore, DatastoreBackend, StoreProgress};
 
-use crate::tools::parallel_handler::ParallelHandler;
+use crate::tools::parallel_handler::{ParallelHandler, SendHandle};
 
 use crate::backup::hierarchy::ListAccessibleBackupGroups;
 
@@ -34,6 +34,15 @@ pub struct VerifyWorker {
     backend: DatastoreBackend,
 }
 
+struct IndexVerifyState {
+    read_bytes: AtomicU64,
+    decoded_bytes: AtomicU64,
+    errors: AtomicUsize,
+    datastore: Arc<DataStore>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+}
+
 impl VerifyWorker {
     /// Creates a new VerifyWorker for a given task worker and datastore.
     pub fn new(
@@ -81,27 +90,25 @@ impl VerifyWorker {
         index: Box<dyn IndexFile + Send>,
         crypt_mode: CryptMode,
     ) -> Result<(), Error> {
-        let errors = Arc::new(AtomicUsize::new(0));
-
         let start_time = Instant::now();
 
-        let mut read_bytes = 0;
-        let mut decoded_bytes = 0;
-
-        let datastore2 = Arc::clone(&self.datastore);
-        let corrupt_chunks2 = Arc::clone(&self.corrupt_chunks);
-        let verified_chunks2 = Arc::clone(&self.verified_chunks);
-        let errors2 = Arc::clone(&errors);
-
-        let decoder_pool = ParallelHandler::new(
-            "verify chunk decoder",
-            4,
+        let verify_state = Arc::new(IndexVerifyState {
+            read_bytes: AtomicU64::new(0),
+            decoded_bytes: AtomicU64::new(0),
+            errors: AtomicUsize::new(0),
+            datastore: Arc::clone(&self.datastore),
+            corrupt_chunks: Arc::clone(&self.corrupt_chunks),
+            verified_chunks: Arc::clone(&self.verified_chunks),
+        });
+
+        let decoder_pool = ParallelHandler::new("verify chunk decoder", 4, {
+            let verify_state = Arc::clone(&verify_state);
             move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
                 let chunk_crypt_mode = match chunk.crypt_mode() {
                     Err(err) => {
-                        corrupt_chunks2.lock().unwrap().insert(digest);
+                        verify_state.corrupt_chunks.lock().unwrap().insert(digest);
                         info!("can't verify chunk, unknown CryptMode - {err}");
-                        errors2.fetch_add(1, Ordering::SeqCst);
+                        verify_state.errors.fetch_add(1, Ordering::SeqCst);
                         return Ok(());
                     }
                     Ok(mode) => mode,
@@ -111,25 +118,25 @@ impl VerifyWorker {
                     info!(
                     "chunk CryptMode {chunk_crypt_mode:?} does not match index CryptMode {crypt_mode:?}"
                 );
-                    errors2.fetch_add(1, Ordering::SeqCst);
+                    verify_state.errors.fetch_add(1, Ordering::SeqCst);
                 }
 
                 if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
-                    corrupt_chunks2.lock().unwrap().insert(digest);
+                    verify_state.corrupt_chunks.lock().unwrap().insert(digest);
                     info!("{err}");
-                    errors2.fetch_add(1, Ordering::SeqCst);
-                    match datastore2.rename_corrupt_chunk(&digest) {
+                    verify_state.errors.fetch_add(1, Ordering::SeqCst);
+                    match verify_state.datastore.rename_corrupt_chunk(&digest) {
                         Ok(Some(new_path)) => info!("corrupt chunk renamed to {new_path:?}"),
                         Err(err) => info!("{err}"),
                         _ => (),
                     }
                 } else {
-                    verified_chunks2.lock().unwrap().insert(digest);
+                    verify_state.verified_chunks.lock().unwrap().insert(digest);
                 }
 
                 Ok(())
-            },
-        );
+            }
+        });
 
         let skip_chunk = |digest: &[u8; 32]| -> bool {
             if self.verified_chunks.lock().unwrap().contains(digest) {
@@ -137,7 +144,7 @@ impl VerifyWorker {
             } else if self.corrupt_chunks.lock().unwrap().contains(digest) {
                 let digest_str = hex::encode(digest);
                 info!("chunk {digest_str} was marked as corrupt");
-                errors.fetch_add(1, Ordering::SeqCst);
+                verify_state.errors.fetch_add(1, Ordering::SeqCst);
                 true
             } else {
                 false
@@ -156,6 +163,21 @@ impl VerifyWorker {
             .datastore
             .get_chunks_in_order(&*index, skip_chunk, check_abort)?;
 
+        let reader_pool = ParallelHandler::new("read chunks", 4, {
+            let decoder_pool = decoder_pool.channel();
+            let verify_state = Arc::clone(&verify_state);
+            let backend = self.backend.clone();
+
+            move |info: ChunkReadInfo| {
+                Self::verify_chunk_by_backend(
+                    &backend,
+                    Arc::clone(&verify_state),
+                    &decoder_pool,
+                    &info,
+                )
+            }
+        });
+
         for (pos, _) in chunk_list {
             self.worker.check_abort()?;
             self.worker.fail_on_shutdown()?;
@@ -167,58 +189,56 @@ impl VerifyWorker {
                 continue; // already verified or marked corrupt
             }
 
-            self.verify_chunk_by_backend(
-                &info,
-                &mut read_bytes,
-                &mut decoded_bytes,
-                Arc::clone(&errors),
-                &decoder_pool,
-            )?;
+            reader_pool.send(info)?;
         }
 
-        decoder_pool.complete()?;
+        reader_pool.complete()?;
 
         let elapsed = start_time.elapsed().as_secs_f64();
 
+        let read_bytes = verify_state.read_bytes.load(Ordering::SeqCst);
+        let decoded_bytes = verify_state.decoded_bytes.load(Ordering::SeqCst);
+
         let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
         let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);
 
         let read_speed = read_bytes_mib / elapsed;
         let decode_speed = decoded_bytes_mib / elapsed;
 
-        let error_count = errors.load(Ordering::SeqCst);
+        let error_count = verify_state.errors.load(Ordering::SeqCst);
 
         info!(
             "  verified {read_bytes_mib:.2}/{decoded_bytes_mib:.2} MiB in {elapsed:.2} seconds, speed {read_speed:.2}/{decode_speed:.2} MiB/s ({error_count} errors)"
         );
 
-        if errors.load(Ordering::SeqCst) > 0 {
+        if verify_state.errors.load(Ordering::SeqCst) > 0 {
             bail!("chunks could not be verified");
         }
 
         Ok(())
     }
 
+    #[allow(clippy::too_many_arguments)]
     fn verify_chunk_by_backend(
-        &self,
+        backend: &DatastoreBackend,
+        verify_state: Arc<IndexVerifyState>,
+        decoder_pool: &SendHandle<(DataBlob, [u8; 32], u64)>,
         info: &ChunkReadInfo,
-        read_bytes: &mut u64,
-        decoded_bytes: &mut u64,
-        errors: Arc<AtomicUsize>,
-        decoder_pool: &ParallelHandler<(DataBlob, [u8; 32], u64)>,
     ) -> Result<(), Error> {
-        match &self.backend {
-            DatastoreBackend::Filesystem => match self.datastore.load_chunk(&info.digest) {
-                Err(err) => self.add_corrupt_chunk(
+        match backend {
+            DatastoreBackend::Filesystem => match verify_state.datastore.load_chunk(&info.digest) {
+                Err(err) => Self::add_corrupt_chunk(
+                    verify_state,
                     info.digest,
-                    errors,
                     &format!("can't verify chunk, load failed - {err}"),
                 ),
                 Ok(chunk) => {
                     let size = info.size();
-                    *read_bytes += chunk.raw_size();
+                    verify_state
+                        .read_bytes
+                        .fetch_add(chunk.raw_size(), Ordering::SeqCst);
                     decoder_pool.send((chunk, info.digest, size))?;
-                    *decoded_bytes += size;
+                    verify_state.decoded_bytes.fetch_add(size, Ordering::SeqCst);
                 }
             },
             DatastoreBackend::S3(s3_client) => {
@@ -235,26 +255,28 @@ impl VerifyWorker {
                         match chunk_result {
                             Ok(chunk) => {
                                 let size = info.size();
-                                *read_bytes += chunk.raw_size();
+                                verify_state
+                                    .read_bytes
+                                    .fetch_add(chunk.raw_size(), Ordering::SeqCst);
                                 decoder_pool.send((chunk, info.digest, size))?;
-                                *decoded_bytes += size;
+                                verify_state.decoded_bytes.fetch_add(size, Ordering::SeqCst);
                             }
                             Err(err) => {
-                                errors.fetch_add(1, Ordering::SeqCst);
+                                verify_state.errors.fetch_add(1, Ordering::SeqCst);
                                 error!("can't verify chunk, load failed - {err}");
                             }
                         }
                     }
-                    Ok(None) => self.add_corrupt_chunk(
+                    Ok(None) => Self::add_corrupt_chunk(
+                        verify_state,
                         info.digest,
-                        errors,
                         &format!(
                             "can't verify missing chunk with digest {}",
                             hex::encode(info.digest)
                         ),
                     ),
                     Err(err) => {
-                        errors.fetch_add(1, Ordering::SeqCst);
+                        verify_state.errors.fetch_add(1, Ordering::SeqCst);
                         error!("can't verify chunk, load failed - {err}");
                     }
                 }
@@ -263,13 +285,13 @@ impl VerifyWorker {
         Ok(())
     }
 
-    fn add_corrupt_chunk(&self, digest: [u8; 32], errors: Arc<AtomicUsize>, message: &str) {
+    fn add_corrupt_chunk(verify_state: Arc<IndexVerifyState>, digest: [u8; 32], message: &str) {
         // Panic on poisoned mutex
-        let mut corrupt_chunks = self.corrupt_chunks.lock().unwrap();
+        let mut corrupt_chunks = verify_state.corrupt_chunks.lock().unwrap();
         corrupt_chunks.insert(digest);
         error!(message);
-        errors.fetch_add(1, Ordering::SeqCst);
-        match self.datastore.rename_corrupt_chunk(&digest) {
+        verify_state.errors.fetch_add(1, Ordering::SeqCst);
+        match verify_state.datastore.rename_corrupt_chunk(&digest) {
             Ok(Some(new_path)) => info!("corrupt chunk renamed to {new_path:?}"),
             Err(err) => info!("{err}"),
             _ => (),
-- 
2.47.3