[pbs-devel] [RFC v2 proxmox-backup 26/42] verify worker: add datastore backend to verify worker
Christian Ebner
c.ebner at proxmox.com
Thu May 29 16:31:51 CEST 2025
In order to fetch chunks from an S3 compatible object store,
instantiate the s3 client in the verify worker by storing the
datastore's backend. This allows the same instance to be reused for
the whole verification task.
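To illustrate the pattern outside the diff context, below is a minimal,
self-contained sketch: resolve the datastore backend once in the (now
fallible) constructor, keep it on the worker, and reuse that single
instance for all chunk reads of the verification task. The types are
illustrative stand-ins only, not the pbs_datastore API.

```rust
use std::sync::Arc;

// Stand-in types for illustration; the real code uses pbs_datastore's
// DataStore and DatastoreBackend, whose exact shape is not shown here.
#[allow(dead_code)]
enum Backend {
    Filesystem,
    S3(Arc<str>), // placeholder for an S3 client handle
}

struct DataStore;

impl DataStore {
    // Fallible, mirroring `datastore.backend()?` in the patch (e.g. the
    // S3 client may fail to instantiate from the datastore config).
    fn backend(&self) -> Result<Backend, String> {
        Ok(Backend::Filesystem)
    }
}

#[allow(dead_code)]
struct VerifyWorker {
    datastore: Arc<DataStore>,
    backend: Backend,
}

impl VerifyWorker {
    // The constructor becomes fallible, matching `VerifyWorker::new(...)?`.
    fn new(datastore: Arc<DataStore>) -> Result<Self, String> {
        let backend = datastore.backend()?;
        Ok(Self { datastore, backend })
    }
}

fn main() -> Result<(), String> {
    let worker = VerifyWorker::new(Arc::new(DataStore))?;
    // The stored backend is reused for every chunk read of the whole task.
    match worker.backend {
        Backend::Filesystem => println!("verifying chunks from the local store"),
        Backend::S3(_) => println!("verifying chunks fetched via the S3 client"),
    }
    Ok(())
}
```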
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
src/api2/admin/datastore.rs | 2 +-
src/api2/backup/environment.rs | 2 +-
src/backup/verify.rs | 14 ++++++++++----
src/server/verify_job.rs | 2 +-
4 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 7dc881ade..7b7f79b22 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -893,7 +893,7 @@ pub fn verify(
auth_id.to_string(),
to_stdout,
move |worker| {
- let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+ let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
let failed_dirs = if let Some(backup_dir) = backup_dir {
let mut res = Vec::new();
if !verify_worker.verify_backup_dir(
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 685b78e89..384e8a73f 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -796,7 +796,7 @@ impl BackupEnvironment {
move |worker| {
worker.log_message("Automatically verifying newly added snapshot");
- let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+ let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
if !verify_worker.verify_backup_dir_with_lock(
&backup_dir,
worker.upid().clone(),
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 0b954ae23..a01ddcca3 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -17,7 +17,7 @@ use pbs_api_types::{
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, FileInfo};
-use pbs_datastore::{DataBlob, DataStore, StoreProgress};
+use pbs_datastore::{DataBlob, DataStore, DatastoreBackend, StoreProgress};
use crate::tools::parallel_handler::ParallelHandler;
@@ -30,19 +30,25 @@ pub struct VerifyWorker {
datastore: Arc<DataStore>,
verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+ backend: DatastoreBackend,
}
impl VerifyWorker {
/// Creates a new VerifyWorker for a given task worker and datastore.
- pub fn new(worker: Arc<dyn WorkerTaskContext>, datastore: Arc<DataStore>) -> Self {
- Self {
+ pub fn new(
+ worker: Arc<dyn WorkerTaskContext>,
+ datastore: Arc<DataStore>,
+ ) -> Result<Self, Error> {
+ let backend = datastore.backend()?;
+ Ok(Self {
worker,
datastore,
// start with 16k chunks == up to 64G data
verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16 * 1024))),
// start with 64 chunks since we assume there are few corrupt ones
corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
- }
+ backend,
+ })
}
fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
diff --git a/src/server/verify_job.rs b/src/server/verify_job.rs
index 95a7b2a9b..c8792174b 100644
--- a/src/server/verify_job.rs
+++ b/src/server/verify_job.rs
@@ -41,7 +41,7 @@ pub fn do_verification_job(
None => Default::default(),
};
- let verify_worker = VerifyWorker::new(worker.clone(), datastore);
+ let verify_worker = VerifyWorker::new(worker.clone(), datastore)?;
let result = verify_worker.verify_all_backups(
worker.upid(),
ns,
--
2.39.5