[pbs-devel] [PATCH proxmox-backup v4 02/14] api/datastore: move s3 index upload helper to datastore backend
Christian Ebner
c.ebner at proxmox.com
Mon Nov 10 12:56:15 CET 2025
In an effort to decouple the api implementation from the backend
implementation and deduplicate code. Return a boolean flag to
distinguish between successful uploads and no actions required
(filesystem backends only).
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
pbs-datastore/src/datastore.rs | 30 ++++++++++++++++++++++++++++
src/api2/backup/environment.rs | 36 ++++++++++++----------------------
src/server/pull.rs | 16 ++++-----------
3 files changed, 47 insertions(+), 35 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 70af94d8f..d66e68332 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -224,6 +224,36 @@ pub enum DatastoreBackend {
S3(Arc<S3Client>),
}
+impl DatastoreBackend {
+ /// Reads the index file and uploads it to the backend.
+ ///
+    /// Returns true if the backend was updated, false if no action needed to be performed
+ pub async fn upload_index_to_backend(
+ &self,
+ backup_dir: &BackupDir,
+ name: &str,
+ ) -> Result<bool, Error> {
+ match self {
+ Self::Filesystem => Ok(false),
+ Self::S3(s3_client) => {
+ let object_key = crate::s3::object_key_from_path(&backup_dir.relative_path(), name)
+ .context("invalid index file object key")?;
+
+ let mut full_path = backup_dir.full_path();
+ full_path.push(name);
+ let data = tokio::fs::read(&full_path)
+ .await
+ .context("failed to read index contents")?;
+ let contents = hyper::body::Bytes::from(data);
+ let _is_duplicate = s3_client
+ .upload_replace_with_retry(object_key, contents)
+ .await?;
+ Ok(true)
+ }
+ }
+ }
+}
+
impl DataStore {
// This one just panics on everything
#[doc(hidden)]
diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
index 0faf6c8e0..1b8e0e1db 100644
--- a/src/api2/backup/environment.rs
+++ b/src/api2/backup/environment.rs
@@ -18,7 +18,6 @@ use pbs_datastore::dynamic_index::DynamicIndexWriter;
use pbs_datastore::fixed_index::FixedIndexWriter;
use pbs_datastore::{DataBlob, DataStore, DatastoreBackend};
use proxmox_rest_server::{formatter::*, WorkerTask};
-use proxmox_s3_client::S3Client;
use crate::backup::VerifyWorker;
@@ -560,11 +559,14 @@ impl BackupEnvironment {
drop(state);
// For S3 backends, upload the index file to the object store after closing
- if let DatastoreBackend::S3(s3_client) = &self.backend {
- self.s3_upload_index(s3_client, &writer_name)
- .context("failed to upload dynamic index to s3 backend")?;
+ if proxmox_async::runtime::block_on(
+ self.backend
+ .upload_index_to_backend(&self.backup_dir, &writer_name),
+ )
+ .context("failed to upload dynamic index to backend")?
+ {
self.log(format!(
- "Uploaded dynamic index file to s3 backend: {writer_name}"
+ "Uploaded dynamic index file to backend: {writer_name}"
))
}
@@ -659,9 +661,12 @@ impl BackupEnvironment {
drop(state);
// For S3 backends, upload the index file to the object store after closing
- if let DatastoreBackend::S3(s3_client) = &self.backend {
- self.s3_upload_index(s3_client, &writer_name)
- .context("failed to upload fixed index to s3 backend")?;
+ if proxmox_async::runtime::block_on(
+ self.backend
+ .upload_index_to_backend(&self.backup_dir, &writer_name),
+ )
+ .context("failed to upload fixed index to backend")?
+ {
self.log(format!(
"Uploaded fixed index file to object store: {writer_name}"
))
@@ -842,21 +847,6 @@ impl BackupEnvironment {
let state = self.state.lock().unwrap();
state.finished == BackupState::Finished
}
-
- fn s3_upload_index(&self, s3_client: &S3Client, name: &str) -> Result<(), Error> {
- let object_key =
- pbs_datastore::s3::object_key_from_path(&self.backup_dir.relative_path(), name)
- .context("invalid index file object key")?;
-
- let mut full_path = self.backup_dir.full_path();
- full_path.push(name);
- let data = std::fs::read(&full_path).context("failed to read index contents")?;
- let contents = hyper::body::Bytes::from(data);
- proxmox_async::runtime::block_on(
- s3_client.upload_replace_with_retry(object_key, contents),
- )?;
- Ok(())
- }
}
impl RpcEnvironment for BackupEnvironment {
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 92513fe70..ba79704cd 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -342,19 +342,11 @@ async fn pull_single_archive<'a>(
if let Err(err) = std::fs::rename(&tmp_path, &path) {
bail!("Atomic rename file {:?} failed - {}", path, err);
}
- if let DatastoreBackend::S3(s3_client) = backend {
- let object_key =
- pbs_datastore::s3::object_key_from_path(&snapshot.relative_path(), archive_name)
- .context("invalid archive object key")?;
- let data = tokio::fs::read(&path)
- .await
- .context("failed to read archive contents")?;
- let contents = hyper::body::Bytes::from(data);
- let _is_duplicate = s3_client
- .upload_replace_with_retry(object_key, contents)
- .await?;
- }
+ backend
+ .upload_index_to_backend(snapshot, archive_name)
+ .await?;
+
Ok(sync_stats)
}
--
2.47.3
More information about the pbs-devel
mailing list