[pbs-devel] [PATCH proxmox-backup v3 07/23] api/datastore: move s3 index upload helper to datastore backend

Fabian Grünbichler f.gruenbichler at proxmox.com
Thu Nov 6 10:37:51 CET 2025


On November 5, 2025 1:22 pm, Christian Ebner wrote:
> In an effort to decouple the api implementation from the backend
> implementation and deduplicate code.
> 
> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
> ---
> changes since version 2:
> - no changes
> 
>  pbs-datastore/src/datastore.rs | 26 ++++++++++++++++++++++++++
>  src/api2/backup/environment.rs | 32 ++++++++++----------------------
>  src/server/pull.rs             | 14 ++------------
>  3 files changed, 38 insertions(+), 34 deletions(-)
> 
> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
> index 0d738f0ac..343f49f36 100644
> --- a/pbs-datastore/src/datastore.rs
> +++ b/pbs-datastore/src/datastore.rs
> @@ -223,6 +223,32 @@ pub enum DatastoreBackend {
>      S3(Arc<S3Client>),
>  }
>  
> +impl DatastoreBackend {
> +    /// Reads the index file and uploads it to the S3 backend.
> +    ///
> +    /// Returns with error if the backend variant is not S3.
> +    pub async fn s3_upload_index(&self, backup_dir: &BackupDir, name: &str) -> Result<(), Error> {

the interface here would be nicer/more ergonomic if it were just

datastore.upload_index_to_backend(backend, backup_dir, name) -> Result<bool, Error>

implemented as a no-op for filesystem backends. but I suspect we'd need
to change this once more if we integrate the backend more directly into
the datastore in the next series?
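
roughly something like this (untested sketch - the helper name and the
bool return value are just what I had in mind, the body is lifted from
this patch's s3_upload_index):

impl DataStore {
    /// Uploads the index file to the backend if the backend requires it.
    ///
    /// Returns Ok(false) if the backend needs no upload (filesystem),
    /// Ok(true) after a successful upload to S3.
    pub async fn upload_index_to_backend(
        &self,
        backend: &DatastoreBackend,
        backup_dir: &BackupDir,
        name: &str,
    ) -> Result<bool, Error> {
        let s3_client = match backend {
            // nothing to do, the index already lives on the local filesystem
            DatastoreBackend::Filesystem => return Ok(false),
            DatastoreBackend::S3(s3_client) => s3_client,
        };

        let object_key = crate::s3::object_key_from_path(&backup_dir.relative_path(), name)
            .context("invalid index file object key")?;

        let mut full_path = backup_dir.full_path();
        full_path.push(name);
        let data = tokio::fs::read(&full_path)
            .await
            .context("failed to read index contents")?;
        let _is_duplicate = s3_client
            .upload_replace_with_retry(object_key, hyper::body::Bytes::from(data))
            .await?;

        Ok(true)
    }
}

callers like pull_single_archive could then call it unconditionally
instead of matching on the backend variant first.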

> +        match self {
> +            Self::Filesystem => bail!("datastore backend not of type S3"),
> +            Self::S3(s3_client) => {
> +                let object_key = crate::s3::object_key_from_path(&backup_dir.relative_path(), name)
> +                    .context("invalid index file object key")?;
> +
> +                let mut full_path = backup_dir.full_path();
> +                full_path.push(name);
> +                let data = tokio::fs::read(&full_path)
> +                    .await
> +                    .context("failed to read index contents")?;
> +                let contents = hyper::body::Bytes::from(data);
> +                let _is_duplicate = s3_client
> +                    .upload_replace_with_retry(object_key, contents)
> +                    .await?;
> +                Ok(())
> +            }
> +        }
> +    }
> +}
> +
>  impl DataStore {
>      // This one just panics on everything
>      #[doc(hidden)]
> diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
> index 0faf6c8e0..f87d5a89e 100644
> --- a/src/api2/backup/environment.rs
> +++ b/src/api2/backup/environment.rs
> @@ -18,7 +18,6 @@ use pbs_datastore::dynamic_index::DynamicIndexWriter;
>  use pbs_datastore::fixed_index::FixedIndexWriter;
>  use pbs_datastore::{DataBlob, DataStore, DatastoreBackend};
>  use proxmox_rest_server::{formatter::*, WorkerTask};
> -use proxmox_s3_client::S3Client;
>  
>  use crate::backup::VerifyWorker;
>  
> @@ -560,9 +559,11 @@ impl BackupEnvironment {
>          drop(state);
>  
>          // For S3 backends, upload the index file to the object store after closing
> -        if let DatastoreBackend::S3(s3_client) = &self.backend {
> -            self.s3_upload_index(s3_client, &writer_name)
> -                .context("failed to upload dynamic index to s3 backend")?;
> +        if let DatastoreBackend::S3(_s3_client) = &self.backend {
> +            proxmox_async::runtime::block_on(
> +                self.backend.s3_upload_index(&self.backup_dir, &writer_name),
> +            )
> +            .context("failed to upload dynamic index to s3 backend")?;
>              self.log(format!(
>                  "Uploaded dynamic index file to s3 backend: {writer_name}"
>              ))
> @@ -659,9 +660,11 @@ impl BackupEnvironment {
>          drop(state);
>  
>          // For S3 backends, upload the index file to the object store after closing
> -        if let DatastoreBackend::S3(s3_client) = &self.backend {
> -            self.s3_upload_index(s3_client, &writer_name)
> -                .context("failed to upload fixed index to s3 backend")?;
> +        if let DatastoreBackend::S3(_s3_client) = &self.backend {
> +            proxmox_async::runtime::block_on(
> +                self.backend.s3_upload_index(&self.backup_dir, &writer_name),
> +            )
> +            .context("failed to upload fixed index to s3 backend")?;
>              self.log(format!(
>                  "Uploaded fixed index file to object store: {writer_name}"
>              ))
> @@ -842,21 +845,6 @@ impl BackupEnvironment {
>          let state = self.state.lock().unwrap();
>          state.finished == BackupState::Finished
>      }
> -
> -    fn s3_upload_index(&self, s3_client: &S3Client, name: &str) -> Result<(), Error> {
> -        let object_key =
> -            pbs_datastore::s3::object_key_from_path(&self.backup_dir.relative_path(), name)
> -                .context("invalid index file object key")?;
> -
> -        let mut full_path = self.backup_dir.full_path();
> -        full_path.push(name);
> -        let data = std::fs::read(&full_path).context("failed to read index contents")?;
> -        let contents = hyper::body::Bytes::from(data);
> -        proxmox_async::runtime::block_on(
> -            s3_client.upload_replace_with_retry(object_key, contents),
> -        )?;
> -        Ok(())
> -    }
>  }
>  
>  impl RpcEnvironment for BackupEnvironment {
> diff --git a/src/server/pull.rs b/src/server/pull.rs
> index 2dcadf972..94b2fbf55 100644
> --- a/src/server/pull.rs
> +++ b/src/server/pull.rs
> @@ -359,19 +359,9 @@ async fn pull_single_archive<'a>(
>      if let Err(err) = std::fs::rename(&tmp_path, &path) {
>          bail!("Atomic rename file {:?} failed - {}", path, err);
>      }
> -    if let DatastoreBackend::S3(s3_client) = backend {
> -        let object_key =
> -            pbs_datastore::s3::object_key_from_path(&snapshot.relative_path(), archive_name)
> -                .context("invalid archive object key")?;
>  
> -        let data = tokio::fs::read(&path)
> -            .await
> -            .context("failed to read archive contents")?;
> -        let contents = hyper::body::Bytes::from(data);
> -        let _is_duplicate = s3_client
> -            .upload_replace_with_retry(object_key, contents)
> -            .await?;
> -    }
> +    backend.s3_upload_index(snapshot, archive_name).await?;
> +
>      Ok(sync_stats)
>  }
>  
> -- 
> 2.47.3