[pbs-devel] [PATCH proxmox-backup 04/17] api/datastore: move backup log upload by implementing datastore helper
Fabian Grünbichler
f.gruenbichler at proxmox.com
Mon Nov 3 15:51:13 CET 2025
On November 3, 2025 12:31 pm, Christian Ebner wrote:
> In an effort to decouple the API from the datastore backend, move the
> backup task log upload into a new add_blob helper method on the
> datastore.
>
> The new helper is fully sync and called on a blocking task, thereby
> also fixing the replace_file() call that previously blocked
> incorrectly in async context.
>
> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
> ---
>  pbs-datastore/src/datastore.rs | 22 ++++++++++++++++++++++
>  src/api2/admin/datastore.rs    | 25 +++++++------------------
>  2 files changed, 29 insertions(+), 18 deletions(-)
>
> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
> index 46600a88c..cc1267d78 100644
> --- a/pbs-datastore/src/datastore.rs
> +++ b/pbs-datastore/src/datastore.rs
> @@ -2453,4 +2453,26 @@ impl DataStore {
>          snapshot.destroy(false, &backend)?;
>          Ok(())
>      }
> +
> +    /// Adds the blob to the given snapshot.
> +    /// Requires the caller to hold the exclusive lock.
> +    pub fn add_blob(
> +        self: &Arc<Self>,
> +        filename: &str,
> +        snapshot: BackupDir,
> +        blob: DataBlob,
should this get a backend parameter, so contexts where lots of blobs
might be added (backup env, sync job) don't have to instantiate a new
one for every call?
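
i.e. roughly something like this (untested sketch, parameter name and
by-ref vs by-value up for debate - it just moves the backend lookup to
the caller, the body stays as in the patch):

    pub fn add_blob(
        self: &Arc<Self>,
        filename: &str,
        snapshot: BackupDir,
        blob: DataBlob,
        backend: &DatastoreBackend,
    ) -> Result<(), Error> {
        if let DatastoreBackend::S3(s3_client) = backend {
            let object_key =
                crate::s3::object_key_from_path(&snapshot.relative_path(), filename)
                    .context("invalid client log object key")?;
            let data = hyper::body::Bytes::copy_from_slice(blob.raw_data());
            proxmox_async::runtime::block_on(
                s3_client.upload_replace_with_retry(object_key, data),
            )
            .context("failed to upload client log to s3 backend")?;
        }

        let mut path = snapshot.full_path();
        path.push(filename);
        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
        Ok(())
    }

one-off callers like upload_backup_log could then simply pass in
&datastore.backend()?.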
> +    ) -> Result<(), Error> {
> +        if let DatastoreBackend::S3(s3_client) = self.backend()? {
> +            let object_key = crate::s3::object_key_from_path(&snapshot.relative_path(), filename)
> +                .context("invalid client log object key")?;
> +            let data = hyper::body::Bytes::copy_from_slice(blob.raw_data());
> +            proxmox_async::runtime::block_on(s3_client.upload_replace_with_retry(object_key, data))
> +                .context("failed to upload client log to s3 backend")?;
> +        };
> +
> +        let mut path = snapshot.full_path();
> +        path.push(filename);
> +        replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
> +        Ok(())
> +    }
the backup env also has this, and should switch to this new helper:
    // always verify blob/CRC at server side
    let blob = DataBlob::load_from_reader(&mut &data[..])?;
    let raw_data = blob.raw_data();
    if let DatastoreBackend::S3(s3_client) = &self.backend {
        let object_key = pbs_datastore::s3::object_key_from_path(
            &self.backup_dir.relative_path(),
            file_name,
        )
        .context("invalid blob object key")?;
        let data = hyper::body::Bytes::copy_from_slice(raw_data);
        proxmox_async::runtime::block_on(
            s3_client.upload_replace_with_retry(object_key.clone(), data),
        )
        .context("failed to upload blob to s3 backend")?;
        self.log(format!("Uploaded blob to object store: {object_key}"))
    }
    replace_file(&path, raw_data, CreateOptions::new(), false)?;
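
with the backend parameter from above, that whole block could then
shrink to something like (untested - assumes BackupDir is Clone and the
env holds the datastore as self.datastore; the "Uploaded blob to object
store" log line would need to move into the helper or stay at the call
site):

    let blob = DataBlob::load_from_reader(&mut &data[..])?;
    self.datastore
        .add_blob(file_name, self.backup_dir.clone(), blob, &self.backend)?;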
> }
> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
> index 763440df9..6881b4093 100644
> --- a/src/api2/admin/datastore.rs
> +++ b/src/api2/admin/datastore.rs
> @@ -28,9 +28,7 @@ use proxmox_router::{
>  use proxmox_rrd_api_types::{RrdMode, RrdTimeframe};
>  use proxmox_schema::*;
>  use proxmox_sortable_macro::sortable;
> -use proxmox_sys::fs::{
> -    file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
> -};
> +use proxmox_sys::fs::{file_read_firstline, file_read_optional_string, CreateOptions};
>  use proxmox_time::CalendarEvent;
>  use proxmox_worker_task::WorkerTaskContext;
> 
> @@ -63,7 +61,7 @@ use pbs_datastore::manifest::BackupManifest;
>  use pbs_datastore::prune::compute_prune_info;
>  use pbs_datastore::{
>      check_backup_owner, ensure_datastore_is_mounted, task_tracking, BackupDir, DataStore,
> -    DatastoreBackend, LocalChunkReader, StoreProgress,
> +    LocalChunkReader, StoreProgress,
>  };
>  use pbs_tools::json::required_string_param;
>  use proxmox_rest_server::{formatter, worker_is_active, WorkerTask};
> @@ -1536,20 +1534,11 @@ pub fn upload_backup_log(
>      // always verify blob/CRC at server side
>      let blob = DataBlob::load_from_reader(&mut &data[..])?;
> 
> -    if let DatastoreBackend::S3(s3_client) = datastore.backend()? {
> -        let object_key = pbs_datastore::s3::object_key_from_path(
> -            &backup_dir.relative_path(),
> -            file_name.as_ref(),
> -        )
> -        .context("invalid client log object key")?;
> -        let data = hyper::body::Bytes::copy_from_slice(blob.raw_data());
> -        s3_client
> -            .upload_replace_with_retry(object_key, data)
> -            .await
> -            .context("failed to upload client log to s3 backend")?;
> -    };
> -
> -    replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
> +    tokio::task::spawn_blocking(move || {
> +        datastore.add_blob(file_name.as_ref(), backup_dir, blob)
> +    })
> +    .await
> +    .map_err(|err| format_err!("{err:#?}"))??;
> 
>      // fixme: use correct formatter
>      Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
> --
> 2.47.3
>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>
>