[pbs-devel] [PATCH proxmox-backup 04/17] api/datastore: move backup log upload by implementing datastore helper
Christian Ebner
c.ebner at proxmox.com
Tue Nov 4 09:47:33 CET 2025
On 11/3/25 3:51 PM, Fabian Grünbichler wrote:
> On November 3, 2025 12:31 pm, Christian Ebner wrote:
>> In an effort to decouple the api from the datastore backend, move the
>> backup task log upload to use a new add blob helper method of the
>> datastore.
>>
>> The new helper is fully sync and called on a blocking task, thereby
>> now also solving the previously incorrectly blocking rename_file() in
>> async context.
>>
>> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
>> ---
>> pbs-datastore/src/datastore.rs | 22 ++++++++++++++++++++++
>> src/api2/admin/datastore.rs | 25 +++++++------------------
>> 2 files changed, 29 insertions(+), 18 deletions(-)
>>
>> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
>> index 46600a88c..cc1267d78 100644
>> --- a/pbs-datastore/src/datastore.rs
>> +++ b/pbs-datastore/src/datastore.rs
>> @@ -2453,4 +2453,26 @@ impl DataStore {
>> snapshot.destroy(false, &backend)?;
>> Ok(())
>> }
>> +
>> + /// Adds the blob to the given snapshot.
>> + /// Requires the caller to hold the exclusive lock.
>> + pub fn add_blob(
>> + self: &Arc<Self>,
>> + filename: &str,
>> + snapshot: BackupDir,
>> + blob: DataBlob,
>
> should this get a backend parameter to not require instantiating a new
> one in contexts where lots of blobs might be added (backup env, sync
> job)?
Yes, good point! Will adapt this to get the backend passed in as well so
the same client and its connection can be reused.
>
>> + ) -> Result<(), Error> {
>> + if let DatastoreBackend::S3(s3_client) = self.backend()? {
>> + let object_key = crate::s3::object_key_from_path(&snapshot.relative_path(), filename)
>> + .context("invalid client log object key")?;
>> + let data = hyper::body::Bytes::copy_from_slice(blob.raw_data());
>> + proxmox_async::runtime::block_on(s3_client.upload_replace_with_retry(object_key, data))
>> + .context("failed to upload client log to s3 backend")?;
>> + };
>> +
>> + let mut path = snapshot.full_path();
>> + path.push(filename);
>> + replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
>> + Ok(())
>> + }
>
> the backup env also has this, and should switch to this new helper:
>
> // always verify blob/CRC at server side
> let blob = DataBlob::load_from_reader(&mut &data[..])?;
>
> let raw_data = blob.raw_data();
> if let DatastoreBackend::S3(s3_client) = &self.backend {
> let object_key = pbs_datastore::s3::object_key_from_path(
> &self.backup_dir.relative_path(),
> file_name,
> )
> .context("invalid blob object key")?;
> let data = hyper::body::Bytes::copy_from_slice(raw_data);
> proxmox_async::runtime::block_on(
> s3_client.upload_replace_with_retry(object_key.clone(), data),
> )
> .context("failed to upload blob to s3 backend")?;
> self.log(format!("Uploaded blob to object store: {object_key}"))
> }
>
> replace_file(&path, raw_data, CreateOptions::new(), false)?;
True, will adapt this as well, thanks!
>
>> }
>> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
>> index 763440df9..6881b4093 100644
>> --- a/src/api2/admin/datastore.rs
>> +++ b/src/api2/admin/datastore.rs
>> @@ -28,9 +28,7 @@ use proxmox_router::{
>> use proxmox_rrd_api_types::{RrdMode, RrdTimeframe};
>> use proxmox_schema::*;
>> use proxmox_sortable_macro::sortable;
>> -use proxmox_sys::fs::{
>> - file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
>> -};
>> +use proxmox_sys::fs::{file_read_firstline, file_read_optional_string, CreateOptions};
>> use proxmox_time::CalendarEvent;
>> use proxmox_worker_task::WorkerTaskContext;
>>
>> @@ -63,7 +61,7 @@ use pbs_datastore::manifest::BackupManifest;
>> use pbs_datastore::prune::compute_prune_info;
>> use pbs_datastore::{
>> check_backup_owner, ensure_datastore_is_mounted, task_tracking, BackupDir, DataStore,
>> - DatastoreBackend, LocalChunkReader, StoreProgress,
>> + LocalChunkReader, StoreProgress,
>> };
>> use pbs_tools::json::required_string_param;
>> use proxmox_rest_server::{formatter, worker_is_active, WorkerTask};
>> @@ -1536,20 +1534,11 @@ pub fn upload_backup_log(
>> // always verify blob/CRC at server side
>> let blob = DataBlob::load_from_reader(&mut &data[..])?;
>>
>> - if let DatastoreBackend::S3(s3_client) = datastore.backend()? {
>> - let object_key = pbs_datastore::s3::object_key_from_path(
>> - &backup_dir.relative_path(),
>> - file_name.as_ref(),
>> - )
>> - .context("invalid client log object key")?;
>> - let data = hyper::body::Bytes::copy_from_slice(blob.raw_data());
>> - s3_client
>> - .upload_replace_with_retry(object_key, data)
>> - .await
>> - .context("failed to upload client log to s3 backend")?;
>> - };
>> -
>> - replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
>> + tokio::task::spawn_blocking(move || {
>> + datastore.add_blob(file_name.as_ref(), backup_dir, blob)
>> + })
>> + .await
>> + .map_err(|err| format_err!("{err:#?}"))??;
>>
>> // fixme: use correct formatter
>> Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
>> --
>> 2.47.3
>>
>>
>>
>> _______________________________________________
>> pbs-devel mailing list
>> pbs-devel at lists.proxmox.com
>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>>
>>
>>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>
More information about the pbs-devel
mailing list