[pbs-devel] [PATCH proxmox-backup v2 04/19] api/datastore: move backup log upload by implementing datastore helper
Christian Ebner
c.ebner at proxmox.com
Tue Nov 4 14:06:44 CET 2025
In an effort to decouple the api from the datastore backend, move the
backup task log upload to use a new add blob helper method of the
datastore. This method gets the backend as a parameter for cases where
it outlives the call, e.g. during a backup session or sync session.
The new helper is fully sync and called on a blocking task, thereby
also fixing the previously incorrect call to the blocking
rename_file() from async context.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
Changes since version 1:
- allow to pass in the backend instance, so it can be reused where
possible
pbs-datastore/src/datastore.rs | 23 +++++++++++++++++++++++
src/api2/admin/datastore.rs | 28 ++++++++++------------------
2 files changed, 33 insertions(+), 18 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 46600a88c..277489f5f 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -2453,4 +2453,27 @@ impl DataStore {
snapshot.destroy(false, &backend)?;
Ok(())
}
+
+ /// Adds the blob to the given snapshot.
+ /// Requires the caller to hold the exclusive lock.
+ pub fn add_blob(
+ self: &Arc<Self>,
+ filename: &str,
+ snapshot: BackupDir,
+ blob: DataBlob,
+ backend: &DatastoreBackend,
+ ) -> Result<(), Error> {
+ if let DatastoreBackend::S3(s3_client) = backend {
+ let object_key = crate::s3::object_key_from_path(&snapshot.relative_path(), filename)
+ .context("invalid blob object key")?;
+ let data = hyper::body::Bytes::copy_from_slice(blob.raw_data());
+ proxmox_async::runtime::block_on(s3_client.upload_replace_with_retry(object_key, data))
+ .context("failed to upload blob to s3 backend")?;
+ };
+
+ let mut path = snapshot.full_path();
+ path.push(filename);
+ replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
+ Ok(())
+ }
}
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 763440df9..b54ea9a04 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -28,9 +28,7 @@ use proxmox_router::{
use proxmox_rrd_api_types::{RrdMode, RrdTimeframe};
use proxmox_schema::*;
use proxmox_sortable_macro::sortable;
-use proxmox_sys::fs::{
- file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
-};
+use proxmox_sys::fs::{file_read_firstline, file_read_optional_string, CreateOptions};
use proxmox_time::CalendarEvent;
use proxmox_worker_task::WorkerTaskContext;
@@ -63,7 +61,7 @@ use pbs_datastore::manifest::BackupManifest;
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{
check_backup_owner, ensure_datastore_is_mounted, task_tracking, BackupDir, DataStore,
- DatastoreBackend, LocalChunkReader, StoreProgress,
+ LocalChunkReader, StoreProgress,
};
use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, worker_is_active, WorkerTask};
@@ -1536,20 +1534,14 @@ pub fn upload_backup_log(
// always verify blob/CRC at server side
let blob = DataBlob::load_from_reader(&mut &data[..])?;
- if let DatastoreBackend::S3(s3_client) = datastore.backend()? {
- let object_key = pbs_datastore::s3::object_key_from_path(
- &backup_dir.relative_path(),
- file_name.as_ref(),
- )
- .context("invalid client log object key")?;
- let data = hyper::body::Bytes::copy_from_slice(blob.raw_data());
- s3_client
- .upload_replace_with_retry(object_key, data)
- .await
- .context("failed to upload client log to s3 backend")?;
- };
-
- replace_file(&path, blob.raw_data(), CreateOptions::new(), false)?;
+ tokio::task::spawn_blocking(move || {
+ let backend = datastore
+ .backend()
+ .context("failed to get datastore backend")?;
+ datastore.add_blob(file_name.as_ref(), backup_dir, blob, &backend)
+ })
+ .await
+ .map_err(|err| format_err!("{err:#?}"))??;
// fixme: use correct formatter
Ok(formatter::JSON_FORMATTER.format_data(Value::Null, &*rpcenv))
--
2.47.3
More information about the pbs-devel
mailing list