[pbs-devel] [PATCH proxmox 2/3] s3-client: dynamically calculate put request timeout based on payload
Christian Ebner
c.ebner at proxmox.com
Wed Aug 20 11:58:04 CEST 2025
The request-response time for uploading objects to the S3 API via the
put_object call depends on the size of the payload to be transferred.
The current default timeout of 60 seconds might not be enough to upload
chunks or blobs when the available upload bandwidth is low.
Blobs are currently the largest objects that can be uploaded to the
PBS API, reaching up to 128 MiB [0]. Other files such as notes and
index files are typically smaller.
To also allow uploads to S3 over connections with low upload bandwidth,
expose the optional timeout for the put object method and calculate it
dynamically, based on the size of the request payload to be uploaded,
assuming a minimum average upload rate of 1 KiB/s. The previous default
value is kept as the lower bound for the timeout.
[0] https://git.proxmox.com/?p=proxmox-backup.git;a=blob;f=pbs-datastore/src/data_blob.rs;h=0c05c5a40ae67d4a0d7847817102f30de1df3933;hb=HEAD#l13
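For illustration, a minimal sketch of the resulting calculation (the
constant names and the div_ceil based rounding mirror what this patch
introduces; the standalone helper function and the example sizes are
only for demonstration):

use std::time::Duration;

// Values as used by this patch.
const S3_HTTP_REQUEST_TIMEOUT: Duration = Duration::from_secs(60);
const S3_MIN_ASSUMED_UPLOAD_RATE: u64 = 1024; // 1 KiB/s

// Derive the put object request timeout from the payload size,
// never dropping below the previous fixed default.
fn put_request_timeout(content_size: u64) -> Duration {
    let timeout_secs = content_size
        .div_ceil(S3_MIN_ASSUMED_UPLOAD_RATE)
        .max(S3_HTTP_REQUEST_TIMEOUT.as_secs());
    Duration::from_secs(timeout_secs)
}

fn main() {
    // A 128 MiB blob gets 131072 seconds, a small index file keeps
    // the 60 second default.
    assert_eq!(put_request_timeout(128 * 1024 * 1024).as_secs(), 131072);
    assert_eq!(put_request_timeout(4096).as_secs(), 60);
}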
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
proxmox-s3-client/examples/s3_client.rs | 3 ++-
proxmox-s3-client/src/client.rs | 15 +++++++++++++--
2 files changed, 15 insertions(+), 3 deletions(-)
diff --git a/proxmox-s3-client/examples/s3_client.rs b/proxmox-s3-client/examples/s3_client.rs
index 61b88077..67baf467 100644
--- a/proxmox-s3-client/examples/s3_client.rs
+++ b/proxmox-s3-client/examples/s3_client.rs
@@ -50,8 +50,9 @@ async fn run() -> Result<(), anyhow::Error> {
let rel_object_key = S3ObjectKey::try_from("object.txt")?;
let body = proxmox_http::Body::empty();
let replace_existing_key = true;
+ let request_timeout = Some(std::time::Duration::from_secs(60));
let _response = s3_client
- .put_object(rel_object_key, body, replace_existing_key)
+ .put_object(rel_object_key, body, request_timeout, replace_existing_key)
.await?;
// List object, limiting to ones matching the given prefix. Since the api limits the response
diff --git a/proxmox-s3-client/src/client.rs b/proxmox-s3-client/src/client.rs
index 2a48240e..8a551f3a 100644
--- a/proxmox-s3-client/src/client.rs
+++ b/proxmox-s3-client/src/client.rs
@@ -35,6 +35,8 @@ const S3_HTTP_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
const S3_HTTP_REQUEST_TIMEOUT: Duration = Duration::from_secs(60);
const S3_TCP_KEEPALIVE_TIME: u32 = 120;
const MAX_S3_UPLOAD_RETRY: usize = 3;
+// Assumed minimum upload rate of 1 KiB/s for dynamic put object request timeout calculation.
+const S3_MIN_ASSUMED_UPLOAD_RATE: u64 = 1024;
/// S3 object key path prefix without the context prefix as defined by the client options.
///
@@ -413,6 +415,7 @@ impl S3Client {
&self,
object_key: S3ObjectKey,
object_data: Body,
+ timeout: Option<Duration>,
replace: bool,
) -> Result<PutObjectResponse, Error> {
let object_key = object_key.to_full_key(&self.options.common_prefix);
@@ -435,7 +438,7 @@ impl S3Client {
let request = request.body(object_data)?;
- let response = self.send(request, Some(S3_HTTP_REQUEST_TIMEOUT)).await?;
+ let response = self.send(request, timeout).await?;
let response_reader = ResponseReader::new(response);
response_reader.put_object_response().await
}
@@ -664,9 +667,17 @@ impl S3Client {
object_data: Bytes,
replace: bool,
) -> Result<bool, Error> {
+ let content_size = object_data.len() as u64;
+ let timeout_secs = content_size
+ .div_ceil(S3_MIN_ASSUMED_UPLOAD_RATE)
+ .max(S3_HTTP_REQUEST_TIMEOUT.as_secs());
+ let timeout = Some(Duration::from_secs(timeout_secs));
for retry in 0..MAX_S3_UPLOAD_RETRY {
let body = Body::from(object_data.clone());
- match self.put_object(object_key.clone(), body, replace).await {
+ match self
+ .put_object(object_key.clone(), body, timeout, replace)
+ .await
+ {
Ok(PutObjectResponse::Success(_response_body)) => return Ok(false),
Ok(PutObjectResponse::PreconditionFailed) => return Ok(true),
Ok(PutObjectResponse::NeedsRetry) => {
--
2.47.2