[pbs-devel] [PATCH proxmox-backup v8 04/45] api: datastore: check s3 backend bucket access on datastore create
Christian Ebner
c.ebner at proxmox.com
Fri Jul 18 10:55:50 CEST 2025
On 7/18/25 9:40 AM, Lukas Wagner wrote:
> With the two string constants moved:
>
> Reviewed-by: Lukas Wagner <l.wagner at proxmox.com>
>
>
> On 2025-07-15 14:52, Christian Ebner wrote:
>> Check if the configured S3 object store backend can be reached and
>> the provided secrets have the permissions to access the bucket.
>>
>> Perform the check before creating the chunk store, so it is not left
>> behind if the bucket cannot be reached.
>>
>> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
>> ---
>> changes since version 7:
>> - no changes
>>
>> Cargo.toml | 2 +-
>> src/api2/config/datastore.rs | 48 ++++++++++++++++++++++++++++++++----
>> 2 files changed, 44 insertions(+), 6 deletions(-)
>>
>> diff --git a/Cargo.toml b/Cargo.toml
>> index c7a77060e..a5954635a 100644
>> --- a/Cargo.toml
>> +++ b/Cargo.toml
>> @@ -77,7 +77,7 @@ proxmox-rest-server = { version = "1", features = [ "templates" ] }
>> proxmox-router = { version = "3.2.2", default-features = false }
>> proxmox-rrd = "1"
>> proxmox-rrd-api-types = "1.0.2"
>> -proxmox-s3-client = "1.0.0"
>> +proxmox-s3-client = { version = "1.0.0", features = [ "impl" ] }
>> # everything but pbs-config and pbs-client use "api-macro"
>> proxmox-schema = "4"
>> proxmox-section-config = "3"
>> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
>> index b133be707..0fb822c79 100644
>> --- a/src/api2/config/datastore.rs
>> +++ b/src/api2/config/datastore.rs
>> @@ -1,21 +1,22 @@
>> use std::path::{Path, PathBuf};
>>
>> use ::serde::{Deserialize, Serialize};
>> -use anyhow::{bail, Context, Error};
>> +use anyhow::{bail, format_err, Context, Error};
>> use hex::FromHex;
>> use serde_json::Value;
>> use tracing::{info, warn};
>>
>> use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
>> +use proxmox_s3_client::{S3Client, S3ClientConfig, S3ClientOptions, S3ClientSecretsConfig};
>> use proxmox_schema::{api, param_bail, ApiType};
>> use proxmox_section_config::SectionConfigData;
>> use proxmox_uuid::Uuid;
>>
>> use pbs_api_types::{
>> - Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
>> - MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE,
>> - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
>> - UPID_SCHEMA,
>> + Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreBackendConfig, DatastoreBackendType,
>> + DatastoreNotify, DatastoreTuning, KeepOptions, MaintenanceMode, PruneJobConfig,
>> + PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT,
>> + PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
>> };
>> use pbs_config::BackupLockGuard;
>> use pbs_datastore::chunk_store::ChunkStore;
>> @@ -116,6 +117,43 @@ pub(crate) fn do_create_datastore(
>> .parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?,
>> )?;
>>
>> + if let Some(ref backend_config) = datastore.backend {
>> + let backend_config: DatastoreBackendConfig = backend_config.parse()?;
>> + match backend_config.ty.unwrap_or_default() {
>> + DatastoreBackendType::Filesystem => (),
>> + DatastoreBackendType::S3 => {
>> + let s3_client_id = backend_config
>> + .client
>> + .as_ref()
>> + .ok_or_else(|| format_err!("missing required client"))?;
>> + let bucket = backend_config
>> + .bucket
>> + .clone()
>> + .ok_or_else(|| format_err!("missing required bucket"))?;
>> + let (config, _config_digest) =
>> + pbs_config::s3::config().context("failed to get s3 config")?;
>> + let (secrets, _secrets_digest) =
>> + pbs_config::s3::secrets_config().context("failed to get s3 secrets")?;
>> + let config: S3ClientConfig = config
>> + .lookup("s3client", s3_client_id)
>> + .with_context(|| format!("no '{s3_client_id}' in config"))?;
>> + let secrets: S3ClientSecretsConfig = secrets
>> + .lookup("s3secrets", s3_client_id)
>> + .with_context(|| format!("no '{s3_client_id}' in secrets"))?;
>
> The "s3client" and "s3secrets" section type strings should be `pub const` where the config parser is defined.
We do not do that consistently for other configs either, but I do agree
that this makes sense and is best placed as constants next to the s3
config related code, so defining it there.
>> + let options = S3ClientOptions::from_config(
>> + config,
>> + secrets,
>> + bucket,
>> + datastore.name.to_owned(),
>> + );
>> + let s3_client = S3Client::new(options).context("failed to create s3 client")?;
>> + // Fine to block since this runs in worker task
>> + proxmox_async::runtime::block_on(s3_client.head_bucket())
>> + .context("failed to access bucket")?;
>
> I wonder whether we should add some kind of retry logic not only here, but also for anywhere else
> where we interact with S3. Might of course be easier to implement that right in the s3 client crate.
> Also, no need to add this right away, just some idea for future improvements.
I would refrain from adding retry logic everywhere: while it could
help work around some intermittent failures, it would cause additional
requests, which we certainly do not want, and it might make possible
issues harder to debug.
Also, retries are limited to put requests, where the API itself may
indicate that a retry is required.
quote:
If a conflicting operation occurs during the upload S3 returns a 409
ConditionalRequestConflict response. On a 409 failure you should fetch
the object's ETag and retry the upload.
see: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
But I did take a note to have a look at this again.
>
>> + }
>> + }
>> + }
>> +
>> let unmount_guard = if datastore.backing_device.is_some() {
>> do_mount_device(datastore.clone())?;
>> UnmountGuard::new(Some(path.clone()))
>
More information about the pbs-devel
mailing list