[pbs-devel] [PATCH proxmox-backup v10 06/46] datastore: allow to get the backend for a datastore
Christian Ebner
c.ebner at proxmox.com
Mon Jul 21 18:44:27 CEST 2025
Implements an enum with variants Filesystem and S3 to distinguish
between the available backends. Filesystem is used as the default if
no backend is configured in the datastore's configuration. If the
datastore has an S3 backend configured, the backend method will
instantiate an S3 client and return it wrapped in the S3 variant.
This allows the client to be instantiated once, keeping and reusing
the same open connection to the API for the lifetime of a task or
job, e.g. in the backup writer/reader runtime environment.
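As a minimal caller-side sketch of the intended usage (not part of
this patch): it assumes only the DataStore::backend() method and the
DatastoreBackend enum introduced below; the store_chunks helper and
its parameters are hypothetical and merely illustrate resolving the
backend once per task and reusing the same client for every chunk:

    use anyhow::Error;
    use pbs_datastore::{DataStore, DatastoreBackend};

    // Hypothetical helper: resolve the backend a single time and reuse it.
    fn store_chunks(datastore: &DataStore, chunks: &[Vec<u8>]) -> Result<(), Error> {
        // For an S3 backend this instantiates the client (and connection) once.
        let backend = datastore.backend()?;
        for _chunk in chunks {
            match &backend {
                // Local filesystem: insert the chunk via the chunk store as before.
                DatastoreBackend::Filesystem => { /* local chunk insert */ }
                // S3: reuse the already instantiated client for every upload.
                DatastoreBackend::S3(_client) => { /* upload via the shared client */ }
            }
        }
        Ok(())
    }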
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
changes since version 9:
- no changes
pbs-datastore/src/datastore.rs | 58 ++++++++++++++++++++++++++++++++--
pbs-datastore/src/lib.rs | 1 +
2 files changed, 57 insertions(+), 2 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index d663465e2..bc829c5b8 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -12,6 +12,7 @@ use pbs_tools::lru_cache::LruCache;
use tracing::{info, warn};
use proxmox_human_byte::HumanByte;
+use proxmox_s3_client::{S3Client, S3ClientConfig, S3ClientOptions, S3ClientSecretsConfig};
use proxmox_schema::ApiType;
use proxmox_sys::error::SysError;
@@ -23,9 +24,11 @@ use proxmox_worker_task::WorkerTaskContext;
use pbs_api_types::{
ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder,
- DataStoreConfig, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionCacheStats,
- GarbageCollectionStatus, MaintenanceMode, MaintenanceType, Operation, UPID,
+ DataStoreConfig, DatastoreBackendConfig, DatastoreBackendType, DatastoreFSyncLevel,
+ DatastoreTuning, GarbageCollectionCacheStats, GarbageCollectionStatus, MaintenanceMode,
+ MaintenanceType, Operation, UPID,
};
+use pbs_config::s3::{S3_CFG_TYPE_ID, S3_SECRETS_CFG_TYPE_ID};
use pbs_config::BackupLockGuard;
use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, OLD_LOCKING};
@@ -127,6 +130,7 @@ pub struct DataStoreImpl {
chunk_order: ChunkOrder,
last_digest: Option<[u8; 32]>,
sync_level: DatastoreFSyncLevel,
+ backend_config: DatastoreBackendConfig,
}
impl DataStoreImpl {
@@ -141,6 +145,7 @@ impl DataStoreImpl {
chunk_order: Default::default(),
last_digest: None,
sync_level: Default::default(),
+ backend_config: Default::default(),
})
}
}
@@ -196,6 +201,15 @@ impl Drop for DataStore {
}
}
+#[derive(Clone)]
+/// Storage backend type for a datastore.
+pub enum DatastoreBackend {
+ /// Storage is located on local filesystem.
+ Filesystem,
+ /// Storage is located on S3 compatible object store.
+ S3(Arc<S3Client>),
+}
+
impl DataStore {
// This one just panics on everything
#[doc(hidden)]
@@ -206,6 +220,40 @@ impl DataStore {
})
}
+ /// Get the backend for this datastore based on its configuration
+ pub fn backend(&self) -> Result<DatastoreBackend, Error> {
+ let backend_type = match self.inner.backend_config.ty.unwrap_or_default() {
+ DatastoreBackendType::Filesystem => DatastoreBackend::Filesystem,
+ DatastoreBackendType::S3 => {
+ let s3_client_id = self
+ .inner
+ .backend_config
+ .client
+ .as_ref()
+ .ok_or_else(|| format_err!("missing client for s3 backend"))?;
+ let bucket = self
+ .inner
+ .backend_config
+ .bucket
+ .clone()
+ .ok_or_else(|| format_err!("missing bucket for s3 backend"))?;
+
+ let (config, _config_digest) = pbs_config::s3::config()?;
+ let (secrets, _secrets_digest) = pbs_config::s3::secrets_config()?;
+ let config: S3ClientConfig = config.lookup(S3_CFG_TYPE_ID, s3_client_id)?;
+ let secrets: S3ClientSecretsConfig =
+ secrets.lookup(S3_SECRETS_CFG_TYPE_ID, s3_client_id)?;
+
+ let options =
+ S3ClientOptions::from_config(config, secrets, bucket, self.name().to_owned());
+ let s3_client = S3Client::new(options)?;
+ DatastoreBackend::S3(Arc::new(s3_client))
+ }
+ };
+
+ Ok(backend_type)
+ }
+
pub fn lookup_datastore(
name: &str,
operation: Option<Operation>,
@@ -383,6 +431,11 @@ impl DataStore {
.parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
)?;
+ let backend_config: DatastoreBackendConfig = serde_json::from_value(
+ DatastoreBackendConfig::API_SCHEMA
+ .parse_property_string(config.backend.as_deref().unwrap_or(""))?,
+ )?;
+
Ok(DataStoreImpl {
chunk_store,
gc_mutex: Mutex::new(()),
@@ -391,6 +444,7 @@ impl DataStore {
chunk_order: tuning.chunk_order.unwrap_or_default(),
last_digest,
sync_level: tuning.sync_level.unwrap_or_default(),
+ backend_config,
})
}
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index ffd0d91b2..ca6fdb7d8 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -204,6 +204,7 @@ pub use store_progress::StoreProgress;
mod datastore;
pub use datastore::{
check_backup_owner, ensure_datastore_is_mounted, get_datastore_mount_status, DataStore,
+ DatastoreBackend,
};
mod hierarchy;
--
2.47.2