[pbs-devel] [PATCH proxmox-backup v2 03/12] datastore: add generics and new lookup functions
Hannes Laimer
h.laimer at proxmox.com
Mon May 26 16:14:36 CEST 2025
Add a generic access-mode type parameter to DataStoreImpl and DataStore,
tracking how the underlying ChunkStore was opened (Lookup/Read/Write).
Split the global datastore cache into separate read and write maps, and
add typed lookup functions: lookup_datastore (uncached, Lookup),
lookup_datastore_read and lookup_datastore_write (both cached).
The unsafe new_test/panic_store test helpers are replaced by a safe
dummy store.

Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
---
pbs-datastore/src/datastore.rs | 80 +++++++++++++++++++++++++++++-----
1 file changed, 68 insertions(+), 12 deletions(-)
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index cbf78ecb..6936875e 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -8,6 +8,7 @@ use std::time::Duration;
use anyhow::{bail, format_err, Context, Error};
use nix::unistd::{unlinkat, UnlinkatFlags};
+use pbs_config::BackupLockGuard;
use pbs_tools::lru_cache::LruCache;
use tracing::{info, warn};
@@ -29,7 +30,6 @@ use pbs_api_types::{
-use pbs_config::BackupLockGuard;
use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, OLD_LOCKING};
-use crate::chunk_store::ChunkStore;
+use crate::chunk_store::{CanRead, CanWrite, ChunkStore, Lookup as L, Read as R, Write as W};
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
@@ -37,7 +38,12 @@ use crate::index::IndexFile;
use crate::task_tracking::{self, update_active_operations};
use crate::DataBlob;
-static DATASTORE_MAP: LazyLock<Mutex<HashMap<String, Arc<DataStoreImpl>>>> =
+type DataStoreCache<T> = HashMap<String, Arc<DataStoreImpl<T>>>;
+
+static DATASTORE_MAP_READ: LazyLock<Mutex<DataStoreCache<R>>> =
+ LazyLock::new(|| Mutex::new(HashMap::new()));
+
+static DATASTORE_MAP_WRITE: LazyLock<Mutex<DataStoreCache<W>>> =
LazyLock::new(|| Mutex::new(HashMap::new()));
/// checks if auth_id is owner, or, if owner is a token, if
@@ -117,8 +123,8 @@ pub fn ensure_datastore_is_mounted(config: &DataStoreConfig) -> Result<(), Error
///
/// A Datastore can store severals backups, and provides the
/// management interface for backup.
-pub struct DataStoreImpl {
- chunk_store: Arc<ChunkStore>,
+pub struct DataStoreImpl<T> {
+ chunk_store: Arc<ChunkStore<T>>,
gc_mutex: Mutex<()>,
last_gc_status: Mutex<GarbageCollectionStatus>,
verify_new: bool,
@@ -127,12 +133,12 @@ pub struct DataStoreImpl {
sync_level: DatastoreFSyncLevel,
}
-impl DataStoreImpl {
+impl<T> DataStoreImpl<T> {
// This one just panics on everything
#[doc(hidden)]
- pub(crate) unsafe fn new_test() -> Arc<Self> {
+ pub(crate) fn new_test() -> Arc<Self> {
Arc::new(Self {
- chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }),
+ chunk_store: Arc::new(ChunkStore::dummy_store()),
gc_mutex: Mutex::new(()),
last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
verify_new: false,
@@ -143,12 +149,12 @@ impl DataStoreImpl {
}
}
-pub struct DataStore {
- inner: Arc<DataStoreImpl>,
+pub struct DataStore<T> {
+ inner: Arc<DataStoreImpl<T>>,
operation: Option<Operation>,
}
-impl Clone for DataStore {
+impl<T> Clone for DataStore<T> {
fn clone(&self) -> Self {
let mut new_operation = self.operation;
if let Some(operation) = self.operation {
@@ -165,7 +171,7 @@ impl Clone for DataStore {
}
}
-impl Drop for DataStore {
+impl<T> Drop for DataStore<T> {
fn drop(&mut self) {
if let Some(operation) = self.operation {
let mut last_task = false;
@@ -188,12 +194,62 @@ impl Drop for DataStore {
});
if remove_from_cache {
- DATASTORE_MAP.lock().unwrap().remove(self.name());
+ DATASTORE_MAP_READ.lock().unwrap().remove(self.name());
+ DATASTORE_MAP_WRITE.lock().unwrap().remove(self.name());
}
}
}
}
+impl DataStore<L> {
+ pub fn lookup_datastore(name: &str) -> Result<Arc<Self>, Error> {
+ let (config, digest, _lock) = Self::read_config(name)?;
+ let chunk_store = Arc::new(ChunkStore::open_lookup(name, config.absolute_path())?);
+ let tuning: DatastoreTuning = serde_json::from_value(
+ DatastoreTuning::API_SCHEMA
+ .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
+ )?;
+ let store = DataStoreImpl {
+ chunk_store,
+ gc_mutex: Mutex::new(()),
+ last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
+ verify_new: config.verify_new.unwrap_or(false),
+ chunk_order: tuning.chunk_order.unwrap_or_default(),
+ last_digest: Some(digest),
+ sync_level: tuning.sync_level.unwrap_or_default(),
+ };
+
+ Ok(Arc::new(Self {
+ inner: Arc::new(store),
+ operation: Some(Operation::Lookup),
+ }))
+ }
+}
+
+impl DataStore<R> {
+ pub fn lookup_datastore_read(name: &str) -> Result<Arc<Self>, Error> {
+ let mut datastore_cache = DATASTORE_MAP_READ.lock().unwrap();
+ let cache_entry = datastore_cache.get(name);
+ let store = Self::open_datastore(name, Some(Operation::Read), cache_entry.cloned())?;
+ if cache_entry.is_none() {
+ datastore_cache.insert(name.to_string(), store.inner.clone());
+ }
+ Ok(store)
+ }
+}
+
+impl DataStore<W> {
+ pub fn lookup_datastore_write(name: &str) -> Result<Arc<Self>, Error> {
+ let mut datastore_cache = DATASTORE_MAP_WRITE.lock().unwrap();
+ let cache_entry = datastore_cache.get(name);
+ let store = Self::open_datastore(name, Some(Operation::Write), cache_entry.cloned())?;
+ if cache_entry.is_none() {
+ datastore_cache.insert(name.to_string(), store.inner.clone());
+ }
+ Ok(store)
+ }
+}
+
impl DataStore {
// This one just panics on everything
#[doc(hidden)]
--
2.39.5
More information about the pbs-devel
mailing list