[pbs-devel] [PATCH proxmox-backup RFC 03/10] datastore: add generics and new lookup functions

Hannes Laimer h.laimer at proxmox.com
Tue Sep 3 14:33:54 CEST 2024


Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
---
 pbs-datastore/src/datastore.rs | 83 +++++++++++++++++++++++++++++-----
 1 file changed, 71 insertions(+), 12 deletions(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index d0f3c53a..be7767ff 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -6,6 +6,7 @@ use std::sync::{Arc, LazyLock, Mutex};
 
 use anyhow::{bail, format_err, Error};
 use nix::unistd::{unlinkat, UnlinkatFlags};
+use pbs_config::BackupLockGuard;
 use tracing::{info, warn};
 
 use proxmox_human_byte::HumanByte;
@@ -23,7 +24,9 @@ use pbs_api_types::{
 };
 
 use crate::backup_info::{BackupDir, BackupGroup, BackupGroupDeleteStats};
-use crate::chunk_store::ChunkStore;
+use crate::chunk_store::{
+    CanRead, CanWrite, ChunkStore, Lookup, Read as ReadStore, Write as WriteStore,
+};
 use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
 use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
 use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
@@ -32,7 +35,12 @@ use crate::manifest::ArchiveType;
 use crate::task_tracking::{self, update_active_operations};
 use crate::DataBlob;
 
-static DATASTORE_MAP: LazyLock<Mutex<HashMap<String, Arc<DataStoreImpl>>>> =
+type DataStoreCache<T> = HashMap<String, Arc<DataStoreImpl<T>>>;
+
+static DATASTORE_MAP_READ: LazyLock<Mutex<DataStoreCache<ReadStore>>> =
+    LazyLock::new(|| Mutex::new(HashMap::new()));
+
+static DATASTORE_MAP_WRITE: LazyLock<Mutex<DataStoreCache<WriteStore>>> =
     LazyLock::new(|| Mutex::new(HashMap::new()));
 
 /// checks if auth_id is owner, or, if owner is a token, if
@@ -50,8 +58,8 @@ pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error>
 ///
 /// A Datastore can store severals backups, and provides the
 /// management interface for backup.
-pub struct DataStoreImpl {
-    chunk_store: Arc<ChunkStore>,
+pub struct DataStoreImpl<T> {
+    chunk_store: Arc<ChunkStore<T>>,
     gc_mutex: Mutex<()>,
     last_gc_status: Mutex<GarbageCollectionStatus>,
     verify_new: bool,
@@ -60,12 +68,12 @@ pub struct DataStoreImpl {
     sync_level: DatastoreFSyncLevel,
 }
 
-impl DataStoreImpl {
+impl<T> DataStoreImpl<T> {
     // This one just panics on everything
     #[doc(hidden)]
-    pub(crate) unsafe fn new_test() -> Arc<Self> {
+    pub(crate) fn new_test() -> Arc<Self> {
         Arc::new(Self {
-            chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }),
+            chunk_store: Arc::new(ChunkStore::dummy_store()),
             gc_mutex: Mutex::new(()),
             last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
             verify_new: false,
@@ -76,12 +84,12 @@ impl DataStoreImpl {
     }
 }
 
-pub struct DataStore {
-    inner: Arc<DataStoreImpl>,
+pub struct DataStore<T> {
+    inner: Arc<DataStoreImpl<T>>,
     operation: Option<Operation>,
 }
 
-impl Clone for DataStore {
+impl<T> Clone for DataStore<T> {
     fn clone(&self) -> Self {
         let mut new_operation = self.operation;
         if let Some(operation) = self.operation {
@@ -98,7 +106,7 @@ impl Clone for DataStore {
     }
 }
 
-impl Drop for DataStore {
+impl<T> Drop for DataStore<T> {
     fn drop(&mut self) {
         if let Some(operation) = self.operation {
             let mut last_task = false;
@@ -120,12 +128,63 @@ impl Drop for DataStore {
                     });
 
             if remove_from_cache {
-                DATASTORE_MAP.lock().unwrap().remove(self.name());
+                DATASTORE_MAP_READ.lock().unwrap().remove(self.name());
+                DATASTORE_MAP_WRITE.lock().unwrap().remove(self.name());
             }
         }
     }
 }
 
+impl DataStore<Lookup> {
+    pub fn lookup_datastore(name: &str) -> Result<Arc<Self>, Error> {
+        let (config, digest, _lock) = Self::read_config(name)?;
+        let chunk_store = Arc::new(ChunkStore::open_lookup(name, &config.path)?);
+        let tuning: DatastoreTuning = serde_json::from_value(
+            DatastoreTuning::API_SCHEMA
+                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
+        )?;
+        let store = DataStoreImpl {
+            chunk_store,
+            gc_mutex: Mutex::new(()),
+            last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
+            verify_new: config.verify_new.unwrap_or(false),
+            chunk_order: tuning.chunk_order.unwrap_or_default(),
+            last_digest: Some(digest),
+            sync_level: tuning.sync_level.unwrap_or_default(),
+        };
+
+        Ok(Arc::new(Self {
+            inner: Arc::new(store),
+            operation: Some(Operation::Lookup),
+        }))
+    }
+}
+impl DataStore<ReadStore> {
+    pub fn lookup_datastore_read(name: &str) -> Result<Arc<Self>, Error> {
+        let mut datastore_cache = DATASTORE_MAP_READ.lock().unwrap();
+        let cache_entry = datastore_cache.get(name);
+        let store = Self::open_datastore(name, Some(Operation::Read), cache_entry.cloned())?;
+        if cache_entry.is_none() {
+            datastore_cache.insert(name.to_string(), store.inner.clone());
+        }
+        Ok(store)
+    }
+}
+impl DataStore<WriteStore> {
+    pub fn lookup_datastore_write(name: &str) -> Result<Arc<Self>, Error> {
+        let mut datastore_cache = DATASTORE_MAP_WRITE.lock().unwrap();
+        let cache_entry = datastore_cache.get(name);
+        let store = Self::open_datastore(name, Some(Operation::Write), cache_entry.cloned())?;
+        if cache_entry.is_none() {
+            datastore_cache.insert(name.to_string(), store.inner.clone());
+        }
+        Ok(store)
+    }
+}
+
+
 impl DataStore {
     // This one just panics on everything
     #[doc(hidden)]
-- 
2.39.2





More information about the pbs-devel mailing list