[pbs-devel] [PATCH proxmox-backup RFC 06/10] pbs-datastore: add generics and separate functions into impl blocks

Hannes Laimer h.laimer at proxmox.com
Tue Sep 3 14:33:57 CEST 2024


Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
---
 pbs-datastore/src/dynamic_index.rs      | 22 +++---
 pbs-datastore/src/fixed_index.rs        | 50 +++++++-------
 pbs-datastore/src/hierarchy.rs          | 92 ++++++++++++++-----------
 pbs-datastore/src/local_chunk_reader.rs | 13 ++--
 pbs-datastore/src/prune.rs              | 19 +++--
 pbs-datastore/src/snapshot_reader.rs    | 30 ++++----
 6 files changed, 121 insertions(+), 105 deletions(-)

diff --git a/pbs-datastore/src/dynamic_index.rs b/pbs-datastore/src/dynamic_index.rs
index 0e99ce58..75f6edff 100644
--- a/pbs-datastore/src/dynamic_index.rs
+++ b/pbs-datastore/src/dynamic_index.rs
@@ -18,7 +18,7 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 use pbs_tools::lru_cache::LruCache;
 
 use crate::chunk_stat::ChunkStat;
-use crate::chunk_store::ChunkStore;
+use crate::chunk_store::{CanWrite, ChunkStore};
 use crate::data_blob::{DataBlob, DataChunkBuilder};
 use crate::file_formats;
 use crate::index::{ChunkReadInfo, IndexFile};
@@ -275,8 +275,8 @@ impl IndexFile for DynamicIndexReader {
 }
 
 /// Create dynamic index files (`.didx`)
-pub struct DynamicIndexWriter {
-    store: Arc<ChunkStore>,
+pub struct DynamicIndexWriter<T> {
+    store: Arc<ChunkStore<T>>,
     _lock: ProcessLockSharedGuard,
     writer: BufWriter<File>,
     closed: bool,
@@ -287,14 +287,14 @@ pub struct DynamicIndexWriter {
     pub ctime: i64,
 }
 
-impl Drop for DynamicIndexWriter {
+impl<T> Drop for DynamicIndexWriter<T> {
     fn drop(&mut self) {
         let _ = std::fs::remove_file(&self.tmp_filename); // ignore errors
     }
 }
 
-impl DynamicIndexWriter {
-    pub fn create(store: Arc<ChunkStore>, path: &Path) -> Result<Self, Error> {
+impl<T: CanWrite> DynamicIndexWriter<T> {
+    pub fn create(store: Arc<ChunkStore<T>>, path: &Path) -> Result<Self, Error> {
         let shared_lock = store.try_shared_lock()?;
 
         let full_path = store.relative_path(path);
@@ -394,8 +394,8 @@ impl DynamicIndexWriter {
 /// Writer which splits a binary stream into dynamic sized chunks
 ///
 /// And store the resulting chunk list into the index file.
-pub struct DynamicChunkWriter {
-    index: DynamicIndexWriter,
+pub struct DynamicChunkWriter<T: CanWrite> {
+    index: DynamicIndexWriter<T>,
     closed: bool,
     chunker: ChunkerImpl,
     stat: ChunkStat,
@@ -404,8 +404,8 @@ pub struct DynamicChunkWriter {
     chunk_buffer: Vec<u8>,
 }
 
-impl DynamicChunkWriter {
-    pub fn new(index: DynamicIndexWriter, chunk_size: usize) -> Self {
+impl<T: CanWrite> DynamicChunkWriter<T> {
+    pub fn new(index: DynamicIndexWriter<T>, chunk_size: usize) -> Self {
         Self {
             index,
             closed: false,
@@ -490,7 +490,7 @@ impl DynamicChunkWriter {
     }
 }
 
-impl Write for DynamicChunkWriter {
+impl<T: CanWrite> Write for DynamicChunkWriter<T> {
     fn write(&mut self, data: &[u8]) -> std::result::Result<usize, std::io::Error> {
         let chunker = &mut self.chunker;
 
diff --git a/pbs-datastore/src/fixed_index.rs b/pbs-datastore/src/fixed_index.rs
index d67c388e..30e86add 100644
--- a/pbs-datastore/src/fixed_index.rs
+++ b/pbs-datastore/src/fixed_index.rs
@@ -214,8 +214,8 @@ impl IndexFile for FixedIndexReader {
     }
 }
 
-pub struct FixedIndexWriter {
-    store: Arc<ChunkStore>,
+pub struct FixedIndexWriter<T> {
+    store: Arc<ChunkStore<T>>,
     file: File,
     _lock: ProcessLockSharedGuard,
     filename: PathBuf,
@@ -229,9 +229,9 @@ pub struct FixedIndexWriter {
 }
 
 // `index` is mmap()ed which cannot be thread-local so should be sendable
-unsafe impl Send for FixedIndexWriter {}
+unsafe impl<T> Send for FixedIndexWriter<T> {}
 
-impl Drop for FixedIndexWriter {
+impl<T> Drop for FixedIndexWriter<T> {
     fn drop(&mut self) {
         let _ = std::fs::remove_file(&self.tmp_filename); // ignore errors
         if let Err(err) = self.unmap() {
@@ -240,10 +240,30 @@ impl Drop for FixedIndexWriter {
     }
 }
 
-impl FixedIndexWriter {
+impl<T> FixedIndexWriter<T> {
+    fn unmap(&mut self) -> Result<(), Error> {
+        if self.index.is_null() {
+            return Ok(());
+        }
+
+        let index_size = self.index_length * 32;
+
+        if let Err(err) =
+            unsafe { nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, index_size) }
+        {
+            bail!("unmap file {:?} failed - {}", self.tmp_filename, err);
+        }
+
+        self.index = std::ptr::null_mut();
+
+        Ok(())
+    }
+}
+
+impl<T: crate::chunk_store::CanWrite> FixedIndexWriter<T> {
     #[allow(clippy::cast_ptr_alignment)]
     pub fn create(
-        store: Arc<ChunkStore>,
+        store: Arc<ChunkStore<T>>,
         path: &Path,
         size: usize,
         chunk_size: usize,
@@ -320,24 +340,6 @@ impl FixedIndexWriter {
         self.index_length
     }
 
-    fn unmap(&mut self) -> Result<(), Error> {
-        if self.index.is_null() {
-            return Ok(());
-        }
-
-        let index_size = self.index_length * 32;
-
-        if let Err(err) =
-            unsafe { nix::sys::mman::munmap(self.index as *mut std::ffi::c_void, index_size) }
-        {
-            bail!("unmap file {:?} failed - {}", self.tmp_filename, err);
-        }
-
-        self.index = std::ptr::null_mut();
-
-        Ok(())
-    }
-
     pub fn close(&mut self) -> Result<[u8; 32], Error> {
         if self.index.is_null() {
             bail!("cannot close already closed index file.");
diff --git a/pbs-datastore/src/hierarchy.rs b/pbs-datastore/src/hierarchy.rs
index 8b7af038..1e313a82 100644
--- a/pbs-datastore/src/hierarchy.rs
+++ b/pbs-datastore/src/hierarchy.rs
@@ -9,16 +9,17 @@ use pbs_api_types::{BackupNamespace, BackupType, BACKUP_DATE_REGEX, BACKUP_ID_RE
 use proxmox_sys::fs::get_file_type;
 
 use crate::backup_info::{BackupDir, BackupGroup};
+use crate::chunk_store::CanRead;
 use crate::DataStore;
 
 /// An iterator for all BackupDir's (Snapshots) in a BackupGroup
-pub struct ListSnapshots {
-    group: BackupGroup,
+pub struct ListSnapshots<T: CanRead> {
+    group: BackupGroup<T>,
     fd: proxmox_sys::fs::ReadDir,
 }
 
-impl ListSnapshots {
-    pub fn new(group: BackupGroup) -> Result<Self, Error> {
+impl<T: CanRead> ListSnapshots<T> {
+    pub fn new(group: BackupGroup<T>) -> Result<Self, Error> {
         let group_path = group.full_group_path();
         Ok(ListSnapshots {
             fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &group_path)
@@ -28,8 +29,8 @@ impl ListSnapshots {
     }
 }
 
-impl Iterator for ListSnapshots {
-    type Item = Result<BackupDir, Error>;
+impl<T: CanRead> Iterator for ListSnapshots<T> {
+    type Item = Result<BackupDir<T>, Error>;
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
@@ -69,21 +70,25 @@ impl Iterator for ListSnapshots {
 }
 
 /// An iterator for a single backup group type.
-pub struct ListGroupsType {
-    store: Arc<DataStore>,
+pub struct ListGroupsType<T> {
+    store: Arc<DataStore<T>>,
     ns: BackupNamespace,
     ty: BackupType,
     dir: proxmox_sys::fs::ReadDir,
 }
 
-impl ListGroupsType {
-    pub fn new(store: Arc<DataStore>, ns: BackupNamespace, ty: BackupType) -> Result<Self, Error> {
+impl<T: CanRead> ListGroupsType<T> {
+    pub fn new(
+        store: Arc<DataStore<T>>,
+        ns: BackupNamespace,
+        ty: BackupType,
+    ) -> Result<Self, Error> {
         Self::new_at(libc::AT_FDCWD, store, ns, ty)
     }
 
     fn new_at(
         fd: RawFd,
-        store: Arc<DataStore>,
+        store: Arc<DataStore<T>>,
         ns: BackupNamespace,
         ty: BackupType,
     ) -> Result<Self, Error> {
@@ -95,13 +100,13 @@ impl ListGroupsType {
         })
     }
 
-    pub(crate) fn ok(self) -> ListGroupsOk<Self> {
+    pub(crate) fn ok(self) -> ListGroupsOk<Self, T> {
         ListGroupsOk::new(self)
     }
 }
 
-impl Iterator for ListGroupsType {
-    type Item = Result<BackupGroup, Error>;
+impl<T: CanRead> Iterator for ListGroupsType<T> {
+    type Item = Result<BackupGroup<T>, Error>;
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
@@ -139,15 +144,15 @@ impl Iterator for ListGroupsType {
 }
 
 /// An iterator for a (single) level of Backup Groups
-pub struct ListGroups {
-    store: Arc<DataStore>,
+pub struct ListGroups<T> {
+    store: Arc<DataStore<T>>,
     ns: BackupNamespace,
     type_fd: proxmox_sys::fs::ReadDir,
-    id_state: Option<ListGroupsType>,
+    id_state: Option<ListGroupsType<T>>,
 }
 
-impl ListGroups {
-    pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+impl<T: CanRead> ListGroups<T> {
+    pub fn new(store: Arc<DataStore<T>>, ns: BackupNamespace) -> Result<Self, Error> {
         Ok(Self {
             type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.namespace_path(&ns))?,
             store,
@@ -156,13 +161,13 @@ impl ListGroups {
         })
     }
 
-    pub(crate) fn ok(self) -> ListGroupsOk<Self> {
+    pub(crate) fn ok(self) -> ListGroupsOk<Self, T> {
         ListGroupsOk::new(self)
     }
 }
 
-impl Iterator for ListGroups {
-    type Item = Result<BackupGroup, Error>;
+impl<T: CanRead> Iterator for ListGroups<T> {
+    type Item = Result<BackupGroup<T>, Error>;
 
     fn next(&mut self) -> Option<Self::Item> {
         loop {
@@ -222,36 +227,36 @@ pub(crate) trait GroupIter {
     fn store_name(&self) -> &str;
 }
 
-impl GroupIter for ListGroups {
+impl<T: CanRead> GroupIter for ListGroups<T> {
     fn store_name(&self) -> &str {
         self.store.name()
     }
 }
 
-impl GroupIter for ListGroupsType {
+impl<T: CanRead> GroupIter for ListGroupsType<T> {
     fn store_name(&self) -> &str {
         self.store.name()
     }
 }
 
-pub(crate) struct ListGroupsOk<I>(Option<I>)
+pub(crate) struct ListGroupsOk<I, T: CanRead>(Option<I>)
 where
-    I: GroupIter + Iterator<Item = Result<BackupGroup, Error>>;
+    I: GroupIter + Iterator<Item = Result<BackupGroup<T>, Error>>;
 
-impl<I> ListGroupsOk<I>
+impl<I, T: CanRead> ListGroupsOk<I, T>
 where
-    I: GroupIter + Iterator<Item = Result<BackupGroup, Error>>,
+    I: GroupIter + Iterator<Item = Result<BackupGroup<T>, Error>>,
 {
     fn new(inner: I) -> Self {
         Self(Some(inner))
     }
 }
 
-impl<I> Iterator for ListGroupsOk<I>
+impl<I, T: CanRead> Iterator for ListGroupsOk<I, T>
 where
-    I: GroupIter + Iterator<Item = Result<BackupGroup, Error>>,
+    I: GroupIter + Iterator<Item = Result<BackupGroup<T>, Error>>,
 {
-    type Item = BackupGroup;
+    type Item = BackupGroup<T>;
 
     fn next(&mut self) -> Option<Self::Item> {
         if let Some(iter) = &mut self.0 {
@@ -274,19 +279,21 @@ where
 }
 
 /// An iterator for a (single) level of Namespaces
-pub struct ListNamespaces {
+pub struct ListNamespaces<T> {
     ns: BackupNamespace,
     base_path: PathBuf,
     ns_state: Option<proxmox_sys::fs::ReadDir>,
+    _marker: std::marker::PhantomData<T>,
 }
 
-impl ListNamespaces {
+impl<T: CanRead> ListNamespaces<T> {
     /// construct a new single-level namespace iterator on a datastore with an optional anchor ns
-    pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+    pub fn new(store: Arc<DataStore<T>>, ns: BackupNamespace) -> Result<Self, Error> {
         Ok(ListNamespaces {
             ns,
             base_path: store.base_path(),
             ns_state: None,
+            _marker: std::marker::PhantomData,
         })
     }
 
@@ -298,11 +305,12 @@ impl ListNamespaces {
             ns: ns.unwrap_or_default(),
             base_path: path,
             ns_state: None,
+            _marker: std::marker::PhantomData,
         })
     }
 }
 
-impl Iterator for ListNamespaces {
+impl<T: CanRead> Iterator for ListNamespaces<T> {
     type Item = Result<BackupNamespace, Error>;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -366,18 +374,18 @@ impl Iterator for ListNamespaces {
 /// can be useful for searching all backup groups from a certain anchor, as that can contain
 /// sub-namespaces but also groups on its own level, so otherwise one would need to special case
 /// the ones from the own level.
-pub struct ListNamespacesRecursive {
-    store: Arc<DataStore>,
+pub struct ListNamespacesRecursive<T> {
+    store: Arc<DataStore<T>>,
     /// the starting namespace we search downward from
     ns: BackupNamespace,
     /// the maximal recursion depth from the anchor start ns (depth == 0) downwards
     max_depth: u8,
-    state: Option<Vec<ListNamespaces>>, // vector to avoid code recursion
+    state: Option<Vec<ListNamespaces<T>>>, // vector to avoid code recursion
 }
 
-impl ListNamespacesRecursive {
+impl<T: CanRead> ListNamespacesRecursive<T> {
     /// Creates a recursive namespace iterator.
-    pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+    pub fn new(store: Arc<DataStore<T>>, ns: BackupNamespace) -> Result<Self, Error> {
         Self::new_max_depth(store, ns, pbs_api_types::MAX_NAMESPACE_DEPTH)
     }
 
@@ -388,7 +396,7 @@ impl ListNamespacesRecursive {
     /// Depth is counted relatively, that means not from the datastore as anchor, but from `ns`,
     /// and it will be clamped to `min(depth, MAX_NAMESPACE_DEPTH - ns.depth())` automatically.
     pub fn new_max_depth(
-        store: Arc<DataStore>,
+        store: Arc<DataStore<T>>,
         ns: BackupNamespace,
         max_depth: usize,
     ) -> Result<Self, Error> {
@@ -408,7 +416,7 @@ impl ListNamespacesRecursive {
     }
 }
 
-impl Iterator for ListNamespacesRecursive {
+impl<T: CanRead> Iterator for ListNamespacesRecursive<T> {
     type Item = Result<BackupNamespace, Error>;
 
     fn next(&mut self) -> Option<Self::Item> {
diff --git a/pbs-datastore/src/local_chunk_reader.rs b/pbs-datastore/src/local_chunk_reader.rs
index 05a70c06..ccdde9f1 100644
--- a/pbs-datastore/src/local_chunk_reader.rs
+++ b/pbs-datastore/src/local_chunk_reader.rs
@@ -7,20 +7,21 @@ use anyhow::{bail, Error};
 use pbs_api_types::CryptMode;
 use pbs_tools::crypt_config::CryptConfig;
 
+use crate::chunk_store::CanRead;
 use crate::data_blob::DataBlob;
 use crate::read_chunk::{AsyncReadChunk, ReadChunk};
 use crate::DataStore;
 
 #[derive(Clone)]
-pub struct LocalChunkReader {
-    store: Arc<DataStore>,
+pub struct LocalChunkReader<T: CanRead> {
+    store: Arc<DataStore<T>>,
     crypt_config: Option<Arc<CryptConfig>>,
     crypt_mode: CryptMode,
 }
 
-impl LocalChunkReader {
+impl<T: CanRead> LocalChunkReader<T> {
     pub fn new(
-        store: Arc<DataStore>,
+        store: Arc<DataStore<T>>,
         crypt_config: Option<Arc<CryptConfig>>,
         crypt_mode: CryptMode,
     ) -> Self {
@@ -47,7 +48,7 @@ impl LocalChunkReader {
     }
 }
 
-impl ReadChunk for LocalChunkReader {
+impl<T: CanRead> ReadChunk for LocalChunkReader<T> {
     fn read_raw_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
         let chunk = self.store.load_chunk(digest)?;
         self.ensure_crypt_mode(chunk.crypt_mode()?)?;
@@ -63,7 +64,7 @@ impl ReadChunk for LocalChunkReader {
     }
 }
 
-impl AsyncReadChunk for LocalChunkReader {
+impl<T: CanRead + Send + Sync> AsyncReadChunk for LocalChunkReader<T> {
     fn read_raw_chunk<'a>(
         &'a self,
         digest: &'a [u8; 32],
diff --git a/pbs-datastore/src/prune.rs b/pbs-datastore/src/prune.rs
index ad1493bf..fc0f9332 100644
--- a/pbs-datastore/src/prune.rs
+++ b/pbs-datastore/src/prune.rs
@@ -5,6 +5,8 @@ use anyhow::Error;
 
 use pbs_api_types::KeepOptions;
 
+use crate::chunk_store::CanRead;
+
 use super::BackupInfo;
 
 #[derive(Clone, Copy, PartialEq, Eq)]
@@ -36,9 +38,9 @@ impl std::fmt::Display for PruneMark {
     }
 }
 
-fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>>(
+fn mark_selections<F: Fn(&BackupInfo<T>) -> Result<String, Error>, T: CanRead>(
     mark: &mut HashMap<PathBuf, PruneMark>,
-    list: &[BackupInfo],
+    list: &[BackupInfo<T>],
     keep: usize,
     select_id: F,
 ) -> Result<(), Error> {
@@ -82,7 +84,10 @@ fn mark_selections<F: Fn(&BackupInfo) -> Result<String, Error>>(
     Ok(())
 }
 
-fn remove_incomplete_snapshots(mark: &mut HashMap<PathBuf, PruneMark>, list: &[BackupInfo]) {
+fn remove_incomplete_snapshots<T: CanRead>(
+    mark: &mut HashMap<PathBuf, PruneMark>,
+    list: &[BackupInfo<T>],
+) {
     let mut keep_unfinished = true;
     for info in list.iter() {
         // backup is considered unfinished if there is no manifest
@@ -104,10 +109,10 @@ fn remove_incomplete_snapshots(mark: &mut HashMap<PathBuf, PruneMark>, list: &[B
 }
 
 /// This filters incomplete and kept backups.
-pub fn compute_prune_info(
-    mut list: Vec<BackupInfo>,
+pub fn compute_prune_info<T: CanRead>(
+    mut list: Vec<BackupInfo<T>>,
     options: &KeepOptions,
-) -> Result<Vec<(BackupInfo, PruneMark)>, Error> {
+) -> Result<Vec<(BackupInfo<T>, PruneMark)>, Error> {
     let mut mark = HashMap::new();
 
     BackupInfo::sort_list(&mut list, false);
@@ -154,7 +159,7 @@ pub fn compute_prune_info(
         })?;
     }
 
-    let prune_info: Vec<(BackupInfo, PruneMark)> = list
+    let prune_info: Vec<(BackupInfo<T>, PruneMark)> = list
         .into_iter()
         .map(|info| {
             let backup_id = info.backup_dir.relative_path();
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index f9c77207..2ba91f0b 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -8,9 +8,10 @@ use nix::dir::Dir;
 
 use proxmox_sys::fs::lock_dir_noblock_shared;
 
-use pbs_api_types::{print_store_and_ns, BackupNamespace, Operation};
+use pbs_api_types::{print_store_and_ns, BackupNamespace};
 
 use crate::backup_info::BackupDir;
+use crate::chunk_store::CanRead;
 use crate::dynamic_index::DynamicIndexReader;
 use crate::fixed_index::FixedIndexReader;
 use crate::index::IndexFile;
@@ -20,24 +21,24 @@ use crate::DataStore;
 /// Helper to access the contents of a datastore backup snapshot
 ///
 /// This makes it easy to iterate over all used chunks and files.
-pub struct SnapshotReader {
-    snapshot: BackupDir,
+pub struct SnapshotReader<T: CanRead> {
+    snapshot: BackupDir<T>,
     datastore_name: String,
     file_list: Vec<String>,
     locked_dir: Dir,
 }
 
-impl SnapshotReader {
+impl<T: CanRead> SnapshotReader<T> {
     /// Lock snapshot, reads the manifest and returns a new instance
     pub fn new(
-        datastore: Arc<DataStore>,
+        datastore: Arc<DataStore<T>>,
         ns: BackupNamespace,
         snapshot: pbs_api_types::BackupDir,
     ) -> Result<Self, Error> {
         Self::new_do(datastore.backup_dir(ns, snapshot)?)
     }
 
-    pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
+    pub(crate) fn new_do(snapshot: BackupDir<T>) -> Result<Self, Error> {
         let datastore = snapshot.datastore();
         let snapshot_path = snapshot.full_path();
 
@@ -81,7 +82,7 @@ impl SnapshotReader {
     }
 
     /// Return the snapshot directory
-    pub fn snapshot(&self) -> &BackupDir {
+    pub fn snapshot(&self) -> &BackupDir<T> {
         &self.snapshot
     }
 
@@ -111,7 +112,7 @@ impl SnapshotReader {
     pub fn chunk_iterator<F: Fn(&[u8; 32]) -> bool>(
         &self,
         skip_fn: F,
-    ) -> Result<SnapshotChunkIterator<F>, Error> {
+    ) -> Result<SnapshotChunkIterator<F, T>, Error> {
         SnapshotChunkIterator::new(self, skip_fn)
     }
 }
@@ -121,15 +122,15 @@ impl SnapshotReader {
 /// Note: The iterator returns a `Result`, and the iterator state is
 /// undefined after the first error. So it makes no sense to continue
 /// iteration after the first error.
-pub struct SnapshotChunkIterator<'a, F: Fn(&[u8; 32]) -> bool> {
-    snapshot_reader: &'a SnapshotReader,
+pub struct SnapshotChunkIterator<'a, F: Fn(&[u8; 32]) -> bool, T: CanRead> {
+    snapshot_reader: &'a SnapshotReader<T>,
     todo_list: Vec<String>,
     skip_fn: F,
     #[allow(clippy::type_complexity)]
     current_index: Option<(Arc<Box<dyn IndexFile + Send>>, usize, Vec<(usize, u64)>)>,
 }
 
-impl<'a, F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'a, F> {
+impl<'a, F: Fn(&[u8; 32]) -> bool, T: CanRead> Iterator for SnapshotChunkIterator<'a, F, T> {
     type Item = Result<[u8; 32], Error>;
 
     fn next(&mut self) -> Option<Self::Item> {
@@ -149,9 +150,8 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'a, F> {
                                 ),
                             };
 
-                        let datastore = DataStore::lookup_datastore(
+                        let datastore = DataStore::lookup_datastore_read(
                             self.snapshot_reader.datastore_name(),
-                            Some(Operation::Read),
                         )?;
                         let order =
                             datastore.get_chunks_in_order(&*index, &self.skip_fn, |_| Ok(()))?;
@@ -176,8 +176,8 @@ impl<'a, F: Fn(&[u8; 32]) -> bool> Iterator for SnapshotChunkIterator<'a, F> {
     }
 }
 
-impl<'a, F: Fn(&[u8; 32]) -> bool> SnapshotChunkIterator<'a, F> {
-    pub fn new(snapshot_reader: &'a SnapshotReader, skip_fn: F) -> Result<Self, Error> {
+impl<'a, F: Fn(&[u8; 32]) -> bool, T: CanRead> SnapshotChunkIterator<'a, F, T> {
+    pub fn new(snapshot_reader: &'a SnapshotReader<T>, skip_fn: F) -> Result<Self, Error> {
         let mut todo_list = Vec::new();
 
         for filename in snapshot_reader.file_list() {
-- 
2.39.2





More information about the pbs-devel mailing list