[pbs-devel] [PATCH v3 proxmox-backup 2/5] fix #3847: datastore: test FixedIndexWriter
Robert Obkircher
r.obkircher at proxmox.com
Fri Jan 9 18:35:37 CET 2026
Create a dummy chunk store and write fidx files with both fixed-size
and dynamically grown writers. Compare the resulting binary files
directly (ignoring uuid and ctime) and also read them back using the
reader.

The chunk hashes are made up and don't actually exist in the chunk
store.
Signed-off-by: Robert Obkircher <r.obkircher at proxmox.com>
---
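Note for reviewers: compare_to_known_size_writer() zeroes bytes 8..32
of both files before comparing them. Assuming the usual layout of
FixedIndexHeader, that range covers exactly the uuid and ctime fields,
which legitimately differ between the two writes. Sketch of the header
prefix for reference only, not part of this patch:

    #[repr(C)]
    struct FixedIndexHeader {
        magic: [u8; 8], // bytes 0..8, must match
        uuid: [u8; 16], // bytes 8..24, zeroed before the comparison
        ctime: i64,     // bytes 24..32, zeroed before the comparison
        // index_csum, size, chunk_size and reserved follow; must match
    }

The new tests can be run on their own with something like:

    cargo test -p pbs-datastore fixed_index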
 pbs-datastore/src/fixed_index.rs | 187 +++++++++++++++++++++++++++++++
 1 file changed, 187 insertions(+)
diff --git a/pbs-datastore/src/fixed_index.rs b/pbs-datastore/src/fixed_index.rs
index 8036a519..a20edc94 100644
--- a/pbs-datastore/src/fixed_index.rs
+++ b/pbs-datastore/src/fixed_index.rs
@@ -530,3 +530,190 @@ impl FixedIndexWriter {
         Ok(())
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::chunk_store::ChunkStore;
+    use pbs_api_types::DatastoreFSyncLevel;
+    use std::fs;
+    use std::sync::Arc;
+
+    const CS: usize = 4096;
+
+    #[test]
+    fn test_fixed_index_writer() {
+        let mut testdir = fs::canonicalize(".").unwrap();
+        testdir.push(".testdir_fixed_index");
+
+        if let Err(_e) = std::fs::remove_dir_all(&testdir) { /* ignore */ }
+
+        // this is a lot faster than ChunkStore::create, which takes almost 3 seconds
+        let store = {
+            let mut chunks = PathBuf::from(&testdir);
+            chunks.push(".chunks");
+            fs::create_dir_all(chunks).unwrap();
+            Arc::new(ChunkStore::open("test", &testdir, DatastoreFSyncLevel::None).unwrap())
+        };
+
+        test_empty(store.clone());
+        test_smaller_than_initial_capacity(store.clone());
+        test_grow_to_multiples_of_chunk_size(store.clone());
+        test_grow_to_fixed_size(store);
+
+        std::fs::remove_dir_all(&testdir).unwrap();
+    }
+
+    fn test_empty(store: Arc<ChunkStore>) {
+        let path = Path::new("test_empty");
+        let mut w = FixedIndexWriter::create(store, path, None, CS).unwrap();
+
+        assert_eq!(0, w.index_length(), "returns length, not capacity");
+        assert!(w.add_digest(0, &[1u8; 32]).is_err(), "out of bounds");
+        assert!(w.close().is_err(), "should refuse to create empty file");
+    }
+
+    fn test_smaller_than_initial_capacity(store: Arc<ChunkStore>) {
+        let path = Path::new("test_smaller_than_initial_capacity");
+        let mut w = FixedIndexWriter::create(store.clone(), path, None, CS).unwrap();
+
+        let size = CS - 1;
+        let expected = test_data(size);
+        w.grow_to_size(size).unwrap();
+        expected[0].add_to(&mut w);
+
+        w.close().unwrap();
+        drop(w);
+
+        check_with_reader(&store.relative_path(path), size, &expected);
+        compare_to_known_size_writer(store, path, size, &expected);
+    }
+
+    fn test_grow_to_multiples_of_chunk_size(store: Arc<ChunkStore>) {
+        let path = Path::new("test_grow_to_multiples_of_chunk_size");
+        let mut w = FixedIndexWriter::create(store.clone(), path, None, CS).unwrap();
+
+        let initial = FixedIndexWriter::INITIAL_CAPACITY;
+        let steps = [1, 2, initial, initial + 1, 5 * initial, 10 * initial + 1];
+        let expected = test_data(steps.last().unwrap() * CS);
+
+        let mut begin = 0;
+        for chunk_count in steps {
+            let last = &expected[chunk_count - 1];
+            w.grow_to_size(last.end).unwrap();
+            assert_eq!(last.index + 1, w.index_length());
+            assert!(w.add_digest(last.index + 1, &[1u8; 32]).is_err());
+            for c in expected[begin..chunk_count].iter().rev() {
+                c.add_to(&mut w);
+            }
+            begin = chunk_count;
+        }
+        w.close().unwrap();
+        drop(w);
+
+        let size = expected.len() * CS;
+        check_with_reader(&store.relative_path(path), size, &expected);
+        compare_to_known_size_writer(store, path, size, &expected);
+    }
+
+    fn test_grow_to_fixed_size(store: Arc<ChunkStore>) {
+        let path = Path::new("test_grow_to_fixed_size");
+        let mut w = FixedIndexWriter::create(store.clone(), path, None, CS).unwrap();
+
+        let size = (FixedIndexWriter::INITIAL_CAPACITY + 42) * CS - 1; // last is not full
+        let expected = test_data(size);
+
+        w.grow_to_size(size).unwrap();
+        assert!(w.grow_to_size(size + 1).is_err(), "size must be fixed now");
+        assert_eq!(expected.len(), w.index_length());
+        assert!(w.add_digest(expected.len(), &[1u8; 32]).is_err());
+
+        for c in expected.iter().rev() {
+            c.add_to(&mut w);
+        }
+
+        w.close().unwrap();
+        drop(w);
+
+        check_with_reader(&store.relative_path(path), size, &expected);
+        compare_to_known_size_writer(store, path, size, &expected);
+    }
+
+    struct TestChunk {
+        digest: [u8; 32],
+        index: usize,
+        size: usize,
+        end: usize,
+    }
+
+    impl TestChunk {
+        fn add_to(&self, w: &mut FixedIndexWriter) {
+            assert_eq!(
+                self.index,
+                w.check_chunk_alignment(self.end, self.size).unwrap()
+            );
+            w.add_digest(self.index, &self.digest).unwrap();
+        }
+    }
+
+    fn test_data(size: usize) -> Vec<TestChunk> {
+        (0..size.div_ceil(CS))
+            .map(|index| {
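+                // build a unique fake digest by repeating the chunk index (little endian)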
+                let mut digest = [0u8; 32];
+                let i = &(index as u64).to_le_bytes();
+                for c in digest.chunks_mut(i.len()) {
+                    c.copy_from_slice(i);
+                }
+                let size = if ((index + 1) * CS) <= size {
+                    CS
+                } else {
+                    size % CS
+                };
+                TestChunk {
+                    digest,
+                    index,
+                    size,
+                    end: index * CS + size,
+                }
+            })
+            .collect()
+    }
+
+    fn check_with_reader(path: &Path, size: usize, chunks: &[TestChunk]) {
+        let reader = FixedIndexReader::open(path).unwrap();
+        assert_eq!(size as u64, reader.index_bytes());
+        assert_eq!(chunks.len(), reader.index_count());
+        for c in chunks {
+            assert_eq!(&c.digest, reader.index_digest(c.index).unwrap());
+        }
+    }
+
+    fn compare_to_known_size_writer(
+        store: Arc<ChunkStore>,
+        name: &Path,
+        size: usize,
+        chunks: &[TestChunk],
+    ) {
+        let mut path = PathBuf::from(name);
+        path.set_extension("reference");
+        let mut w = FixedIndexWriter::create(store.clone(), &path, Some(size), CS).unwrap();
+        for c in chunks {
+            c.add_to(&mut w);
+        }
+        w.close().unwrap();
+        drop(w);
+
+        let mut reference = fs::read(store.relative_path(&path)).unwrap();
+        let mut tested = fs::read(store.relative_path(name)).unwrap();
+
+        // ignore uuid and ctime
+        reference[8..32].fill(0);
+        tested[8..32].fill(0);
+
+        assert_eq!(reference, tested);
+    }
+}
--
2.47.3