[pbs-devel] [PATCH v3 proxmox-backup 19/24] fix #3174: archiver: reuse files with unchanged metadata
Christian Ebner
c.ebner at proxmox.com
Fri Nov 3 14:37:24 CET 2023
During pxar archive encoding, check regular files against their
metadata in the previous backup's catalog, if present.

Instead of re-encoding files whose metadata is unchanged and whose
size exceeds a given threshold, mark the entries as appendix
references in the pxar archive and append the chunks containing the
file payload to the appendix.
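
For illustration, the reuse decision boils down to comparing the size
and ctime of a file against its entry in the previous backup run's
catalog. A minimal sketch with stand-in types (not the actual catalog
API):

    /// Stand-in for the previous snapshot's catalog entry (illustrative only).
    struct PrevCatalogEntry {
        size: u64,
        ctime: i64,
    }

    /// Files at or below this size are always re-encoded.
    const MAX_FILE_SIZE: u64 = 1024;

    /// Whether the file payload may be reused from the previous snapshot
    /// instead of being re-encoded.
    fn may_reuse(prev: Option<&PrevCatalogEntry>, size: u64, ctime: i64) -> bool {
        match prev {
            Some(entry) if size > MAX_FILE_SIZE => entry.size == size && entry.ctime == ctime,
            _ => false,
        }
    }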
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
Changes since version 2:
- Increase the file size threshold for metadata-based detection to 1 KiB
  to reduce chunk fragmentation.
- Normalize the appendix section chunks to inject in order to minimize
  artificial bloating of the index. This further drastically reduces the
  restore time for a fragmented index.
Changes since version 1:
- Refactor to use the new catalog file format and types
- Fix an issue where injected chunks were not included in the upload list
  when consecutive files referenced the same chunk
- Remove the restriction that the previous backup run has to be regular in
  order to perform metadata-based file change detection. This is now
  possible, since the catalog stores the appendix start offset for each
  pxar archive in an easily accessible way.
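
The normalization mentioned above can be pictured as follows: before
queueing a file's chunk sequence for injection, check whether the same
digest sequence is already queued and, if so, only store its offset
instead of appending the chunks a second time. A simplified sketch,
where `Chunk` stands in for the dynamic index entry type:

    /// Stand-in for a dynamic index entry (illustrative only).
    struct Chunk {
        digest: [u8; 32],
        size: u64,
    }

    /// Byte offset of `needle` within `appendix` if the whole digest
    /// sequence is already present, so it can be referenced by offset
    /// instead of being appended again.
    fn sequence_offset(appendix: &[Chunk], needle: &[Chunk]) -> Option<u64> {
        if needle.is_empty() {
            return None;
        }
        let mut offset = 0u64;
        for start in 0..appendix.len() {
            if appendix.len() - start < needle.len() {
                // Not enough remaining chunks for a full match
                return None;
            }
            if appendix[start..start + needle.len()]
                .iter()
                .zip(needle)
                .all(|(a, b)| a.digest == b.digest)
            {
                return Some(offset);
            }
            offset += appendix[start].size;
        }
        None
    }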
pbs-client/src/pxar/create.rs | 275 +++++++++++++++++-
proxmox-backup-client/src/main.rs | 8 +-
.../src/proxmox_restore_daemon/api.rs | 1 +
pxar-bin/src/main.rs | 1 +
src/tape/file_formats/snapshot_archive.rs | 2 +-
5 files changed, 267 insertions(+), 20 deletions(-)
diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs
index 4464a3ed..d369ed8e 100644
--- a/pbs-client/src/pxar/create.rs
+++ b/pbs-client/src/pxar/create.rs
@@ -17,14 +17,17 @@ use nix::sys::stat::{FileStat, Mode};
use pathpatterns::{MatchEntry, MatchFlag, MatchList, MatchType, PatternFlag};
use proxmox_sys::error::SysError;
-use pxar::encoder::{LinkOffset, SeqWrite};
+use pxar::encoder::{LinkOffset, SeqSink, SeqWrite};
use pxar::Metadata;
use proxmox_io::vec;
use proxmox_lang::c_str;
use proxmox_sys::fs::{self, acl, xattr};
-use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader};
+use pbs_datastore::catalog::{
+ AppendixStartOffset, BackupCatalogWriter, CatalogReader, CatalogV2Extension, DirEntry,
+ DirEntryAttribute,
+};
use pbs_datastore::dynamic_index::{DynamicEntry, DynamicIndexReader};
use crate::inject_reused_chunks::InjectChunks;
@@ -32,6 +35,8 @@ use crate::pxar::metadata::errno_is_unsupported;
use crate::pxar::tools::assert_single_path_component;
use crate::pxar::Flags;
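+/// Only files larger than this size (in bytes) are considered for
+/// metadata-based reuse, in order to reduce chunk fragmentation.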
+const MAX_FILE_SIZE: u64 = 1024;
+
/// Pxar options for creating a pxar archive/stream
#[derive(Default)]
pub struct PxarCreateOptions {
@@ -45,6 +50,8 @@ pub struct PxarCreateOptions {
pub skip_lost_and_found: bool,
/// Reference state for partial backups
pub previous_ref: Option<PxarPrevRef>,
+ /// Catalog archive name
+ pub archive_name: Option<CString>,
}
/// Contains stateful information about previous backup snapshots for partial backups
@@ -144,7 +151,8 @@ struct Archiver {
file_copy_buffer: Vec<u8>,
previous_ref: Option<PxarPrevRef>,
forced_boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
- inject: (usize, Vec<DynamicEntry>),
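+ /// Current appendix end offset and the list of reused chunks to be injected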
+ inject: (pxar::encoder::AppendixRefOffset, Vec<DynamicEntry>),
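+ /// Start offset of the appendix in the previous backup run's pxar archive, if any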
+ prev_appendix: Option<AppendixStartOffset>,
}
type Encoder<'a, T> = pxar::encoder::aio::Encoder<'a, T>;
@@ -155,7 +163,7 @@ pub async fn create_archive<T, F>(
feature_flags: Flags,
callback: F,
catalog: Option<Arc<Mutex<dyn BackupCatalogWriter + Send>>>,
- options: PxarCreateOptions,
+ mut options: PxarCreateOptions,
forced_boundaries: Arc<Mutex<VecDeque<InjectChunks>>>,
) -> Result<(), Error>
where
@@ -196,6 +204,22 @@ where
)?);
}
+ let (appendix_start, prev_cat_parent) = if let Some(ref mut prev_ref) = options.previous_ref {
+ let entry = prev_ref
+ .catalog
+ .lookup_recursive(prev_ref.archive_name.as_bytes())?;
+ let parent = match entry.attr {
+ DirEntryAttribute::Archive { .. } => Some(entry),
+ _ => None,
+ };
+ let appendix_start = prev_ref
+ .catalog
+ .appendix_offset(prev_ref.archive_name.as_bytes())?;
+ (appendix_start, parent)
+ } else {
+ (None, None)
+ };
+
let mut archiver = Archiver {
feature_flags,
fs_feature_flags,
@@ -212,13 +236,42 @@ where
file_copy_buffer: vec::undefined(4 * 1024 * 1024),
previous_ref: options.previous_ref,
forced_boundaries,
- inject: (0, Vec::new()),
+ inject: (pxar::encoder::AppendixRefOffset::default(), Vec::new()),
+ prev_appendix: appendix_start,
};
+ if let Some(ref mut catalog) = archiver.catalog {
+ if let Some(ref archive_name) = options.archive_name {
+ catalog
+ .lock()
+ .unwrap()
+ .start_archive(archive_name.as_c_str())?;
+ }
+ }
+
archiver
- .archive_dir_contents(&mut encoder, source_dir, true)
+ .archive_dir_contents(&mut encoder, source_dir, prev_cat_parent.as_ref(), true)
.await?;
- encoder.finish().await?;
+
+ if !archiver.inject.1.is_empty() {
+ let (appendix_offset, appendix_size) = archiver.add_appendix(&mut encoder).await?;
+ encoder
+ .finish(Some((appendix_offset, appendix_size)))
+ .await?;
+ if let Some(catalog) = archiver.catalog {
+ if options.archive_name.is_some() {
+ catalog.lock().unwrap().end_archive(Some(appendix_offset))?;
+ }
+ }
+ } else {
+ encoder.finish(None).await?;
+ if let Some(ref mut catalog) = archiver.catalog {
+ if options.archive_name.is_some() {
+ catalog.lock().unwrap().end_archive(None)?;
+ }
+ }
+ }
+
Ok(())
}
@@ -247,6 +300,7 @@ impl Archiver {
&'a mut self,
encoder: &'a mut Encoder<'b, T>,
mut dir: Dir,
+ prev_cat_parent: Option<&'a DirEntry>,
is_root: bool,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
@@ -269,6 +323,13 @@ impl Archiver {
let old_path = std::mem::take(&mut self.path);
+ let mut prev_catalog_dir_entries = Vec::new();
+ if let Some(ref mut prev_ref) = self.previous_ref {
+ if let Some(ref parent) = prev_cat_parent {
+ prev_catalog_dir_entries = prev_ref.catalog.read_dir(parent)?;
+ }
+ }
+
for file_entry in file_list {
let file_name = file_entry.name.to_bytes();
@@ -280,9 +341,15 @@ impl Archiver {
(self.callback)(&file_entry.path)?;
self.path = file_entry.path;
- self.add_entry(encoder, dir_fd, &file_entry.name, &file_entry.stat)
- .await
- .map_err(|err| self.wrap_err(err))?;
+ self.add_entry(
+ encoder,
+ dir_fd,
+ &file_entry.name,
+ &file_entry.stat,
+ &prev_catalog_dir_entries,
+ )
+ .await
+ .map_err(|err| self.wrap_err(err))?;
}
self.path = old_path;
self.entry_counter = entry_counter;
@@ -529,12 +596,171 @@ impl Archiver {
Ok(())
}
+ async fn add_appendix<T: SeqWrite + Send>(
+ &mut self,
+ encoder: &mut Encoder<'_, T>,
+ ) -> Result<(pxar::encoder::AppendixStartOffset, u64), Error> {
+ let total = self
+ .inject
+ .1
+ .iter()
+ .fold(0, |sum, inject| sum + inject.end());
+ let appendix_offset = encoder.add_appendix(total).await?;
+ let mut boundaries = self.forced_boundaries.lock().unwrap();
+ let mut position = unsafe { encoder.position_add(0) };
+
+ // Inject reused chunks in batches of 128 to not exceed the upload POST request size limit
+ for injects in self.inject.1.chunks(128) {
+ let size = injects
+ .iter()
+ .fold(0, |sum, inject| sum + inject.end() as usize);
+ let inject_chunks = InjectChunks {
+ boundary: position,
+ chunks: injects.to_vec(),
+ size,
+ };
+ boundaries.push_back(inject_chunks);
+ position = unsafe { encoder.position_add(size as u64) };
+ }
+
+ Ok((appendix_offset, total))
+ }
+
+ fn digest_sequence_contained(
+ indices: &[DynamicEntry],
+ appendix_indices: &[DynamicEntry],
+ ) -> Option<u64> {
+ if indices.is_empty() {
+ return None;
+ }
+
+ let mut appendix_offset = 0;
+ for start in 0..appendix_indices.len() {
+ if appendix_indices.len() - start < indices.len() {
+ // There are not enough remaining chunks, so this will never match
+ return None;
+ }
+ // Check whether the full digest sequence matches starting at this
+ // position, otherwise try the next position if there is one
+ if indices
+ .iter()
+ .zip(&appendix_indices[start..])
+ .all(|(a, b)| a.digest() == b.digest())
+ {
+ // All chunks matched the sequence
+ return Some(appendix_offset);
+ }
+ appendix_offset += appendix_indices[start].end();
+ }
+
+ None
+ }
+
+ async fn reuse_if_metadata_unchanged<T: SeqWrite + Send>(
+ &mut self,
+ encoder: &mut Encoder<'_, T>,
+ c_file_name: &CStr,
+ metadata: &Metadata,
+ stat: &FileStat,
+ catalog_entries: &[DirEntry],
+ ) -> Result<bool, Error> {
+ let prev_ref = match self.previous_ref {
+ None => return Ok(false),
+ Some(ref mut prev_ref) => prev_ref,
+ };
+
+ let catalog_entry = catalog_entries
+ .iter()
+ .find(|entry| entry.name == c_file_name.to_bytes());
+
+ let (size, ctime, start_offset) = match catalog_entry {
+ Some(&DirEntry {
+ attr:
+ DirEntryAttribute::File {
+ size,
+ mtime: _,
+ extension: Some(CatalogV2Extension { ctime, file_offset }),
+ },
+ ..
+ }) => (size, ctime, file_offset.raw()),
+ Some(&DirEntry {
+ attr:
+ DirEntryAttribute::AppendixRef {
+ size,
+ mtime: _,
+ ctime,
+ appendix_ref_offset,
+ },
+ ..
+ }) => (
+ size,
+ ctime,
+ self.prev_appendix.unwrap().raw() + appendix_ref_offset.raw(),
+ ),
+ // The entry type found in the catalog is not a regular file,
+ // so do not reuse the entry but rather encode it again.
+ _ => return Ok(false),
+ };
+
+ let file_size = stat.st_size as u64;
+ if ctime == stat.st_ctime && size == file_size {
+ // Since the ctime is unchanged, use the current metadata to calculate the encoded
+ // size and thereby the end offset of the file payload in the reference archive.
+ // This is required to find the existing indices to be included in the new index file.
+ let mut bytes = pxar::encoder::Encoder::<SeqSink>::byte_len(c_file_name, metadata)?;
+ // payload header size
+ bytes += std::mem::size_of::<pxar::format::Header>() as u64;
+
+ let end_offset = start_offset + bytes + file_size;
+ let (indices, padding_start) = prev_ref.index.indices(start_offset, end_offset)?;
+
+ let appendix_ref_offset = if let Some(off) =
+ Archiver::digest_sequence_contained(&indices, &self.inject.1)
+ {
+ pxar::encoder::AppendixRefOffset::default().add(off + padding_start)
+ } else {
+ let current_end_offset = self.inject.0;
+ for chunk in indices.into_iter() {
+ self.inject.0 = self.inject.0.add(chunk.end());
+ self.inject.1.push(chunk);
+ }
+ current_end_offset.add(padding_start)
+ };
+
+ let file_name: &Path = OsStr::from_bytes(c_file_name.to_bytes()).as_ref();
+ self.add_appendix_ref(encoder, file_name, appendix_ref_offset, file_size)
+ .await?;
+
+ if let Some(ref catalog) = self.catalog {
+ catalog.lock().unwrap().add_appendix_ref(
+ c_file_name,
+ file_size,
+ stat.st_mtime,
+ stat.st_ctime,
+ appendix_ref_offset,
+ )?;
+ }
+
+ return Ok(true);
+ }
+
+ Ok(false)
+ }
+
async fn add_entry<T: SeqWrite + Send>(
&mut self,
encoder: &mut Encoder<'_, T>,
parent: RawFd,
c_file_name: &CStr,
stat: &FileStat,
+ prev_cat_entries: &[DirEntry],
) -> Result<(), Error> {
use pxar::format::mode;
@@ -595,6 +821,20 @@ impl Archiver {
}
let file_size = stat.st_size as u64;
+ if file_size > MAX_FILE_SIZE
+ && self
+ .reuse_if_metadata_unchanged(
+ encoder,
+ c_file_name,
+ &metadata,
+ stat,
+ prev_cat_entries,
+ )
+ .await?
+ {
+ return Ok(());
+ }
+
let offset: LinkOffset = self
.add_regular_file(encoder, fd, file_name, &metadata, file_size)
.await?;
@@ -622,8 +862,15 @@ impl Archiver {
if let Some(ref catalog) = self.catalog {
catalog.lock().unwrap().start_directory(c_file_name)?;
}
+ let prev_cat_entry = prev_cat_entries.iter().find(|entry| match entry {
+ DirEntry {
+ name,
+ attr: DirEntryAttribute::Directory { .. },
+ } => name == c_file_name.to_bytes(),
+ _ => false,
+ });
let result = self
- .add_directory(encoder, dir, c_file_name, &metadata, stat)
+ .add_directory(encoder, dir, c_file_name, &metadata, stat, prev_cat_entry)
.await;
if let Some(ref catalog) = self.catalog {
catalog.lock().unwrap().end_directory()?;
@@ -680,6 +927,7 @@ impl Archiver {
dir_name: &CStr,
metadata: &Metadata,
stat: &FileStat,
+ prev_cat_parent: Option<&DirEntry>,
) -> Result<(), Error> {
let dir_name = OsStr::from_bytes(dir_name.to_bytes());
@@ -706,14 +954,15 @@ impl Archiver {
log::info!("skipping mount point: {:?}", self.path);
Ok(())
} else {
- self.archive_dir_contents(&mut encoder, dir, false).await
+ self.archive_dir_contents(&mut encoder, dir, prev_cat_parent, false)
+ .await
};
self.fs_magic = old_fs_magic;
self.fs_feature_flags = old_fs_feature_flags;
self.current_st_dev = old_st_dev;
- encoder.finish().await?;
+ encoder.finish(None).await?;
result
}
diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
index 5945ae5d..cbdd9f43 100644
--- a/proxmox-backup-client/src/main.rs
+++ b/proxmox-backup-client/src/main.rs
@@ -48,7 +48,7 @@ use pbs_client::{
FixedChunkStream, HttpClient, PxarBackupStream, RemoteChunkReader, UploadOptions,
BACKUP_SOURCE_SCHEMA,
};
-use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
+use pbs_datastore::catalog::{CatalogReader, CatalogWriter};
use pbs_datastore::chunk_store::verify_chunk_size;
use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
use pbs_datastore::fixed_index::FixedIndexReader;
@@ -1005,10 +1005,6 @@ async fn create_backup(
let catalog = catalog.as_ref().unwrap();
log_file("directory", &filename, &target);
- catalog
- .lock()
- .unwrap()
- .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?;
let pxar_options = pbs_client::pxar::PxarCreateOptions {
device_set: devices.clone(),
@@ -1016,6 +1012,7 @@ async fn create_backup(
entries_max: entries_max as usize,
skip_lost_and_found,
previous_ref: None,
+ archive_name: Some(std::ffi::CString::new(target.as_str())?),
};
let upload_options = UploadOptions {
@@ -1036,7 +1033,6 @@ async fn create_backup(
)
.await?;
manifest.add_file(target, stats.size, stats.csum, crypto.mode)?;
- catalog.lock().unwrap().end_directory()?;
}
(BackupSpecificationType::IMAGE, false) => {
log_file("image", &filename, &target);
diff --git a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
index 5eff673e..734fa976 100644
--- a/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
+++ b/proxmox-restore-daemon/src/proxmox_restore_daemon/api.rs
@@ -359,6 +359,7 @@ fn extract(
patterns,
skip_lost_and_found: false,
previous_ref: None,
+ archive_name: None,
};
let pxar_writer = TokioWriter::new(writer);
diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs
index c019f3e4..6d365115 100644
--- a/pxar-bin/src/main.rs
+++ b/pxar-bin/src/main.rs
@@ -371,6 +371,7 @@ async fn create_archive(
patterns,
skip_lost_and_found: false,
previous_ref: None,
+ archive_name: None,
};
let writer = pxar::encoder::sync::StandardWriter::new(writer);
diff --git a/src/tape/file_formats/snapshot_archive.rs b/src/tape/file_formats/snapshot_archive.rs
index 252384b5..4bbf4727 100644
--- a/src/tape/file_formats/snapshot_archive.rs
+++ b/src/tape/file_formats/snapshot_archive.rs
@@ -88,7 +88,7 @@ pub fn tape_write_snapshot_archive<'a>(
proxmox_lang::io_bail!("file '{}' shrunk while reading", filename);
}
}
- encoder.finish()?;
+ encoder.finish(None)?;
Ok(())
});
--
2.39.2