[pbs-devel] [PATCH v4 proxmox 03/31] client: backup writer: allow push uploading index and chunks
Christian Ebner
c.ebner at proxmox.com
Thu Oct 17 15:26:48 CEST 2024
Add a method `upload_index_chunk_info` to be used for uploading an
existing index and the corresponding chunk stream.
Instead of taking an input stream of raw bytes, as `upload_stream`
does, this takes a stream of `MergedChunkInfo` objects as provided by
the local chunk reader of the sync job's source.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
changes since version 3:
- known chunks are now handled by the caller (so reading them can be
  avoided)
- adapt for new upload stat counters
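
For context, a rough sketch of how a caller (such as a push sync job)
might drive the new method; the stream construction and the names
`merged_chunk_info_stream` and `backup_writer` below are hypothetical
and only illustrate the expected item and option types:

    // Sketch only: `merged_chunk_info_stream` stands in for the stream
    // provided by the local chunk reader of the sync job's source,
    // i.e. impl Stream<Item = Result<MergedChunkInfo, Error>>.
    let stream = merged_chunk_info_stream();
    let options = UploadOptions {
        // None creates a dynamic index, Some(size) a fixed one
        fixed_size: None,
        ..UploadOptions::default()
    };
    let stats = backup_writer
        .upload_index_chunk_info("root.pxar.didx", stream, options)
        .await?;
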
pbs-client/src/backup_writer.rs | 95 +++++++++++++++++++++++++++++++++
pbs-client/src/lib.rs | 1 +
2 files changed, 96 insertions(+)
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 27d1c73b1..573f48f54 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -259,6 +259,101 @@ impl BackupWriter {
             .await
     }
+    /// Upload chunks and index
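+    ///
+    /// Intended for sync jobs in push direction: the stream items are
+    /// `MergedChunkInfo` objects as provided by the local chunk reader
+    /// of the sync job's source, rather than raw bytes.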
+    pub async fn upload_index_chunk_info(
+        &self,
+        archive_name: &str,
+        stream: impl Stream<Item = Result<MergedChunkInfo, Error>>,
+        options: UploadOptions,
+    ) -> Result<BackupStats, Error> {
+        let mut param = json!({ "archive-name": archive_name });
+        let prefix = if let Some(size) = options.fixed_size {
+            param["size"] = size.into();
+            "fixed"
+        } else {
+            "dynamic"
+        };
+
+        if options.encrypt && self.crypt_config.is_none() {
+            bail!("requested encryption without a crypt config");
+        }
+
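+        // Open a new fixed or dynamic index on the server; the returned
+        // writer ID identifies it in the append and close calls below.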
+        let wid = self
+            .h2
+            .post(&format!("{prefix}_index"), Some(param))
+            .await?
+            .as_u64()
+            .unwrap();
+
+        let mut counters = UploadCounters::new();
+        let counters_readonly = counters.clone();
+
+        let is_fixed_chunk_size = prefix == "fixed";
+
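+        // Checksum over the index content: the chunk digests, with the
+        // end offsets mixed in for dynamic indexes.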
+        let index_csum = Arc::new(Mutex::new(Some(Sha256::new())));
+        let index_csum_2 = index_csum.clone();
+
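+        // Update the upload counters and the index checksum for each
+        // chunk. Known chunks only touch the bookkeeping; their data is
+        // neither read nor re-uploaded.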
+        let stream = stream
+            .and_then(move |mut merged_chunk_info| {
+                match merged_chunk_info {
+                    MergedChunkInfo::New(ref chunk_info) => {
+                        counters.inc_total_chunks(1);
+                        let chunk_len = chunk_info.chunk_len;
+                        let offset = counters.inc_total_stream_len(chunk_len as usize);
+                        let end_offset = offset as u64 + chunk_len;
+                        let mut guard = index_csum.lock().unwrap();
+                        let csum = guard.as_mut().unwrap();
+                        if !is_fixed_chunk_size {
+                            csum.update(&end_offset.to_le_bytes());
+                        }
+                        csum.update(&chunk_info.digest);
+                    }
+                    MergedChunkInfo::Known(ref mut known_chunk_list) => {
+                        for (chunk_len, digest) in known_chunk_list {
+                            counters.inc_total_chunks(1);
+                            counters.inc_known_chunks(1);
+                            counters.inc_reused_stream_len(*chunk_len as usize);
+                            let offset = counters.inc_total_stream_len(*chunk_len as usize);
+                            let end_offset = offset as u64 + *chunk_len;
+                            let mut guard = index_csum.lock().unwrap();
+                            let csum = guard.as_mut().unwrap();
+                            if !is_fixed_chunk_size {
+                                csum.update(&end_offset.to_le_bytes());
+                            }
+                            csum.update(digest);
+                            // Replace the chunk size with the stream offset,
+                            // as expected by the rest of the upload stream
+                            *chunk_len = offset as u64;
+                        }
+                    }
+                }
+                future::ok(merged_chunk_info)
+            })
+            .merge_known_chunks();
+
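+        // Upload new chunks and append all entries to the remote index,
+        // then close it.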
+        let upload_stats = Self::upload_merged_chunk_stream(
+            self.h2.clone(),
+            wid,
+            prefix,
+            stream,
+            index_csum_2,
+            counters_readonly,
+        )
+        .await?;
+
+        let param = json!({
+            "wid": wid,
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": hex::encode(upload_stats.csum),
+        });
+        let _value = self
+            .h2
+            .post(&format!("{prefix}_close"), Some(param))
+            .await?;
+
+        Ok(upload_stats.to_backup_stats())
+    }
+
     pub async fn upload_stream(
         &self,
         archive_name: &str,
diff --git a/pbs-client/src/lib.rs b/pbs-client/src/lib.rs
index b875347bb..4b8e4e4f4 100644
--- a/pbs-client/src/lib.rs
+++ b/pbs-client/src/lib.rs
@@ -9,6 +9,7 @@ pub mod tools;
 mod inject_reused_chunks;
 mod merge_known_chunks;
+pub use merge_known_chunks::MergedChunkInfo;
 pub mod pipe_to_stream;
 mod http_client;
--
2.39.5