[pbs-devel] [PATCH v2 proxmox-backup 08/31] client: backup writer: allow push uploading index and chunks
Christian Ebner
c.ebner at proxmox.com
Thu Aug 1 09:43:40 CEST 2024
Add a method `upload_index_chunk_info` to be used for uploading an
existing index and the corresponding chunk stream.
Instead of taking an input stream of raw bytes as `upload_stream`
does, this method takes a stream of `ChunkInfo` objects as provided
by the local chunk reader of the sync job's source.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
changes since version 1:
- no changes
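
As context for reviewers, a rough sketch of a possible call site in the
push direction of a sync job. `writer` and `chunk_info_stream` are
placeholders for a connected BackupWriter and for the `ChunkInfo` stream
a local chunk reader based source would provide, and the archive name is
made up for illustration; none of this is part of this patch:

    // Hypothetical caller of the new method, e.g. a future push sync
    // implementation. `chunk_info_stream` yields Result<ChunkInfo, Error>
    // items read from the source datastore's index.
    let known_chunks = Arc::new(Mutex::new(HashSet::new()));
    // Default options: no fixed size, so a dynamic index, no encryption.
    let options = UploadOptions::default();
    let stats = writer
        .upload_index_chunk_info(
            "root.pxar.didx",
            chunk_info_stream,
            options,
            known_chunks.clone(),
        )
        .await?;

The returned BackupStats carry size and checksum like for the regular
upload methods, so the caller can record the archive in the manifest the
same way.
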
 pbs-client/src/backup_writer.rs | 116 ++++++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index b71157164..dbb7db0e8 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -288,6 +288,122 @@ impl BackupWriter {
         .await
     }
+    /// Upload chunks and index
+    pub async fn upload_index_chunk_info(
+        &self,
+        archive_name: &str,
+        stream: impl Stream<Item = Result<ChunkInfo, Error>>,
+        options: UploadOptions,
+        known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
+    ) -> Result<BackupStats, Error> {
+        let mut param = json!({ "archive-name": archive_name });
+        let prefix = if let Some(size) = options.fixed_size {
+            param["size"] = size.into();
+            "fixed"
+        } else {
+            "dynamic"
+        };
+
+        if options.encrypt && self.crypt_config.is_none() {
+            bail!("requested encryption without a crypt config");
+        }
+
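+        // Create the fixed or dynamic index on the server; the returned
+        // writer ID identifies this upload session.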
+        let wid = self
+            .h2
+            .post(&format!("{prefix}_index"), Some(param))
+            .await?
+            .as_u64()
+            .unwrap();
+
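+        // Shared counters to collect the upload statistics.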
+        let total_chunks = Arc::new(AtomicUsize::new(0));
+        let known_chunk_count = Arc::new(AtomicUsize::new(0));
+
+        let stream_len = Arc::new(AtomicUsize::new(0));
+        let compressed_stream_len = Arc::new(AtomicU64::new(0));
+        let reused_len = Arc::new(AtomicUsize::new(0));
+
+        let counters = UploadStatsCounters {
+            injected_chunk_count: Arc::new(AtomicUsize::new(0)),
+            known_chunk_count: known_chunk_count.clone(),
+            total_chunks: total_chunks.clone(),
+            compressed_stream_len: compressed_stream_len.clone(),
+            injected_len: Arc::new(AtomicUsize::new(0)),
+            reused_len: reused_len.clone(),
+            stream_len: stream_len.clone(),
+        };
+
+        let is_fixed_chunk_size = prefix == "fixed";
+
+        let index_csum = Arc::new(Mutex::new(Some(Sha256::new())));
+        let index_csum_2 = index_csum.clone();
+
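+        // For each chunk, update the statistics and the index checksum
+        // (dynamic indexes also cover the end offset), then classify it:
+        // known chunks only get referenced, new chunks get uploaded.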
+        let stream = stream
+            .and_then(move |chunk_info| {
+                total_chunks.fetch_add(1, Ordering::SeqCst);
+                reused_len.fetch_add(chunk_info.chunk_len as usize, Ordering::SeqCst);
+                let offset = stream_len.fetch_add(chunk_info.chunk_len as usize, Ordering::SeqCst);
+
+                let end_offset = offset as u64 + chunk_info.chunk_len;
+                let mut guard = index_csum.lock().unwrap();
+                let csum = guard.as_mut().unwrap();
+                if !is_fixed_chunk_size {
+                    csum.update(&end_offset.to_le_bytes());
+                }
+                csum.update(&chunk_info.digest);
+
+                let mut known_chunks = known_chunks.lock().unwrap();
+                if known_chunks.contains(&chunk_info.digest) {
+                    known_chunk_count.fetch_add(1, Ordering::SeqCst);
+                    future::ok(MergedChunkInfo::Known(vec![(
+                        chunk_info.offset,
+                        chunk_info.digest,
+                    )]))
+                } else {
+                    known_chunks.insert(chunk_info.digest);
+                    future::ok(MergedChunkInfo::New(chunk_info))
+                }
+            })
+            .merge_known_chunks();
+
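+        // Upload new chunks and register known digests via the merged
+        // chunk stream.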
+        let upload_stats = Self::upload_merged_chunk_stream(
+            self.h2.clone(),
+            wid,
+            prefix,
+            stream,
+            index_csum_2,
+            counters,
+        )
+        .await?;
+
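+        // Close the index, the server verifies chunk count, size and
+        // checksum.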
+        let param = json!({
+            "wid": wid,
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": hex::encode(upload_stats.csum),
+        });
+        let _value = self
+            .h2
+            .post(&format!("{prefix}_close"), Some(param))
+            .await?;
+
+        Ok(BackupStats {
+            size: upload_stats.size as u64,
+            csum: upload_stats.csum,
+            duration: upload_stats.duration,
+            chunk_count: upload_stats.chunk_count as u64,
+        })
+    }
+
     pub async fn upload_stream(
         &self,
         archive_name: &str,
--
2.39.2