[pbs-devel] [PATCH v2 proxmox-backup 6/6] server: push: prefix log messages and add additional logging
Christian Ebner
c.ebner at proxmox.com
Mon Jan 20 11:51:04 CET 2025
Pushing groups, and therefore also snapshots, in parallel leads to
interleaved log output, making it mostly impossible to relate a log
message to a backup snapshot or group.

Therefore, prefix push job log messages with the corresponding group or
snapshot.

Also, make push syncs more verbose by adding additional log output for
the groups, snapshots and archives being pushed.
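
For illustration (hypothetical group, snapshot and archive names; sizes
made up), the prefixed output of a push then reads roughly like:

    Snapshot vm/100/2025-01-20T10:00:00Z: start sync
    Snapshot vm/100/2025-01-20T10:00:00Z: sync archive drive-scsi0.img.fidx
    Snapshot vm/100/2025-01-20T10:00:00Z: archive drive-scsi0.img.fidx: uploaded 4 GiB (512 MiB/s)
    Snapshot vm/100/2025-01-20T10:00:00Z: sync done
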
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
changes since version 1:
- not present in previous version
src/server/push.rs | 49 ++++++++++++++++++++++++++++++++++++----------
1 file changed, 39 insertions(+), 10 deletions(-)
diff --git a/src/server/push.rs b/src/server/push.rs
index b3de214b4..4827dbe92 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -25,6 +25,8 @@ use pbs_datastore::index::IndexFile;
use pbs_datastore::read_chunk::AsyncReadChunk;
use pbs_datastore::{DataStore, StoreProgress};
+use proxmox_human_byte::HumanByte;
+
use super::sync::{
check_namespace_depth_limit, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason,
SyncSource, SyncStats,
@@ -701,6 +703,7 @@ pub(crate) async fn push_group(
group: &BackupGroup,
store_progress: Arc<Mutex<StoreProgress>>,
) -> Result<SyncStats, Error> {
+ let prefix = format!("Group {group}");
let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced);
let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast);
@@ -746,11 +749,11 @@ pub(crate) async fn push_group(
.collect();
if already_synced_skip_info.count > 0 {
- info!("{already_synced_skip_info}");
+ info!("{prefix}: {already_synced_skip_info}");
already_synced_skip_info.reset();
}
if transfer_last_skip_info.count > 0 {
- info!("{transfer_last_skip_info}");
+ info!("{prefix}: {transfer_last_skip_info}");
transfer_last_skip_info.reset();
}
@@ -760,6 +763,7 @@ pub(crate) async fn push_group(
let mut stats = SyncStats::default();
let mut fetch_previous_manifest = !target_snapshots.is_empty();
for (pos, source_snapshot) in snapshots.into_iter().enumerate() {
+ info!("Snapshot {source_snapshot}: start sync");
let result =
push_snapshot(params, namespace, &source_snapshot, fetch_previous_manifest).await;
fetch_previous_manifest = true;
@@ -768,10 +772,11 @@ pub(crate) async fn push_group(
local_progress.done_snapshots = pos as u64 + 1;
// Update done groups progress by other parallel running pushes
local_progress.done_groups = store_progress.lock().unwrap().done_groups;
- info!("Percentage done: {local_progress}");
// stop on error
let sync_stats = result?;
+ info!("Snapshot {source_snapshot}: sync done");
+ info!("Percentage done: {local_progress}");
stats.add(sync_stats);
}
@@ -782,7 +787,7 @@ pub(crate) async fn push_group(
}
if snapshot.protected {
info!(
- "Kept protected snapshot {name} on remote",
+ "{prefix}: Kept protected snapshot {name} on remote",
name = snapshot.backup
);
continue;
@@ -790,14 +795,14 @@ pub(crate) async fn push_group(
match forget_target_snapshot(params, &target_namespace, &snapshot.backup).await {
Ok(()) => {
info!(
- "Removed vanished snapshot {name} from remote",
+ "{prefix}: Removed vanished snapshot {name} from remote",
name = snapshot.backup
);
}
Err(err) => {
- warn!("Encountered errors: {err:#}");
+ warn!("{prefix}: Encountered errors: {err:#}");
warn!(
- "Failed to remove vanished snapshot {name} from remote!",
+ "{prefix}: Failed to remove vanished snapshot {name} from remote!",
name = snapshot.backup
);
}
@@ -825,6 +830,7 @@ pub(crate) async fn push_snapshot(
snapshot: &BackupDir,
fetch_previous_manifest: bool,
) -> Result<SyncStats, Error> {
+ let prefix = format!("Snapshot {snapshot}");
let mut stats = SyncStats::default();
let target_ns = params.map_to_target(namespace)?;
let backup_dir = params
@@ -840,8 +846,8 @@ pub(crate) async fn push_snapshot(
Ok((manifest, _raw_size)) => manifest,
Err(err) => {
// No manifest in snapshot or failed to read, warn and skip
- log::warn!("Encountered errors: {err:#}");
- log::warn!("Failed to load manifest for '{snapshot}'!");
+ warn!("{prefix}: Encountered errors: {err:#}");
+ warn!("{prefix}: Failed to load manifest!");
return Ok(stats);
}
};
@@ -863,7 +869,7 @@ pub(crate) async fn push_snapshot(
if fetch_previous_manifest {
match backup_writer.download_previous_manifest().await {
Ok(manifest) => previous_manifest = Some(Arc::new(manifest)),
- Err(err) => log::info!("Could not download previous manifest - {err}"),
+ Err(err) => info!("{prefix}: Could not download previous manifest - {err}"),
}
};
@@ -892,12 +898,21 @@ pub(crate) async fn push_snapshot(
path.push(&entry.filename);
if path.try_exists()? {
let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+ info!("{prefix}: sync archive {archive_name}");
+ let prefix = format!("Snapshot {snapshot}: archive {archive_name}");
match archive_name.archive_type() {
ArchiveType::Blob => {
let file = std::fs::File::open(path.clone())?;
let backup_stats = backup_writer
.upload_blob(file, archive_name.as_ref())
.await?;
+ info!(
+ "{prefix}: uploaded {} ({}/s)",
+ HumanByte::from(backup_stats.size),
+ HumanByte::new_binary(
+ backup_stats.size as f64 / backup_stats.duration.as_secs_f64()
+ ),
+ );
stats.add(SyncStats {
chunk_count: backup_stats.chunk_count as usize,
bytes: backup_stats.size as usize,
@@ -927,6 +942,13 @@ pub(crate) async fn push_snapshot(
known_chunks.clone(),
)
.await?;
+ info!(
+ "{prefix}: uploaded {} ({}/s)",
+ HumanByte::from(sync_stats.bytes),
+ HumanByte::new_binary(
+ sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+ ),
+ );
stats.add(sync_stats);
}
ArchiveType::FixedIndex => {
@@ -952,6 +974,13 @@ pub(crate) async fn push_snapshot(
known_chunks.clone(),
)
.await?;
+ info!(
+ "{prefix}: uploaded {} ({}/s)",
+ HumanByte::from(sync_stats.bytes),
+ HumanByte::new_binary(
+ sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+ ),
+ );
stats.add(sync_stats);
}
}
--
2.39.5
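
As a standalone reference, here is a minimal sketch of the upload rate
formatting added above (not part of the patch; it only assumes the
proxmox-human-byte crate and uses a made-up prefix and values):

    use std::time::Duration;

    use proxmox_human_byte::HumanByte;

    /// Build an upload summary line analogous to the push log messages:
    /// "<prefix>: uploaded <total size> (<throughput>/s)".
    fn upload_summary(prefix: &str, bytes: u64, elapsed: Duration) -> String {
        format!(
            "{prefix}: uploaded {} ({}/s)",
            HumanByte::from(bytes),
            HumanByte::new_binary(bytes as f64 / elapsed.as_secs_f64()),
        )
    }

    fn main() {
        // Hypothetical archive size and duration, for illustration only.
        let bytes: u64 = 4 * 1024 * 1024 * 1024;
        let line = upload_summary(
            "Snapshot vm/100/2025-01-20T10:00:00Z: archive drive-scsi0.img.fidx",
            bytes,
            Duration::from_secs(8),
        );
        println!("{line}");
    }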