[pbs-devel] [PATCH proxmox-backup v6 4/4] api: switch from task_log! macro to tracing

Gabriel Goller g.goller at proxmox.com
Wed Jun 26 15:10:33 CEST 2024


Import `proxmox-log` and substitute all `task_log!`
(and `task_warn!`, `task_error!`) invocations with tracing calls
(`info!`, `warn!`, etc.). Remove worker references where they are
no longer necessary.

Signed-off-by: Gabriel Goller <g.goller at proxmox.com>
---
 src/api2/admin/datastore.rs      |  26 ++--
 src/api2/config/acme.rs          |  21 +--
 src/api2/config/datastore.rs     |  16 +-
 src/api2/config/prune.rs         |  14 +-
 src/api2/node/apt.rs             |  13 +-
 src/api2/node/certificates.rs    |  67 ++++----
 src/api2/node/disks/directory.rs |  13 +-
 src/api2/node/disks/mod.rs       |  12 +-
 src/api2/node/disks/zfs.rs       |  31 ++--
 src/api2/node/mod.rs             |  11 +-
 src/api2/pull.rs                 |  37 ++---
 src/api2/tape/backup.rs          |  75 ++++-----
 src/api2/tape/drive.rs           | 152 +++++++-----------
 src/api2/tape/restore.rs         | 259 ++++++++++---------------------
 14 files changed, 262 insertions(+), 485 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 82221c503d16..8342078a162f 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -13,6 +13,7 @@ use hyper::{header, Body, Response, StatusCode};
 use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio_stream::wrappers::ReceiverStream;
+use tracing::{info, warn};
 
 use proxmox_async::blocking::WrappedReaderStream;
 use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream};
@@ -26,7 +27,6 @@ use proxmox_sortable_macro::sortable;
 use proxmox_sys::fs::{
     file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
 };
-use proxmox_sys::{task_log, task_warn};
 use proxmox_time::CalendarEvent;
 
 use pxar::accessor::aio::Accessor;
@@ -909,9 +909,9 @@ pub fn verify(
                 )?
             };
             if !failed_dirs.is_empty() {
-                task_log!(worker, "Failed to verify the following snapshots/groups:");
+                info!("Failed to verify the following snapshots/groups:");
                 for dir in failed_dirs {
-                    task_log!(worker, "\t{}", dir);
+                    info!("\t{dir}");
                 }
                 bail!("verification failed - please check the log for details");
             }
@@ -1031,9 +1031,9 @@ pub fn prune(
         return Ok(json!(prune_result));
     }
 
-    let prune_group = move |worker: Arc<WorkerTask>| {
+    let prune_group = move |_worker: Arc<WorkerTask>| {
         if keep_all {
-            task_log!(worker, "No prune selection - keeping all files.");
+            info!("No prune selection - keeping all files.");
         } else {
             let mut opts = Vec::new();
             if !ns.is_root() {
@@ -1041,9 +1041,8 @@ pub fn prune(
             }
             crate::server::cli_keep_options(&mut opts, &keep_options);
 
-            task_log!(worker, "retention options: {}", opts.join(" "));
-            task_log!(
-                worker,
+            info!("retention options: {}", opts.join(" "));
+            info!(
                 "Starting prune on {} group \"{}\"",
                 print_store_and_ns(&store, &ns),
                 group.group(),
@@ -1060,7 +1059,7 @@ pub fn prune(
 
             let msg = format!("{}/{}/{timestamp} {mark}", group.ty, group.id);
 
-            task_log!(worker, "{msg}");
+            info!("{msg}");
 
             prune_result.push(PruneResult {
                 backup_type: group.ty,
@@ -1073,8 +1072,7 @@ pub fn prune(
 
             if !keep {
                 if let Err(err) = backup_dir.destroy(false) {
-                    task_warn!(
-                        worker,
+                    warn!(
                         "failed to remove dir {:?}: {}",
                         backup_dir.relative_path(),
                         err,
@@ -1098,7 +1096,7 @@ pub fn prune(
         )?;
         Ok(json!(upid))
     } else {
-        let worker = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
+        let (worker, _) = WorkerTask::new("prune", Some(worker_id), auth_id.to_string(), true)?;
         let result = prune_group(worker.clone());
         worker.log_result(&Ok(()));
         Ok(json!(result))
@@ -1161,9 +1159,7 @@ pub fn prune_datastore(
         Some(worker_id),
         auth_id.to_string(),
         to_stdout,
-        move |worker| {
-            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
-        },
+        move |_worker| crate::server::prune_datastore(auth_id, prune_options, datastore, dry_run),
     )?;
 
     Ok(upid_str)
diff --git a/src/api2/config/acme.rs b/src/api2/config/acme.rs
index 0b6acc5053ab..35c817cc6009 100644
--- a/src/api2/config/acme.rs
+++ b/src/api2/config/acme.rs
@@ -9,12 +9,12 @@ use hex::FromHex;
 use lazy_static::lazy_static;
 use serde::{Deserialize, Serialize};
 use serde_json::{json, Value};
+use tracing::{info, warn};
 
 use proxmox_router::{
     http_bail, list_subdirs_api_method, Permission, Router, RpcEnvironment, SubdirMap,
 };
 use proxmox_schema::{api, param_bail};
-use proxmox_sys::{task_log, task_warn};
 
 use proxmox_acme::types::AccountData as AcmeAccountData;
 use proxmox_acme::Account;
@@ -240,10 +240,10 @@ fn register_account(
         Some(name.to_string()),
         auth_id.to_string(),
         true,
-        move |worker| async move {
+        move |_worker| async move {
             let mut client = AcmeClient::new(directory);
 
-            task_log!(worker, "Registering ACME account '{}'...", &name);
+            info!("Registering ACME account '{}'...", &name);
 
             let account = do_register_account(
                 &mut client,
@@ -255,11 +255,7 @@ fn register_account(
             )
             .await?;
 
-            task_log!(
-                worker,
-                "Registration successful, account URL: {}",
-                account.location
-            );
+            info!("Registration successful, account URL: {}", account.location);
 
             Ok(())
         },
@@ -354,7 +350,7 @@ pub fn deactivate_account(
         Some(name.to_string()),
         auth_id.to_string(),
         true,
-        move |worker| async move {
+        move |_worker| async move {
             match AcmeClient::load(&name)
                 .await?
                 .update_account(&json!({"status": "deactivated"}))
@@ -363,12 +359,7 @@ pub fn deactivate_account(
                 Ok(_account) => (),
                 Err(err) if !force => return Err(err),
                 Err(err) => {
-                    task_warn!(
-                        worker,
-                        "error deactivating account {}, proceedeing anyway - {}",
-                        name,
-                        err,
-                    );
+                    warn!("error deactivating account {name}, proceeding anyway - {err}");
                 }
             }
             crate::config::acme::mark_account_deactivated(&name)?;
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 6b742acbc42a..3f7b06e0ac9b 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -4,11 +4,11 @@ use ::serde::{Deserialize, Serialize};
 use anyhow::Error;
 use hex::FromHex;
 use serde_json::Value;
+use tracing::warn;
 
 use proxmox_router::{http_bail, Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::{api, param_bail, ApiType};
 use proxmox_section_config::SectionConfigData;
-use proxmox_sys::{task_warn, WorkerTaskContext};
 use proxmox_uuid::Uuid;
 
 use pbs_api_types::{
@@ -70,7 +70,6 @@ pub(crate) fn do_create_datastore(
     _lock: BackupLockGuard,
     mut config: SectionConfigData,
     datastore: DataStoreConfig,
-    worker: Option<&dyn WorkerTaskContext>,
 ) -> Result<(), Error> {
     let path: PathBuf = datastore.path.clone().into();
 
@@ -84,7 +83,6 @@ pub(crate) fn do_create_datastore(
         path,
         backup_user.uid,
         backup_user.gid,
-        worker,
         tuning.sync_level.unwrap_or_default(),
     )?;
 
@@ -155,11 +153,11 @@ pub fn create_datastore(
         Some(config.name.to_string()),
         auth_id.to_string(),
         to_stdout,
-        move |worker| {
-            do_create_datastore(lock, section_config, config, Some(&worker))?;
+        move |_worker| {
+            do_create_datastore(lock, section_config, config)?;
 
             if let Some(prune_job_config) = prune_job_config {
-                do_create_prune_job(prune_job_config, Some(&worker))
+                do_create_prune_job(prune_job_config)
             } else {
                 Ok(())
             }
@@ -528,8 +526,8 @@ pub async fn delete_datastore(
         Some(name.clone()),
         auth_id.to_string(),
         to_stdout,
-        move |worker| {
-            pbs_datastore::DataStore::destroy(&name, destroy_data, &worker)?;
+        move |_worker| {
+            pbs_datastore::DataStore::destroy(&name, destroy_data)?;
 
             // ignore errors
             let _ = jobstate::remove_state_file("prune", &name);
@@ -538,7 +536,7 @@ pub async fn delete_datastore(
             if let Err(err) =
                 proxmox_async::runtime::block_on(crate::server::notify_datastore_removed())
             {
-                task_warn!(worker, "failed to notify after datastore removal: {err}");
+                warn!("failed to notify after datastore removal: {err}");
             }
 
             Ok(())
diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs
index 4f7ce39cd50c..4f3d94af2b1b 100644
--- a/src/api2/config/prune.rs
+++ b/src/api2/config/prune.rs
@@ -1,7 +1,5 @@
 use anyhow::Error;
 use hex::FromHex;
-use proxmox_sys::task_log;
-use proxmox_sys::WorkerTaskContext;
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 
@@ -15,6 +13,7 @@ use pbs_api_types::{
 use pbs_config::prune;
 
 use pbs_config::CachedUserInfo;
+use tracing::info;
 
 #[api(
     input: {
@@ -58,10 +57,7 @@ pub fn list_prune_jobs(
     Ok(list)
 }
 
-pub fn do_create_prune_job(
-    config: PruneJobConfig,
-    worker: Option<&dyn WorkerTaskContext>,
-) -> Result<(), Error> {
+pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> {
     let _lock = prune::lock_config()?;
 
     let (mut section_config, _digest) = prune::config()?;
@@ -76,9 +72,7 @@ pub fn do_create_prune_job(
 
     crate::server::jobstate::create_state_file("prunejob", &config.id)?;
 
-    if let Some(worker) = worker {
-        task_log!(worker, "Prune job created: {}", config.id);
-    }
+    info!("Prune job created: {}", config.id);
 
     Ok(())
 }
@@ -108,7 +102,7 @@ pub fn create_prune_job(
 
     user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_MODIFY, true)?;
 
-    do_create_prune_job(config, None)
+    do_create_prune_job(config)
 }
 
 #[api(
diff --git a/src/api2/node/apt.rs b/src/api2/node/apt.rs
index 0ae353d5bd5d..9ac24d8181f4 100644
--- a/src/api2/node/apt.rs
+++ b/src/api2/node/apt.rs
@@ -1,6 +1,7 @@
 use anyhow::{bail, format_err, Error};
 use serde_json::{json, Value};
 use std::os::unix::prelude::OsStrExt;
+use tracing::{info, warn};
 
 use proxmox_router::{
     list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
@@ -83,9 +84,9 @@ fn read_and_update_proxy_config() -> Result<Option<ProxyConfig>, Error> {
     Ok(proxy_config)
 }
 
-fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
+fn do_apt_update(quiet: bool) -> Result<(), Error> {
     if !quiet {
-        worker.log_message("starting apt-get update")
+        info!("starting apt-get update");
     }
 
     read_and_update_proxy_config()?;
@@ -99,7 +100,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
         .map_err(|err| format_err!("failed to execute {:?} - {}", command, err))?;
 
     if !quiet {
-        worker.log_message(String::from_utf8(output.stdout)?);
+        info!("{}", String::from_utf8(output.stdout)?);
     }
 
     // TODO: improve run_command to allow outputting both, stderr and stdout
@@ -114,7 +115,7 @@ fn do_apt_update(worker: &WorkerTask, quiet: bool) -> Result<(), Error> {
                     }
                 })
                 .unwrap_or_else(|_| String::from("non utf8 error message (suppressed)"));
-            worker.log_warning(msg);
+            warn!(msg);
         } else {
             bail!("terminated by signal");
         }
@@ -160,8 +161,8 @@ pub fn apt_update_database(
     let auth_id = rpcenv.get_auth_id().unwrap();
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
-    let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |worker| {
-        do_apt_update(&worker, quiet)?;
+    let upid_str = WorkerTask::new_thread("aptupdate", None, auth_id, to_stdout, move |_worker| {
+        do_apt_update(quiet)?;
 
         let mut cache = apt::update_cache()?;
 
diff --git a/src/api2/node/certificates.rs b/src/api2/node/certificates.rs
index 6dfcd0c0626e..9f838b664e04 100644
--- a/src/api2/node/certificates.rs
+++ b/src/api2/node/certificates.rs
@@ -5,16 +5,17 @@ use anyhow::{bail, format_err, Error};
 use openssl::pkey::PKey;
 use openssl::x509::X509;
 use serde::{Deserialize, Serialize};
+use tracing::info;
 
 use proxmox_router::list_subdirs_api_method;
 use proxmox_router::SubdirMap;
 use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
-use proxmox_sys::{task_log, task_warn};
 
 use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_MODIFY};
 use pbs_buildcfg::configdir;
 use pbs_tools::cert;
+use tracing::warn;
 
 use crate::acme::AcmeClient;
 use crate::api2::types::AcmeDomain;
@@ -301,10 +302,7 @@ async fn order_certificate(
     };
 
     if domains.is_empty() {
-        task_log!(
-            worker,
-            "No domains configured to be ordered from an ACME server."
-        );
+        info!("No domains configured to be ordered from an ACME server.");
         return Ok(None);
     }
 
@@ -312,11 +310,11 @@ async fn order_certificate(
 
     let mut acme = node_config.acme_client().await?;
 
-    task_log!(worker, "Placing ACME order");
+    info!("Placing ACME order");
     let order = acme
         .new_order(domains.iter().map(|d| d.domain.to_ascii_lowercase()))
         .await?;
-    task_log!(worker, "Order URL: {}", order.location);
+    info!("Order URL: {}", order.location);
 
     let identifiers: Vec<String> = order
         .data
@@ -328,7 +326,7 @@ async fn order_certificate(
         .collect();
 
     for auth_url in &order.data.authorizations {
-        task_log!(worker, "Getting authorization details from '{}'", auth_url);
+        info!("Getting authorization details from '{auth_url}'");
         let mut auth = acme.get_authorization(auth_url).await?;
 
         let domain = match &mut auth.identifier {
@@ -336,43 +334,37 @@ async fn order_certificate(
         };
 
         if auth.status == Status::Valid {
-            task_log!(worker, "{} is already validated!", domain);
+            info!("{domain} is already validated!");
             continue;
         }
 
-        task_log!(worker, "The validation for {} is pending", domain);
+        info!("The validation for {domain} is pending");
         let domain_config: &AcmeDomain = get_domain_config(&domain)?;
         let plugin_id = domain_config.plugin.as_deref().unwrap_or("standalone");
         let mut plugin_cfg =
             crate::acme::get_acme_plugin(&plugins, plugin_id)?.ok_or_else(|| {
-                format_err!("plugin '{}' for domain '{}' not found!", plugin_id, domain)
+                format_err!("plugin '{plugin_id}' for domain '{domain}' not found!")
             })?;
 
-        task_log!(worker, "Setting up validation plugin");
+        info!("Setting up validation plugin");
         let validation_url = plugin_cfg
             .setup(&mut acme, &auth, domain_config, Arc::clone(&worker))
             .await?;
 
-        let result = request_validation(&worker, &mut acme, auth_url, validation_url).await;
+        let result = request_validation(&mut acme, auth_url, validation_url).await;
 
         if let Err(err) = plugin_cfg
             .teardown(&mut acme, &auth, domain_config, Arc::clone(&worker))
             .await
         {
-            task_warn!(
-                worker,
-                "Failed to teardown plugin '{}' for domain '{}' - {}",
-                plugin_id,
-                domain,
-                err
-            );
+            warn!("Failed to teardown plugin '{plugin_id}' for domain '{domain}' - {err}");
         }
 
         result?;
     }
 
-    task_log!(worker, "All domains validated");
-    task_log!(worker, "Creating CSR");
+    info!("All domains validated");
+    info!("Creating CSR");
 
     let csr = proxmox_acme::util::Csr::generate(&identifiers, &Default::default())?;
     let mut finalize_error_cnt = 0u8;
@@ -385,7 +377,7 @@ async fn order_certificate(
 
         match order.status {
             Status::Pending => {
-                task_log!(worker, "still pending, trying to finalize anyway");
+                info!("still pending, trying to finalize anyway");
                 let finalize = order
                     .finalize
                     .as_deref()
@@ -400,7 +392,7 @@ async fn order_certificate(
                 tokio::time::sleep(Duration::from_secs(5)).await;
             }
             Status::Ready => {
-                task_log!(worker, "order is ready, finalizing");
+                info!("order is ready, finalizing");
                 let finalize = order
                     .finalize
                     .as_deref()
@@ -409,18 +401,18 @@ async fn order_certificate(
                 tokio::time::sleep(Duration::from_secs(5)).await;
             }
             Status::Processing => {
-                task_log!(worker, "still processing, trying again in 30 seconds");
+                info!("still processing, trying again in 30 seconds");
                 tokio::time::sleep(Duration::from_secs(30)).await;
             }
             Status::Valid => {
-                task_log!(worker, "valid");
+                info!("valid");
                 break;
             }
             other => bail!("order status: {:?}", other),
         }
     }
 
-    task_log!(worker, "Downloading certificate");
+    info!("Downloading certificate");
     let certificate = acme
         .get_certificate(
             order
@@ -437,15 +429,14 @@ async fn order_certificate(
 }
 
 async fn request_validation(
-    worker: &WorkerTask,
     acme: &mut AcmeClient,
     auth_url: &str,
     validation_url: &str,
 ) -> Result<(), Error> {
-    task_log!(worker, "Triggering validation");
+    info!("Triggering validation");
     acme.request_challenge_validation(validation_url).await?;
 
-    task_log!(worker, "Sleeping for 5 seconds");
+    info!("Sleeping for 5 seconds");
     tokio::time::sleep(Duration::from_secs(5)).await;
 
     loop {
@@ -454,10 +445,7 @@ async fn request_validation(
         let auth = acme.get_authorization(auth_url).await?;
         match auth.status {
             Status::Pending => {
-                task_log!(
-                    worker,
-                    "Status is still 'pending', trying again in 10 seconds"
-                );
+                info!("Status is still 'pending', trying again in 10 seconds");
                 tokio::time::sleep(Duration::from_secs(10)).await;
             }
             Status::Valid => return Ok(()),
@@ -582,15 +570,12 @@ pub fn revoke_acme_cert(rpcenv: &mut dyn RpcEnvironment) -> Result<String, Error
         None,
         auth_id,
         true,
-        move |worker| async move {
-            task_log!(worker, "Loading ACME account");
+        move |_worker| async move {
+            info!("Loading ACME account");
             let mut acme = node_config.acme_client().await?;
-            task_log!(worker, "Revoking old certificate");
+            info!("Revoking old certificate");
             acme.revoke_certificate(cert_pem.as_bytes(), None).await?;
-            task_log!(
-                worker,
-                "Deleting certificate and regenerating a self-signed one"
-            );
+            info!("Deleting certificate and regenerating a self-signed one");
             delete_custom_certificate().await?;
             Ok(())
         },
diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 9f1112a9faee..5522679424db 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -2,11 +2,11 @@ use ::serde::{Deserialize, Serialize};
 use anyhow::{bail, Error};
 use serde_json::json;
 use std::os::linux::fs::MetadataExt;
+use tracing::info;
 
 use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::api;
 use proxmox_section_config::SectionConfigData;
-use proxmox_sys::task_log;
 
 use pbs_api_types::{
     DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT,
@@ -179,8 +179,8 @@ pub fn create_datastore_disk(
         Some(name.clone()),
         auth_id,
         to_stdout,
-        move |worker| {
-            task_log!(worker, "create datastore '{}' on disk {}", name, disk);
+        move |_worker| {
+            info!("create datastore '{name}' on disk {disk}");
 
             let add_datastore = add_datastore.unwrap_or(false);
             let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);
@@ -213,12 +213,7 @@ pub fn create_datastore_disk(
                     bail!("datastore '{}' already exists.", datastore.name);
                 }
 
-                crate::api2::config::datastore::do_create_datastore(
-                    lock,
-                    config,
-                    datastore,
-                    Some(&worker),
-                )?;
+                crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?;
             }
 
             Ok(())
diff --git a/src/api2/node/disks/mod.rs b/src/api2/node/disks/mod.rs
index 711dae7beac7..4ef4ee2b8ac7 100644
--- a/src/api2/node/disks/mod.rs
+++ b/src/api2/node/disks/mod.rs
@@ -6,7 +6,7 @@ use proxmox_router::{
 };
 use proxmox_schema::api;
 use proxmox_sortable_macro::sortable;
-use proxmox_sys::task_log;
+use tracing::info;
 
 use pbs_api_types::{
     BLOCKDEVICE_DISK_AND_PARTITION_NAME_SCHEMA, BLOCKDEVICE_NAME_SCHEMA, NODE_SCHEMA,
@@ -164,8 +164,8 @@ pub fn initialize_disk(
         Some(disk.clone()),
         auth_id,
         to_stdout,
-        move |worker| {
-            task_log!(worker, "initialize disk {}", disk);
+        move |_worker| {
+            info!("initialize disk {disk}");
 
             let disk_manager = DiskManage::new();
             let disk_info = disk_manager.disk_by_name(&disk)?;
@@ -209,13 +209,13 @@ pub fn wipe_disk(disk: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value,
         Some(disk.clone()),
         auth_id,
         to_stdout,
-        move |worker| {
-            task_log!(worker, "wipe disk {}", disk);
+        move |_worker| {
+            info!("wipe disk {disk}");
 
             let disk_manager = DiskManage::new();
             let disk_info = disk_manager.partition_by_name(&disk)?;
 
-            wipe_blockdev(&disk_info, worker)?;
+            wipe_blockdev(&disk_info)?;
 
             Ok(())
         },
diff --git a/src/api2/node/disks/zfs.rs b/src/api2/node/disks/zfs.rs
index 673dc1bf45c1..ab8362a7892b 100644
--- a/src/api2/node/disks/zfs.rs
+++ b/src/api2/node/disks/zfs.rs
@@ -1,9 +1,9 @@
 use anyhow::{bail, Error};
 use serde_json::{json, Value};
+use tracing::{error, info};
 
 use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::api;
-use proxmox_sys::{task_error, task_log};
 
 use pbs_api_types::{
     DataStoreConfig, ZfsCompressionType, ZfsRaidLevel, ZpoolListItem, DATASTORE_SCHEMA,
@@ -228,14 +228,8 @@ pub fn create_zpool(
         Some(name.clone()),
         auth_id,
         to_stdout,
-        move |worker| {
-            task_log!(
-                worker,
-                "create {:?} zpool '{}' on devices '{}'",
-                raidlevel,
-                name,
-                devices_text
-            );
+        move |_worker| {
+            info!("create {raidlevel:?} zpool '{name}' on devices '{devices_text}'");
 
             let mut command = std::process::Command::new("zpool");
             command.args([
@@ -275,12 +269,12 @@ pub fn create_zpool(
                 }
             }
 
-            task_log!(worker, "# {:?}", command);
+            info!("# {command:?}");
 
             match proxmox_sys::command::run_command(command, None) {
-                Ok(output) => task_log!(worker, "{output}"),
+                Ok(output) => info!("{output}"),
                 Err(err) => {
-                    task_error!(worker, "{err}");
+                    error!("{err}");
                     bail!("Error during 'zpool create', see task log for more details");
                 }
             };
@@ -299,11 +293,11 @@ pub fn create_zpool(
                 command.arg(&format!("compression={}", compression));
             }
             command.args(["relatime=on", &name]);
-            task_log!(worker, "# {:?}", command);
+            info!("# {command:?}");
             match proxmox_sys::command::run_command(command, None) {
-                Ok(output) => task_log!(worker, "{output}"),
+                Ok(output) => info!("{output}"),
                 Err(err) => {
-                    task_error!(worker, "{err}");
+                    error!("{err}");
                     bail!("Error during 'zfs set', see task log for more details");
                 }
             };
@@ -319,12 +313,7 @@ pub fn create_zpool(
                     bail!("datastore '{}' already exists.", datastore.name);
                 }
 
-                crate::api2::config::datastore::do_create_datastore(
-                    lock,
-                    config,
-                    datastore,
-                    Some(&worker),
-                )?;
+                crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?;
             }
 
             Ok(())
diff --git a/src/api2/node/mod.rs b/src/api2/node/mod.rs
index 931143fd9264..62b44709672e 100644
--- a/src/api2/node/mod.rs
+++ b/src/api2/node/mod.rs
@@ -25,6 +25,7 @@ use proxmox_sortable_macro::sortable;
 use proxmox_sys::fd::fd_change_cloexec;
 
 use pbs_api_types::{NODE_SCHEMA, PRIV_SYS_CONSOLE};
+use tracing::{info, warn};
 
 use crate::auth::{private_auth_keyring, public_auth_keyring};
 use crate::tools;
@@ -181,20 +182,18 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
             let stdout = child.stdout.take().expect("no child stdout handle");
             let stderr = child.stderr.take().expect("no child stderr handle");
 
-            let worker_stdout = worker.clone();
             let stdout_fut = async move {
                 let mut reader = BufReader::new(stdout).lines();
                 while let Some(line) = reader.next_line().await? {
-                    worker_stdout.log_message(line);
+                    info!(line);
                 }
                 Ok::<(), Error>(())
             };
 
-            let worker_stderr = worker.clone();
             let stderr_fut = async move {
                 let mut reader = BufReader::new(stderr).lines();
                 while let Some(line) = reader.next_line().await? {
-                    worker_stderr.log_warning(line);
+                    warn!(line);
                 }
                 Ok::<(), Error>(())
             };
@@ -226,9 +225,9 @@ async fn termproxy(cmd: Option<String>, rpcenv: &mut dyn RpcEnvironment) -> Resu
                 }
 
                 if let Err(err) = child.kill().await {
-                    worker.log_warning(format!("error killing termproxy: {}", err));
+                    warn!("error killing termproxy: {err}");
                 } else if let Err(err) = child.wait().await {
-                    worker.log_warning(format!("error awaiting termproxy: {}", err));
+                    warn!("error awaiting termproxy: {err}");
                 }
             }
 
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index 5d02510c7748..e733c9839e3a 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -1,10 +1,10 @@
 //! Sync datastore from remote server
 use anyhow::{bail, format_err, Error};
 use futures::{future::FutureExt, select};
+use tracing::info;
 
 use proxmox_router::{Permission, Router, RpcEnvironment};
 use proxmox_schema::api;
-use proxmox_sys::task_log;
 
 use pbs_api_types::{
     Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
@@ -128,12 +128,12 @@ pub fn do_sync_job(
             let worker_future = async move {
                 let pull_params = PullParameters::try_from(&sync_job)?;
 
-                task_log!(worker, "Starting datastore sync job '{}'", job_id);
+                info!("Starting datastore sync job '{job_id}'");
                 if let Some(event_str) = schedule {
-                    task_log!(worker, "task triggered by schedule '{}'", event_str);
+                    info!("task triggered by schedule '{event_str}'");
                 }
-                task_log!(
-                    worker,
+
+                info!(
                     "sync datastore '{}' from '{}{}'",
                     sync_job.store,
                     sync_job
@@ -143,33 +143,29 @@ pub fn do_sync_job(
                     sync_job.remote_store,
                 );
 
-                let pull_stats = pull_store(&worker, pull_params).await?;
+                let pull_stats = pull_store(pull_params).await?;
 
                 if pull_stats.bytes != 0 {
                     let amount = HumanByte::from(pull_stats.bytes);
                     let rate = HumanByte::new_binary(
                         pull_stats.bytes as f64 / pull_stats.elapsed.as_secs_f64(),
                     );
-                    task_log!(
-                        worker,
+                    info!(
                         "Summary: sync job pulled {amount} in {} chunks (average rate: {rate}/s)",
                         pull_stats.chunk_count,
                     );
                 } else {
-                    task_log!(worker, "Summary: sync job found no new data to pull");
+                    info!("Summary: sync job found no new data to pull");
                 }
 
                 if let Some(removed) = pull_stats.removed {
-                    task_log!(
-                        worker,
+                    info!(
                         "Summary: removed vanished: snapshots: {}, groups: {}, namespaces: {}",
-                        removed.snapshots,
-                        removed.groups,
-                        removed.namespaces,
+                        removed.snapshots, removed.groups, removed.namespaces,
                     );
                 }
 
-                task_log!(worker, "sync job '{}' end", &job_id);
+                info!("sync job '{}' end", &job_id);
 
                 Ok(())
             };
@@ -315,21 +311,18 @@ async fn pull(
         auth_id.to_string(),
         true,
         move |worker| async move {
-            task_log!(
-                worker,
-                "pull datastore '{}' from '{}/{}'",
-                store,
+            info!(
+                "pull datastore '{store}' from '{}/{remote_store}'",
                 remote.as_deref().unwrap_or("-"),
-                remote_store,
             );
 
-            let pull_future = pull_store(&worker, pull_params);
+            let pull_future = pull_store(pull_params);
             (select! {
                 success = pull_future.fuse() => success,
                 abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
             })?;
 
-            task_log!(worker, "pull datastore '{}' end", store);
+            info!("pull datastore '{store}' end");
 
             Ok(())
         },
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index 896e809bce0d..ad8417a3cdf3 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -2,11 +2,12 @@ use std::sync::{Arc, Mutex};
 
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
+use tracing::{info, warn};
 
 use proxmox_lang::try_block;
 use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::api;
-use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;
 
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, MediaPoolConfig, Operation,
@@ -176,7 +177,7 @@ pub fn do_tape_backup_job(
             let job_result = try_block!({
                 if schedule.is_some() {
                     // for scheduled tape backup jobs, we wait indefinitely for the lock
-                    task_log!(worker, "waiting for drive lock...");
+                    info!("waiting for drive lock...");
                     loop {
                         worker.check_abort()?;
                         match lock_tape_device(&drive_config, &setup.drive) {
@@ -191,9 +192,9 @@ pub fn do_tape_backup_job(
                 }
                 set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
 
-                task_log!(worker, "Starting tape backup job '{}'", job_id);
+                info!("Starting tape backup job '{job_id}'");
                 if let Some(event_str) = schedule {
-                    task_log!(worker, "task triggered by schedule '{}'", event_str);
+                    info!("task triggered by schedule '{event_str}'");
                 }
 
                 backup_worker(
@@ -369,7 +370,7 @@ fn backup_worker(
 ) -> Result<(), Error> {
     let start = std::time::Instant::now();
 
-    task_log!(worker, "update media online status");
+    info!("update media online status");
     let changer_name = update_media_online_status(&setup.drive)?;
 
     let root_namespace = setup.ns.clone().unwrap_or_default();
@@ -381,7 +382,6 @@ fn backup_worker(
     let mut pool_writer = PoolWriter::new(
         pool,
         &setup.drive,
-        worker,
         notification_mode,
         force_media_set,
         ns_magic,
@@ -405,11 +405,9 @@ fn backup_worker(
         None => group_list,
     };
 
-    task_log!(
-        worker,
-        "found {} groups (out of {} total)",
-        group_list.len(),
-        group_count_full
+    info!(
+        "found {} groups (out of {group_count_full} total)",
+        group_list.len()
     );
 
     let mut progress = StoreProgress::new(group_list.len() as u64);
@@ -417,10 +415,7 @@ fn backup_worker(
     let latest_only = setup.latest_only.unwrap_or(false);
 
     if latest_only {
-        task_log!(
-            worker,
-            "latest-only: true (only considering latest snapshots)"
-        );
+        info!("latest-only: true (only considering latest snapshots)");
     }
 
     let datastore_name = datastore.name();
@@ -443,8 +438,7 @@ fn backup_worker(
             .collect();
 
         if snapshot_list.is_empty() {
-            task_log!(
-                worker,
+            info!(
                 "{}, group {} was empty",
                 print_store_and_ns(datastore_name, group.backup_ns()),
                 group.group()
@@ -464,7 +458,7 @@ fn backup_worker(
                     info.backup_dir.backup_ns(),
                     info.backup_dir.as_ref(),
                 ) {
-                    task_log!(worker, "skip snapshot {}", rel_path);
+                    info!("skip snapshot {rel_path}");
                     continue;
                 }
 
@@ -477,7 +471,7 @@ fn backup_worker(
                     SnapshotBackupResult::Ignored => {}
                 }
                 progress.done_snapshots = 1;
-                task_log!(worker, "percentage done: {}", progress);
+                info!("percentage done: {progress}");
             }
         } else {
             progress.group_snapshots = snapshot_list.len() as u64;
@@ -490,7 +484,7 @@ fn backup_worker(
                     info.backup_dir.backup_ns(),
                     info.backup_dir.as_ref(),
                 ) {
-                    task_log!(worker, "skip snapshot {}", rel_path);
+                    info!("skip snapshot {rel_path}");
                     continue;
                 }
 
@@ -503,7 +497,7 @@ fn backup_worker(
                     SnapshotBackupResult::Ignored => {}
                 }
                 progress.done_snapshots = snapshot_number as u64 + 1;
-                task_log!(worker, "percentage done: {}", progress);
+                info!("percentage done: {progress}");
             }
         }
     }
@@ -511,18 +505,15 @@ fn backup_worker(
     pool_writer.commit()?;
 
     if need_catalog {
-        task_log!(worker, "append media catalog");
+        info!("append media catalog");
 
         let uuid = pool_writer.load_writable_media(worker)?;
-        let done = pool_writer.append_catalog_archive(worker)?;
+        let done = pool_writer.append_catalog_archive()?;
         if !done {
-            task_log!(
-                worker,
-                "catalog does not fit on tape, writing to next volume"
-            );
+            info!("catalog does not fit on tape, writing to next volume");
             pool_writer.set_media_status_full(&uuid)?;
             pool_writer.load_writable_media(worker)?;
-            let done = pool_writer.append_catalog_archive(worker)?;
+            let done = pool_writer.append_catalog_archive()?;
             if !done {
                 bail!("write_catalog_archive failed on second media");
             }
@@ -530,9 +521,9 @@ fn backup_worker(
     }
 
     if setup.export_media_set.unwrap_or(false) {
-        pool_writer.export_media_set(worker)?;
+        pool_writer.export_media_set()?;
     } else if setup.eject_media.unwrap_or(false) {
-        pool_writer.eject_media(worker)?;
+        pool_writer.eject_media()?;
     }
 
     if errors {
@@ -542,7 +533,7 @@ fn backup_worker(
     summary.used_tapes = match pool_writer.get_used_media_labels() {
         Ok(tapes) => Some(tapes),
         Err(err) => {
-            task_warn!(worker, "could not collect list of used tapes: {err}");
+            warn!("could not collect list of used tapes: {err}");
             None
         }
     };
@@ -576,7 +567,7 @@ fn backup_snapshot(
     snapshot: BackupDir,
 ) -> Result<SnapshotBackupResult, Error> {
     let snapshot_path = snapshot.relative_path();
-    task_log!(worker, "backup snapshot {:?}", snapshot_path);
+    info!("backup snapshot {snapshot_path:?}");
 
     let snapshot_reader = match snapshot.locked_reader() {
         Ok(reader) => reader,
@@ -584,15 +575,10 @@ fn backup_snapshot(
             if !snapshot.full_path().exists() {
                 // we got an error and the dir does not exist,
                 // it probably just vanished, so continue
-                task_log!(worker, "snapshot {:?} vanished, skipping", snapshot_path);
+                info!("snapshot {snapshot_path:?} vanished, skipping");
                 return Ok(SnapshotBackupResult::Ignored);
             }
-            task_warn!(
-                worker,
-                "failed opening snapshot {:?}: {}",
-                snapshot_path,
-                err
-            );
+            warn!("failed opening snapshot {snapshot_path:?}: {err}");
             return Ok(SnapshotBackupResult::Error);
         }
     };
@@ -638,7 +624,7 @@ fn backup_snapshot(
 
     let snapshot_reader = snapshot_reader.lock().unwrap();
 
-    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
+    let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?;
 
     if !done {
         // does not fit on tape, so we try on next volume
@@ -647,19 +633,14 @@ fn backup_snapshot(
         worker.check_abort()?;
 
         pool_writer.load_writable_media(worker)?;
-        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;
+        let (done, _bytes) = pool_writer.append_snapshot_archive(&snapshot_reader)?;
 
         if !done {
             bail!("write_snapshot_archive failed on second media");
         }
     }
 
-    task_log!(
-        worker,
-        "end backup {}:{:?}",
-        datastore.name(),
-        snapshot_path
-    );
+    info!("end backup {}:{snapshot_path:?}", datastore.name());
 
     Ok(SnapshotBackupResult::Success)
 }
diff --git a/src/api2/tape/drive.rs b/src/api2/tape/drive.rs
index c6fc9f9c27ea..69f4455a3926 100644
--- a/src/api2/tape/drive.rs
+++ b/src/api2/tape/drive.rs
@@ -5,6 +5,7 @@ use std::sync::Arc;
 use anyhow::{bail, format_err, Error};
 use pbs_tape::sg_tape::SgTape;
 use serde_json::Value;
+use tracing::{info, warn};
 
 use proxmox_router::{
     list_subdirs_api_method, Permission, Router, RpcEnvironment, RpcEnvironmentType, SubdirMap,
@@ -12,7 +13,6 @@ use proxmox_router::{
 use proxmox_schema::api;
 use proxmox_section_config::SectionConfigData;
 use proxmox_sortable_macro::sortable;
-use proxmox_sys::{task_log, task_warn};
 use proxmox_uuid::Uuid;
 
 use pbs_api_types::{
@@ -131,13 +131,8 @@ pub fn load_media(
         drive.clone(),
         "load-media",
         Some(job_id),
-        move |worker, config| {
-            task_log!(
-                worker,
-                "loading media '{}' into drive '{}'",
-                label_text,
-                drive
-            );
+        move |_worker, config| {
+            info!("loading media '{label_text}' into drive '{drive}'");
             let (mut changer, _) = required_media_changer(&config, &drive)?;
             changer.load_media(&label_text)?;
             Ok(())
@@ -250,8 +245,8 @@ pub fn unload(
         drive.clone(),
         "unload-media",
         Some(drive.clone()),
-        move |worker, config| {
-            task_log!(worker, "unloading media from drive '{}'", drive);
+        move |_worker, config| {
+            info!("unloading media from drive '{drive}'");
 
             let (mut changer, _) = required_media_changer(&config, &drive)?;
             changer.unload_media(target_slot)?;
@@ -299,9 +294,9 @@ pub fn format_media(
         drive.clone(),
         "format-media",
         Some(drive.clone()),
-        move |worker, config| {
+        move |_worker, config| {
             if let Some(ref label) = label_text {
-                task_log!(worker, "try to load media '{}'", label);
+                info!("try to load media '{label}'");
                 if let Some((mut changer, _)) = media_changer(&config, &drive)? {
                     changer.load_media(label)?;
                 }
@@ -315,11 +310,8 @@ pub fn format_media(
                 let mut handle = LtoTapeHandle::new(file)?;
                 if let Ok(status) = handle.get_drive_and_media_status() {
                     if status.density >= TapeDensity::LTO9 {
-                        task_log!(worker, "Slow formatting LTO9+ media.");
-                        task_log!(
-                            worker,
-                            "This can take a very long time due to media optimization."
-                        );
+                        info!("Slow formatting LTO9+ media.");
+                        info!("This can take a very long time due to media optimization.");
                     }
                 }
             }
@@ -330,15 +322,15 @@ pub fn format_media(
                         bail!("expected label '{}', found unrelated data", label);
                     }
                     /* assume drive contains no or unrelated data */
-                    task_log!(worker, "unable to read media label: {}", err);
-                    task_log!(worker, "format anyways");
+                    info!("unable to read media label: {err}");
+                    info!("format anyways");
                     handle.format_media(fast.unwrap_or(true))?;
                 }
                 Ok((None, _)) => {
                     if let Some(label) = label_text {
                         bail!("expected label '{}', found empty tape", label);
                     }
-                    task_log!(worker, "found empty media - format anyways");
+                    info!("found empty media - format anyways");
                     handle.format_media(fast.unwrap_or(true))?;
                 }
                 Ok((Some(media_id), _key_config)) => {
@@ -352,11 +344,9 @@ pub fn format_media(
                         }
                     }
 
-                    task_log!(
-                        worker,
+                    info!(
                         "found media '{}' with uuid '{}'",
-                        media_id.label.label_text,
-                        media_id.label.uuid,
+                        media_id.label.label_text, media_id.label.uuid,
                     );
 
                     let mut inventory = Inventory::new(TAPE_STATUS_DIR);
@@ -504,7 +494,7 @@ pub fn label_media(
         drive.clone(),
         "label-media",
         Some(drive.clone()),
-        move |worker, config| {
+        move |_worker, config| {
             let mut drive = open_drive(&config, &drive)?;
 
             drive.rewind()?;
@@ -526,7 +516,7 @@ pub fn label_media(
                 pool: pool.clone(),
             };
 
-            write_media_label(worker, &mut drive, label, pool)
+            write_media_label(&mut drive, label, pool)
         },
     )?;
 
@@ -534,7 +524,6 @@ pub fn label_media(
 }
 
 fn write_media_label(
-    worker: Arc<WorkerTask>,
     drive: &mut Box<dyn TapeDriver>,
     label: MediaLabel,
     pool: Option<String>,
@@ -549,18 +538,9 @@ fn write_media_label(
     }
     drive.label_tape(&label)?;
     if let Some(ref pool) = pool {
-        task_log!(
-            worker,
-            "Label media '{}' for pool '{}'",
-            label.label_text,
-            pool
-        );
+        info!("Label media '{}' for pool '{pool}'", label.label_text);
     } else {
-        task_log!(
-            worker,
-            "Label media '{}' (no pool assignment)",
-            label.label_text
-        );
+        info!("Label media '{}' (no pool assignment)", label.label_text);
     }
 
     let media_id = MediaId {
@@ -749,10 +729,10 @@ pub fn clean_drive(drive: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Val
         drive.clone(),
         "clean-drive",
         Some(drive.clone()),
-        move |worker, config| {
+        move |_worker, config| {
             let (mut changer, _changer_name) = required_media_changer(&config, &drive)?;
 
-            task_log!(worker, "Starting drive clean");
+            info!("Starting drive clean");
 
             changer.clean_drive()?;
 
@@ -763,7 +743,7 @@ pub fn clean_drive(drive: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Val
                 // test for critical tape alert flags
                 if let Ok(alert_flags) = handle.tape_alert_flags() {
                     if !alert_flags.is_empty() {
-                        task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
+                        info!("TapeAlertFlags: {alert_flags:?}");
                         if tape_alert_flags_critical(alert_flags) {
                             bail!("found critical tape alert flags: {:?}", alert_flags);
                         }
@@ -772,13 +752,13 @@ pub fn clean_drive(drive: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Val
 
                 // test wearout (max. 50 mounts)
                 if let Ok(volume_stats) = handle.volume_statistics() {
-                    task_log!(worker, "Volume mounts: {}", volume_stats.volume_mounts);
+                    info!("Volume mounts: {}", volume_stats.volume_mounts);
                     let wearout = volume_stats.volume_mounts * 2; // (*100.0/50.0);
-                    task_log!(worker, "Cleaning tape wearout: {}%", wearout);
+                    info!("Cleaning tape wearout: {wearout}%");
                 }
             }
 
-            task_log!(worker, "Drive cleaned successfully");
+            info!("Drive cleaned successfully");
 
             Ok(())
         },
@@ -910,12 +890,12 @@ pub fn update_inventory(
         drive.clone(),
         "inventory-update",
         Some(drive.clone()),
-        move |worker, config| {
+        move |_worker, config| {
             let (mut changer, changer_name) = required_media_changer(&config, &drive)?;
 
             let label_text_list = changer.online_media_label_texts()?;
             if label_text_list.is_empty() {
-                task_log!(worker, "changer device does not list any media labels");
+                info!("changer device does not list any media labels");
             }
 
             let mut inventory = Inventory::load(TAPE_STATUS_DIR)?;
@@ -924,7 +904,7 @@ pub fn update_inventory(
 
             for label_text in label_text_list.iter() {
                 if label_text.starts_with("CLN") {
-                    task_log!(worker, "skip cleaning unit '{}'", label_text);
+                    info!("skip cleaning unit '{label_text}'");
                     continue;
                 }
 
@@ -936,12 +916,12 @@ pub fn update_inventory(
                             if !catalog
                                 || MediaCatalog::exists(TAPE_STATUS_DIR, &media_id.label.uuid)
                             {
-                                task_log!(worker, "media '{}' already inventoried", label_text);
+                                info!("media '{label_text}' already inventoried");
                                 continue;
                             }
                         }
                         Err(err) => {
-                            task_warn!(worker, "error getting media by unique label: {err}");
+                            warn!("error getting media by unique label: {err}");
                             // we can't be sure which uuid it is
                             continue;
                         }
@@ -950,37 +930,28 @@ pub fn update_inventory(
                 }
 
                 if let Err(err) = changer.load_media(&label_text) {
-                    task_warn!(worker, "unable to load media '{}' - {}", label_text, err);
+                    warn!("unable to load media '{label_text}' - {err}");
                     continue;
                 }
 
                 let mut drive = open_drive(&config, &drive)?;
                 match drive.read_label() {
                     Err(err) => {
-                        task_warn!(
-                            worker,
-                            "unable to read label form media '{}' - {}",
-                            label_text,
-                            err
-                        );
+                        warn!("unable to read label from media '{label_text}' - {err}");
                     }
                     Ok((None, _)) => {
-                        task_log!(worker, "media '{}' is empty", label_text);
+                        info!("media '{label_text}' is empty");
                     }
                     Ok((Some(media_id), _key_config)) => {
                         if label_text != media_id.label.label_text {
-                            task_warn!(
-                                worker,
-                                "label text mismatch ({} != {})",
-                                label_text,
+                            warn!(
+                                "label text mismatch ({label_text} != {})",
                                 media_id.label.label_text
                             );
                             continue;
                         }
-                        task_log!(
-                            worker,
-                            "inventorize media '{}' with uuid '{}'",
-                            label_text,
+                        info!(
+                            "inventorize media '{label_text}' with uuid '{}'",
                             media_id.label.uuid
                         );
 
@@ -1002,15 +973,11 @@ pub fn update_inventory(
                             if catalog {
                                 let media_set = inventory.compute_media_set_members(&set.uuid)?;
                                 if let Err(err) = fast_catalog_restore(
-                                    &worker,
                                     &mut drive,
                                     &media_set,
                                     &media_id.label.uuid,
                                 ) {
-                                    task_warn!(
-                                        worker,
-                                        "could not restore catalog for {label_text}: {err}"
-                                    );
+                                    warn!("could not restore catalog for {label_text}: {err}");
                                 }
                             }
                         } else {
@@ -1066,14 +1033,13 @@ pub fn barcode_label_media(
         drive.clone(),
         "barcode-label-media",
         Some(drive.clone()),
-        move |worker, config| barcode_label_media_worker(worker, drive, &config, pool),
+        move |_worker, config| barcode_label_media_worker(drive, &config, pool),
     )?;
 
     Ok(upid_str.into())
 }
 
 fn barcode_label_media_worker(
-    worker: Arc<WorkerTask>,
     drive: String,
     drive_config: &SectionConfigData,
     pool: Option<String>,
@@ -1106,24 +1072,20 @@ fn barcode_label_media_worker(
         inventory.reload()?;
         match inventory.find_media_by_label_text(&label_text) {
             Ok(Some(_)) => {
-                task_log!(
-                    worker,
-                    "media '{}' already inventoried (already labeled)",
-                    label_text
-                );
+                info!("media '{label_text}' already inventoried (already labeled)");
                 continue;
             }
             Err(err) => {
-                task_warn!(worker, "error getting media by unique label: {err}",);
+                warn!("error getting media by unique label: {err}");
                 continue;
             }
             Ok(None) => {} // ok to label
         }
 
-        task_log!(worker, "checking/loading media '{}'", label_text);
+        info!("checking/loading media '{label_text}'");
 
         if let Err(err) = changer.load_media(&label_text) {
-            task_warn!(worker, "unable to load media '{}' - {}", label_text, err);
+            warn!("unable to load media '{label_text}' - {err}");
             continue;
         }
 
@@ -1132,21 +1094,13 @@ fn barcode_label_media_worker(
 
         match drive.read_next_file() {
             Ok(_reader) => {
-                task_log!(
-                    worker,
-                    "media '{}' is not empty (format it first)",
-                    label_text
-                );
+                info!("media '{label_text}' is not empty (format it first)");
                 continue;
             }
             Err(BlockReadError::EndOfFile) => { /* EOF mark at BOT, assume tape is empty */ }
             Err(BlockReadError::EndOfStream) => { /* tape is empty */ }
             Err(_err) => {
-                task_warn!(
-                    worker,
-                    "media '{}' read error (maybe not empty - format it first)",
-                    label_text
-                );
+                warn!("media '{label_text}' read error (maybe not empty - format it first)");
                 continue;
             }
         }
@@ -1159,7 +1113,7 @@ fn barcode_label_media_worker(
             pool: pool.clone(),
         };
 
-        write_media_label(worker.clone(), &mut drive, label, pool.clone())?
+        write_media_label(&mut drive, label, pool.clone())?
     }
 
     Ok(())
@@ -1318,14 +1272,12 @@ pub fn catalog_media(
 
             let media_id = match drive.read_label()? {
                 (Some(media_id), key_config) => {
-                    task_log!(
-                        worker,
+                    info!(
                         "found media label: {}",
                         serde_json::to_string_pretty(&serde_json::to_value(&media_id)?)?
                     );
                     if key_config.is_some() {
-                        task_log!(
-                            worker,
+                        info!(
                             "encryption key config: {}",
                             serde_json::to_string_pretty(&serde_json::to_value(&key_config)?)?
                         );
@@ -1339,7 +1291,7 @@ pub fn catalog_media(
 
             let (_media_set_lock, media_set_uuid) = match media_id.media_set_label {
                 None => {
-                    task_log!(worker, "media is empty");
+                    info!("media is empty");
                     let _pool_lock = if let Some(pool) = media_id.pool() {
                         lock_media_pool(TAPE_STATUS_DIR, &pool)?
                     } else {
@@ -1352,7 +1304,7 @@ pub fn catalog_media(
                 Some(ref set) => {
                     if set.unassigned() {
                         // media is empty
-                        task_log!(worker, "media is empty");
+                        info!("media is empty");
                         let _lock = lock_unassigned_media_pool(TAPE_STATUS_DIR)?;
                         MediaCatalog::destroy(TAPE_STATUS_DIR, &media_id.label.uuid)?;
                         inventory.store(media_id.clone(), false)?;
@@ -1377,14 +1329,14 @@ pub fn catalog_media(
             if !scan {
                 let media_set = inventory.compute_media_set_members(media_set_uuid)?;
 
-                if fast_catalog_restore(&worker, &mut drive, &media_set, &media_id.label.uuid)? {
+                if fast_catalog_restore(&mut drive, &media_set, &media_id.label.uuid)? {
                     return Ok(());
                 }
 
-                task_log!(worker, "no catalog found");
+                info!("no catalog found");
             }
 
-            task_log!(worker, "scanning entire media to reconstruct catalog");
+            info!("scanning entire media to reconstruct catalog");
 
             drive.rewind()?;
             drive.read_label()?; // skip over labels - we already read them above
diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs
index 7b96acb6c5b0..21cedd7d55ba 100644
--- a/src/api2/tape/restore.rs
+++ b/src/api2/tape/restore.rs
@@ -6,6 +6,7 @@ use std::sync::Arc;
 
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
+use tracing::{info, warn};
 
 use proxmox_human_byte::HumanByte;
 use proxmox_io::ReadExt;
@@ -13,7 +14,7 @@ use proxmox_router::{Permission, Router, RpcEnvironment, RpcEnvironmentType};
 use proxmox_schema::{api, ApiType};
 use proxmox_section_config::SectionConfigData;
 use proxmox_sys::fs::{replace_file, CreateOptions};
-use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
+use proxmox_sys::WorkerTaskContext;
 use proxmox_uuid::Uuid;
 
 use pbs_api_types::{
@@ -403,12 +404,12 @@ pub fn restore(
 
             let restore_owner = owner.as_ref().unwrap_or(&auth_id);
 
-            task_log!(worker, "Mediaset '{media_set}'");
-            task_log!(worker, "Pool: {pool}");
+            info!("Mediaset '{media_set}'");
+            info!("Pool: {pool}");
 
             let res = if snapshots.is_some() || namespaces {
                 restore_list_worker(
-                    worker.clone(),
+                    worker,
                     snapshots.unwrap_or_default(),
                     inventory,
                     media_set_uuid,
@@ -422,7 +423,7 @@ pub fn restore(
                 )
             } else {
                 restore_full_worker(
-                    worker.clone(),
+                    worker,
                     inventory,
                     media_set_uuid,
                     drive_config,
@@ -434,10 +435,10 @@ pub fn restore(
                 )
             };
             if res.is_ok() {
-                task_log!(worker, "Restore mediaset '{media_set}' done");
+                info!("Restore mediaset '{media_set}' done");
             }
             if let Err(err) = set_tape_device_state(&drive, "") {
-                task_log!(worker, "could not unset drive state for {drive}: {err}");
+                info!("could not unset drive state for {drive}: {err}");
             }
 
             res
@@ -488,7 +489,7 @@ fn restore_full_worker(
     }
 
     if let Some(fingerprint) = encryption_key_fingerprint {
-        task_log!(worker, "Encryption key fingerprint: {fingerprint}");
+        info!("Encryption key fingerprint: {fingerprint}");
     }
 
     let used_datastores = store_map.used_datastores();
@@ -497,13 +498,9 @@ fn restore_full_worker(
         .map(|(t, _)| String::from(t.name()))
         .collect::<Vec<String>>()
         .join(", ");
-    task_log!(worker, "Datastore(s): {datastore_list}",);
-    task_log!(worker, "Drive: {drive_name}");
-    log_required_tapes(
-        &worker,
-        &inventory,
-        media_id_list.iter().map(|id| &id.label.uuid),
-    );
+    info!("Datastore(s): {datastore_list}");
+    info!("Drive: {drive_name}");
+    log_required_tapes(&inventory, media_id_list.iter().map(|id| &id.label.uuid));
 
     let mut datastore_locks = Vec::new();
     for (target, _) in used_datastores.values() {
@@ -533,7 +530,6 @@ fn restore_full_worker(
 
 #[allow(clippy::too_many_arguments)]
 fn check_snapshot_restorable(
-    worker: &WorkerTask,
     store_map: &DataStoreMap,
     store: &str,
     snapshot: &str,
@@ -574,7 +570,7 @@ fn check_snapshot_restorable(
             auth_id,
             Some(restore_owner),
         ) {
-            task_warn!(worker, "cannot restore {store}:{snapshot} to {ns}: '{err}'");
+            warn!("cannot restore {store}:{snapshot} to {ns}: '{err}'");
             continue;
         }
 
@@ -582,8 +578,7 @@ fn check_snapshot_restorable(
         if let Ok(owner) = datastore.get_owner(&ns, dir.as_ref()) {
             if restore_owner != &owner {
                 // only the owner is allowed to create additional snapshots
-                task_warn!(
-                    worker,
+                warn!(
                     "restore  of '{snapshot}' to {ns} failed, owner check failed ({restore_owner} \
                     != {owner})",
                 );
@@ -594,10 +589,7 @@ fn check_snapshot_restorable(
         have_some_permissions = true;
 
         if datastore.snapshot_path(&ns, dir).exists() {
-            task_warn!(
-                worker,
-                "found snapshot {snapshot} on target datastore/namespace, skipping...",
-            );
+            warn!("found snapshot {snapshot} on target datastore/namespace, skipping...");
             continue;
         }
         can_restore_some = true;
@@ -610,11 +602,7 @@ fn check_snapshot_restorable(
     Ok(can_restore_some)
 }
 
-fn log_required_tapes<'a>(
-    worker: &WorkerTask,
-    inventory: &Inventory,
-    list: impl Iterator<Item = &'a Uuid>,
-) {
+fn log_required_tapes<'a>(inventory: &Inventory, list: impl Iterator<Item = &'a Uuid>) {
     let mut tape_list = list
         .map(|uuid| {
             inventory
@@ -626,7 +614,7 @@ fn log_required_tapes<'a>(
         })
         .collect::<Vec<&str>>();
     tape_list.sort_unstable();
-    task_log!(worker, "Required media list: {}", tape_list.join(";"));
+    info!("Required media list: {}", tape_list.join(";"));
 }
 
 #[allow(clippy::too_many_arguments)]
@@ -658,14 +646,13 @@ fn restore_list_worker(
                 let (ns, dir) = match parse_ns_and_snapshot(snapshot) {
                     Ok((ns, dir)) if store_map.has_full_mapping(store, &ns) => (ns, dir),
                     Err(err) => {
-                        task_warn!(worker, "couldn't parse snapshot {snapshot} - {err}");
+                        warn!("couldn't parse snapshot {snapshot} - {err}");
                         continue;
                     }
                     _ => continue,
                 };
                 let snapshot = print_ns_and_snapshot(&ns, &dir);
                 match check_snapshot_restorable(
-                    &worker,
                     &store_map,
                     store,
                     &snapshot,
@@ -679,7 +666,7 @@ fn restore_list_worker(
                     Ok(true) => restorable.push((store.to_string(), snapshot.to_string(), ns, dir)),
                     Ok(false) => {}
                     Err(err) => {
-                        task_warn!(worker, "{err}");
+                        warn!("{err}");
                         skipped.push(format!("{store}:{snapshot}"));
                     }
                 }
@@ -697,7 +684,6 @@ fn restore_list_worker(
                     match parse_ns_and_snapshot(snapshot) {
                         Ok((ns, dir)) => {
                             match check_snapshot_restorable(
-                                &worker,
                                 &store_map,
                                 store,
                                 snapshot,
@@ -713,14 +699,14 @@ fn restore_list_worker(
                                 }
                                 Ok(false) => None,
                                 Err(err) => {
-                                    task_warn!(worker, "{err}");
+                                    warn!("{err}");
                                     skipped.push(format!("{store}:{snapshot}"));
                                     None
                                 }
                             }
                         }
                         Err(err) => {
-                            task_warn!(worker, "could not restore {store_snapshot}: {err}");
+                            warn!("could not restore {store_snapshot}: {err}");
                             skipped.push(store_snapshot);
                             None
                         }
@@ -738,10 +724,7 @@ fn restore_list_worker(
                     let media_id = inventory.lookup_media(media_uuid).unwrap();
                     (media_id, file_num)
                 } else {
-                    task_warn!(
-                        worker,
-                        "did not find snapshot '{store}:{snapshot}' in media set",
-                    );
+                    warn!("did not find snapshot '{store}:{snapshot}' in media set");
                     skipped.push(format!("{store}:{snapshot}"));
                     continue;
                 };
@@ -754,26 +737,25 @@ fn restore_list_worker(
                 .or_default();
             file_list.push(file_num);
 
-            task_log!(
-                worker,
+            info!(
                 "found snapshot {snapshot} on {}: file {file_num}",
                 media_id.label.label_text,
             );
         }
 
         if snapshot_file_hash.is_empty() {
-            task_log!(worker, "nothing to restore, skipping remaining phases...");
+            info!("nothing to restore, skipping remaining phases...");
             if !skipped.is_empty() {
-                task_log!(worker, "skipped the following snapshots:");
+                info!("skipped the following snapshots:");
                 for snap in skipped {
-                    task_log!(worker, "  {snap}");
+                    info!("  {snap}");
                 }
             }
             return Ok(());
         }
 
-        task_log!(worker, "Phase 1: temporarily restore snapshots to temp dir");
-        log_required_tapes(&worker, &inventory, snapshot_file_hash.keys());
+        info!("Phase 1: temporarily restore snapshots to temp dir");
+        log_required_tapes(&inventory, snapshot_file_hash.keys());
         let mut datastore_chunk_map: HashMap<String, HashSet<[u8; 32]>> = HashMap::new();
         let mut tmp_paths = Vec::new();
         for (media_uuid, file_list) in snapshot_file_hash.iter_mut() {
@@ -824,10 +806,10 @@ fn restore_list_worker(
         drop(catalog);
 
         if !media_file_chunk_map.is_empty() {
-            task_log!(worker, "Phase 2: restore chunks to datastores");
-            log_required_tapes(&worker, &inventory, media_file_chunk_map.keys());
+            info!("Phase 2: restore chunks to datastores");
+            log_required_tapes(&inventory, media_file_chunk_map.keys());
         } else {
-            task_log!(worker, "All chunks are already present, skip phase 2...");
+            info!("All chunks are already present, skip phase 2...");
         }
 
         for (media_uuid, file_chunk_map) in media_file_chunk_map.iter_mut() {
@@ -842,10 +824,7 @@ fn restore_list_worker(
             restore_file_chunk_map(worker.clone(), &mut drive, &store_map, file_chunk_map)?;
         }
 
-        task_log!(
-            worker,
-            "Phase 3: copy snapshots from temp dir to datastores"
-        );
+        info!("Phase 3: copy snapshots from temp dir to datastores");
         let mut errors = false;
         for (source_datastore, snapshot, source_ns, backup_dir) in snapshots.into_iter() {
             if let Err(err) = proxmox_lang::try_block!({
@@ -902,20 +881,14 @@ fn restore_list_worker(
 
                         Ok(())
                     }) {
-                        task_warn!(
-                            worker,
-                            "could not restore {source_datastore}:{snapshot}: '{err}'"
-                        );
+                        warn!("could not restore {source_datastore}:{snapshot}: '{err}'");
                         skipped.push(format!("{source_datastore}:{snapshot}"));
                     }
                 }
-                task_log!(worker, "Restore snapshot '{}' done", snapshot);
+                info!("Restore snapshot '{snapshot}' done");
                 Ok::<_, Error>(())
             }) {
-                task_warn!(
-                    worker,
-                    "could not copy {source_datastore}:{snapshot}: {err}"
-                );
+                warn!("could not copy {source_datastore}:{snapshot}: {err}");
                 errors = true;
             }
         }
@@ -925,7 +898,7 @@ fn restore_list_worker(
                 std::fs::remove_dir_all(&tmp_path)
                     .map_err(|err| format_err!("remove_dir_all failed - {err}"))
             }) {
-                task_warn!(worker, "could not clean up temp dir {tmp_path:?}: {err}");
+                warn!("could not clean up temp dir {tmp_path:?}: {err}");
                 errors = true;
             };
         }
@@ -934,19 +907,16 @@ fn restore_list_worker(
             bail!("errors during copy occurred");
         }
         if !skipped.is_empty() {
-            task_log!(worker, "(partially) skipped the following snapshots:");
+            info!("(partially) skipped the following snapshots:");
             for snap in skipped {
-                task_log!(worker, "  {snap}");
+                info!("  {snap}");
             }
         }
         Ok(())
     });
 
     if res.is_err() {
-        task_warn!(
-            worker,
-            "Error during restore, partially restored snapshots will NOT be cleaned up"
-        );
+        warn!("Error during restore, partially restored snapshots will NOT be cleaned up");
     }
 
     for (datastore, _) in store_map.used_datastores().values() {
@@ -954,7 +924,7 @@ fn restore_list_worker(
         match std::fs::remove_dir_all(tmp_path) {
             Ok(()) => {}
             Err(err) if err.kind() == std::io::ErrorKind::NotFound => {}
-            Err(err) => task_warn!(worker, "error cleaning up: {}", err),
+            Err(err) => warn!("error cleaning up: {err}"),
         }
     }
 
@@ -1037,13 +1007,10 @@ fn restore_snapshots_to_tmpdir(
     for file_num in file_list {
         let current_file_number = drive.current_file_number()?;
         if current_file_number != *file_num {
-            task_log!(
-                worker,
-                "was at file {current_file_number}, moving to {file_num}"
-            );
+            info!("was at file {current_file_number}, moving to {file_num}");
             drive.move_to_file(*file_num)?;
             let current_file_number = drive.current_file_number()?;
-            task_log!(worker, "now at file {}", current_file_number);
+            info!("now at file {current_file_number}");
         }
         let mut reader = drive.read_next_file()?;
 
@@ -1065,10 +1032,7 @@ fn restore_snapshots_to_tmpdir(
                 let source_datastore = archive_header.store;
                 let snapshot = archive_header.snapshot;
 
-                task_log!(
-                    worker,
-                    "File {file_num}: snapshot archive {source_datastore}:{snapshot}",
-                );
+                info!("File {file_num}: snapshot archive {source_datastore}:{snapshot}");
 
                 let mut decoder =
                     pxar::decoder::sync::Decoder::from_std(pxar::PxarVariant::Unified(reader))?;
@@ -1076,10 +1040,7 @@ fn restore_snapshots_to_tmpdir(
                 let target_datastore = match store_map.target_store(&source_datastore) {
                     Some(datastore) => datastore,
                     None => {
-                        task_warn!(
-                            worker,
-                            "could not find target datastore for {source_datastore}:{snapshot}",
-                        );
+                        warn!("could not find target datastore for {source_datastore}:{snapshot}");
                         continue;
                     }
                 };
@@ -1131,10 +1092,10 @@ fn restore_file_chunk_map(
     for (nr, chunk_map) in file_chunk_map.iter_mut() {
         let current_file_number = drive.current_file_number()?;
         if current_file_number != *nr {
-            task_log!(worker, "was at file {current_file_number}, moving to {nr}");
+            info!("was at file {current_file_number}, moving to {nr}");
             drive.move_to_file(*nr)?;
             let current_file_number = drive.current_file_number()?;
-            task_log!(worker, "now at file {}", current_file_number);
+            info!("now at file {current_file_number}");
         }
         let mut reader = drive.read_next_file()?;
         let header: MediaContentHeader = unsafe { reader.read_le_value()? };
@@ -1151,10 +1112,7 @@ fn restore_file_chunk_map(
 
                 let source_datastore = archive_header.store;
 
-                task_log!(
-                    worker,
-                    "File {nr}: chunk archive for datastore '{source_datastore}'",
-                );
+                info!("File {nr}: chunk archive for datastore '{source_datastore}'");
 
                 let datastore = store_map.target_store(&source_datastore).ok_or_else(|| {
                     format_err!("unexpected chunk archive for store: {source_datastore}")
@@ -1166,7 +1124,7 @@ fn restore_file_chunk_map(
                     datastore.clone(),
                     chunk_map,
                 )?;
-                task_log!(worker, "restored {count} chunks");
+                info!("restored {count} chunks");
             }
             _ => bail!("unexpected content magic {:?}", header.content_magic),
         }
@@ -1226,8 +1184,7 @@ fn restore_partial_chunk_archive<'a>(
 
     let elapsed = start_time.elapsed()?.as_secs_f64();
     let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64;
-    task_log!(
-        worker,
+    info!(
         "restored {} ({:.2}/s)",
         HumanByte::new_decimal(bytes),
         HumanByte::new_decimal(bytes / elapsed),
@@ -1311,15 +1268,11 @@ pub fn restore_media(
         let current_file_number = drive.current_file_number()?;
         let reader = match drive.read_next_file() {
             Err(BlockReadError::EndOfFile) => {
-                task_log!(
-                    worker,
-                    "skip unexpected filemark at pos {}",
-                    current_file_number
-                );
+                info!("skip unexpected filemark at pos {current_file_number}");
                 continue;
             }
             Err(BlockReadError::EndOfStream) => {
-                task_log!(worker, "detected EOT after {} files", current_file_number);
+                info!("detected EOT after {current_file_number} files");
                 break;
             }
             Err(BlockReadError::Error(err)) => {
@@ -1383,13 +1336,7 @@ fn restore_archive<'a>(
             let datastore_name = archive_header.store;
             let snapshot = archive_header.snapshot;
 
-            task_log!(
-                worker,
-                "File {}: snapshot archive {}:{}",
-                current_file_number,
-                datastore_name,
-                snapshot
-            );
+            info!("File {current_file_number}: snapshot archive {datastore_name}:{snapshot}");
 
             let (backup_ns, backup_dir) = parse_ns_and_snapshot(&snapshot)?;
 
@@ -1423,16 +1370,16 @@ fn restore_archive<'a>(
                     path.push(rel_path);
 
                     if is_new {
-                        task_log!(worker, "restore snapshot {}", backup_dir);
+                        info!("restore snapshot {backup_dir}");
 
-                        match restore_snapshot_archive(worker.clone(), reader, &path) {
+                        match restore_snapshot_archive(worker, reader, &path) {
                             Err(err) => {
                                 std::fs::remove_dir_all(&path)?;
                                 bail!("restore snapshot {} failed - {}", backup_dir, err);
                             }
                             Ok(false) => {
                                 std::fs::remove_dir_all(&path)?;
-                                task_log!(worker, "skip incomplete snapshot {}", backup_dir);
+                                info!("skip incomplete snapshot {backup_dir}");
                             }
                             Ok(true) => {
                                 catalog.register_snapshot(
@@ -1448,7 +1395,7 @@ fn restore_archive<'a>(
                         return Ok(());
                     }
                 } else {
-                    task_log!(worker, "skipping...");
+                    info!("skipping...");
                 }
             }
 
@@ -1475,12 +1422,7 @@ fn restore_archive<'a>(
 
             let source_datastore = archive_header.store;
 
-            task_log!(
-                worker,
-                "File {}: chunk archive for datastore '{}'",
-                current_file_number,
-                source_datastore
-            );
+            info!("File {current_file_number}: chunk archive for datastore '{source_datastore}'");
             let datastore = target
                 .as_ref()
                 .and_then(|t| t.0.target_store(&source_datastore));
@@ -1497,15 +1439,9 @@ fn restore_archive<'a>(
                     .or_default();
 
                 let chunks = if let Some(datastore) = datastore {
-                    restore_chunk_archive(
-                        worker.clone(),
-                        reader,
-                        datastore,
-                        checked_chunks,
-                        verbose,
-                    )?
+                    restore_chunk_archive(worker, reader, datastore, checked_chunks, verbose)?
                 } else {
-                    scan_chunk_archive(worker.clone(), reader, verbose)?
+                    scan_chunk_archive(worker, reader, verbose)?
                 };
 
                 if let Some(chunks) = chunks {
@@ -1515,12 +1451,12 @@ fn restore_archive<'a>(
                         &source_datastore,
                         &chunks[..],
                     )?;
-                    task_log!(worker, "register {} chunks", chunks.len());
+                    info!("register {} chunks", chunks.len());
                     catalog.commit_if_large()?;
                 }
                 return Ok(());
             } else if target.is_some() {
-                task_log!(worker, "skipping...");
+                info!("skipping...");
             }
 
             reader.skip_data()?; // read all data
@@ -1531,10 +1467,8 @@ fn restore_archive<'a>(
             let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
                 .map_err(|err| format_err!("unable to parse catalog archive header - {}", err))?;
 
-            task_log!(
-                worker,
-                "File {}: skip catalog '{}'",
-                current_file_number,
+            info!(
+                "File {current_file_number}: skip catalog '{}'",
                 archive_header.uuid
             );
 
@@ -1570,7 +1504,7 @@ fn scan_chunk_archive<'a>(
 
                 // check if this is an aborted stream without end marker
                 if let Ok(false) = reader.has_end_marker() {
-                    task_log!(worker, "missing stream end marker");
+                    info!("missing stream end marker");
                     return Ok(None);
                 }
 
@@ -1582,7 +1516,7 @@ fn scan_chunk_archive<'a>(
         worker.check_abort()?;
 
         if verbose {
-            task_log!(worker, "Found chunk: {}", hex::encode(digest));
+            info!("Found chunk: {}", hex::encode(digest));
         }
 
         chunks.push(digest);
@@ -1606,8 +1540,6 @@ fn restore_chunk_archive<'a>(
     let bytes = Arc::new(std::sync::atomic::AtomicU64::new(0));
     let bytes2 = bytes.clone();
 
-    let worker2 = worker.clone();
-
     let writer_pool = ParallelHandler::new(
         "tape restore chunk writer",
         4,
@@ -1615,7 +1547,7 @@ fn restore_chunk_archive<'a>(
             let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
             if !chunk_exists {
                 if verbose {
-                    task_log!(worker2, "Insert chunk: {}", hex::encode(digest));
+                    info!("Insert chunk: {}", hex::encode(digest));
                 }
                 bytes2.fetch_add(chunk.raw_size(), std::sync::atomic::Ordering::SeqCst);
                 // println!("verify and write {}", hex::encode(&digest));
@@ -1626,7 +1558,7 @@ fn restore_chunk_archive<'a>(
 
                 datastore.insert_chunk(&chunk, &digest)?;
             } else if verbose {
-                task_log!(worker2, "Found existing chunk: {}", hex::encode(digest));
+                info!("Found existing chunk: {}", hex::encode(digest));
             }
             Ok(())
         },
@@ -1648,7 +1580,7 @@ fn restore_chunk_archive<'a>(
 
                 // check if this is an aborted stream without end marker
                 if let Ok(false) = reader.has_end_marker() {
-                    task_log!(worker, "missing stream end marker");
+                    info!("missing stream end marker");
                     return Ok(None);
                 }
 
@@ -1672,8 +1604,7 @@ fn restore_chunk_archive<'a>(
 
     let elapsed = start_time.elapsed()?.as_secs_f64();
     let bytes = bytes.load(std::sync::atomic::Ordering::SeqCst) as f64;
-    task_log!(
-        worker,
+    info!(
         "restored {} ({:.2}/s)",
         HumanByte::new_decimal(bytes),
         HumanByte::new_decimal(bytes / elapsed),
@@ -1818,7 +1749,6 @@ fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
 
 /// Try to restore media catalogs (form catalog_archives)
 pub fn fast_catalog_restore(
-    worker: &WorkerTask,
     drive: &mut Box<dyn TapeDriver>,
     media_set: &MediaSet,
     uuid: &Uuid, // current media Uuid
@@ -1839,14 +1769,11 @@ pub fn fast_catalog_restore(
             // limit reader scope
             let mut reader = match drive.read_next_file() {
                 Err(BlockReadError::EndOfFile) => {
-                    task_log!(
-                        worker,
-                        "skip unexpected filemark at pos {current_file_number}"
-                    );
+                    info!("skip unexpected filemark at pos {current_file_number}");
                     continue;
                 }
                 Err(BlockReadError::EndOfStream) => {
-                    task_log!(worker, "detected EOT after {current_file_number} files");
+                    info!("detected EOT after {current_file_number} files");
                     break;
                 }
                 Err(BlockReadError::Error(err)) => {
@@ -1863,7 +1790,7 @@ pub fn fast_catalog_restore(
             if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
                 || header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1
             {
-                task_log!(worker, "found catalog at pos {}", current_file_number);
+                info!("found catalog at pos {current_file_number}");
 
                 let header_data = reader.read_exact_allocated(header.size as usize)?;
 
@@ -1873,11 +1800,7 @@ pub fn fast_catalog_restore(
                     })?;
 
                 if &archive_header.media_set_uuid != media_set.uuid() {
-                    task_log!(
-                        worker,
-                        "skipping unrelated catalog at pos {}",
-                        current_file_number
-                    );
+                    info!("skipping unrelated catalog at pos {current_file_number}");
                     reader.skip_data()?; // read all data
                     continue;
                 }
@@ -1890,11 +1813,7 @@ pub fn fast_catalog_restore(
                 });
 
                 if !wanted {
-                    task_log!(
-                        worker,
-                        "skip catalog because media '{}' not inventarized",
-                        catalog_uuid
-                    );
+                    info!("skip catalog because media '{catalog_uuid}' not inventarized");
                     reader.skip_data()?; // read all data
                     continue;
                 }
@@ -1904,11 +1823,7 @@ pub fn fast_catalog_restore(
                 } else {
                     // only restore if catalog does not exist
                     if MediaCatalog::exists(TAPE_STATUS_DIR, catalog_uuid) {
-                        task_log!(
-                            worker,
-                            "catalog for media '{}' already exists",
-                            catalog_uuid
-                        );
+                        info!("catalog for media '{catalog_uuid}' already exists");
                         reader.skip_data()?; // read all data
                         continue;
                     }
@@ -1924,19 +1839,11 @@ pub fn fast_catalog_restore(
                 match MediaCatalog::parse_catalog_header(&mut file)? {
                     (true, Some(media_uuid), Some(media_set_uuid)) => {
                         if &media_uuid != catalog_uuid {
-                            task_log!(
-                                worker,
-                                "catalog uuid mismatch at pos {}",
-                                current_file_number
-                            );
+                            info!("catalog uuid mismatch at pos {current_file_number}");
                             continue;
                         }
                         if media_set_uuid != archive_header.media_set_uuid {
-                            task_log!(
-                                worker,
-                                "catalog media_set mismatch at pos {}",
-                                current_file_number
-                            );
+                            info!("catalog media_set mismatch at pos {current_file_number}");
                             continue;
                         }
 
@@ -1947,18 +1854,14 @@ pub fn fast_catalog_restore(
                         )?;
 
                         if catalog_uuid == uuid {
-                            task_log!(worker, "successfully restored catalog");
+                            info!("successfully restored catalog");
                             found_catalog = true
                         } else {
-                            task_log!(
-                                worker,
-                                "successfully restored related catalog {}",
-                                media_uuid
-                            );
+                            info!("successfully restored related catalog {media_uuid}");
                         }
                     }
                     _ => {
-                        task_warn!(worker, "got incomplete catalog header - skip file");
+                        warn!("got incomplete catalog header - skip file");
                         continue;
                     }
                 }
@@ -1972,7 +1875,7 @@ pub fn fast_catalog_restore(
         }
         moved_to_eom = true;
 
-        task_log!(worker, "searching for catalog at EOT (moving to EOT)");
+        info!("searching for catalog at EOT (moving to EOT)");
         drive.move_to_last_file()?;
 
         let new_file_number = drive.current_file_number()?;
-- 
2.43.0





More information about the pbs-devel mailing list