[pbs-devel] [PATCH proxmox-backup v8 34/45] api: backup: add no-cache flag to bypass local datastore cache

Lukas Wagner l.wagner at proxmox.com
Fri Jul 18 13:41:08 CEST 2025



On  2025-07-15 14:53, Christian Ebner wrote:
> Adds the `no-cache` flag so the client can request to bypass the
> local datastore cache for chunk uploads. This is mainly intended for
> debugging and benchmarking, but can also be used in cases where the
> caching is known to be ineffective (no possible deduplication).
> 
> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
> ---
> changes since version 7:
> - no changes
> 
>  examples/upload-speed.rs               | 1 +
>  pbs-client/src/backup_writer.rs        | 4 +++-
>  proxmox-backup-client/src/benchmark.rs | 1 +
>  proxmox-backup-client/src/main.rs      | 8 ++++++++
>  src/api2/backup/environment.rs         | 3 +++
>  src/api2/backup/mod.rs                 | 3 +++
>  src/api2/backup/upload_chunk.rs        | 9 +++++++++
>  src/server/push.rs                     | 1 +
>  8 files changed, 29 insertions(+), 1 deletion(-)
> 
> diff --git a/examples/upload-speed.rs b/examples/upload-speed.rs
> index e4b570ec5..8a6594a47 100644
> --- a/examples/upload-speed.rs
> +++ b/examples/upload-speed.rs
> @@ -25,6 +25,7 @@ async fn upload_speed() -> Result<f64, Error> {
>          &(BackupType::Host, "speedtest".to_string(), backup_time).into(),
>          false,
>          true,
> +        false,
>      )
>      .await?;
>  
> diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
> index 1253ef561..ce5bd9375 100644
> --- a/pbs-client/src/backup_writer.rs
> +++ b/pbs-client/src/backup_writer.rs
> @@ -82,6 +82,7 @@ impl BackupWriter {
>          backup: &BackupDir,
>          debug: bool,
>          benchmark: bool,
> +        no_cache: bool,
>      ) -> Result<Arc<BackupWriter>, Error> {
>          let mut param = json!({
>              "backup-type": backup.ty(),
> @@ -89,7 +90,8 @@ impl BackupWriter {
>              "backup-time": backup.time,
>              "store": datastore,
>              "debug": debug,
> -            "benchmark": benchmark
> +            "benchmark": benchmark,
> +            "no-cache": no_cache,
>          });
>  
>          if !ns.is_root() {
> diff --git a/proxmox-backup-client/src/benchmark.rs b/proxmox-backup-client/src/benchmark.rs
> index a6f24d745..ed21c7a91 100644
> --- a/proxmox-backup-client/src/benchmark.rs
> +++ b/proxmox-backup-client/src/benchmark.rs
> @@ -236,6 +236,7 @@ async fn test_upload_speed(
>          &(BackupType::Host, "benchmark".to_string(), backup_time).into(),
>          false,
>          true,
> +        true,
>      )
>      .await?;
>  
> diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs
> index 44f4f5db5..83fc9309a 100644
> --- a/proxmox-backup-client/src/main.rs
> +++ b/proxmox-backup-client/src/main.rs
> @@ -742,6 +742,12 @@ fn spawn_catalog_upload(
>                  optional: true,
>                  default: false,
>              },
> +            "no-cache": {
> +                type: Boolean,
> +                description: "Bypass local datastore cache for network storages.",
> +                optional: true,
> +                default: false,
> +            },
>          }
>      }
>   )]
> @@ -754,6 +760,7 @@ async fn create_backup(
>      change_detection_mode: Option<BackupDetectionMode>,
>      dry_run: bool,
>      skip_e2big_xattr: bool,
> +    no_cache: bool,
>      limit: ClientRateLimitConfig,
>      _info: &ApiMethod,
>      _rpcenv: &mut dyn RpcEnvironment,
> @@ -960,6 +967,7 @@ async fn create_backup(
>          &snapshot,
>          true,
>          false,
> +        no_cache,
>      )
>      .await?;
>  
> diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs
> index 369385368..448659e74 100644
> --- a/src/api2/backup/environment.rs
> +++ b/src/api2/backup/environment.rs
> @@ -113,6 +113,7 @@ pub struct BackupEnvironment {
>      result_attributes: Value,
>      auth_id: Authid,
>      pub debug: bool,
> +    pub no_cache: bool,
>      pub formatter: &'static dyn OutputFormatter,
>      pub worker: Arc<WorkerTask>,
>      pub datastore: Arc<DataStore>,
> @@ -129,6 +130,7 @@ impl BackupEnvironment {
>          worker: Arc<WorkerTask>,
>          datastore: Arc<DataStore>,
>          backup_dir: BackupDir,
> +        no_cache: bool,
>      ) -> Result<Self, Error> {
>          let state = SharedBackupState {
>              finished: false,
> @@ -149,6 +151,7 @@ impl BackupEnvironment {
>              worker,
>              datastore,
>              debug: tracing::enabled!(tracing::Level::DEBUG),
> +            no_cache,
>              formatter: JSON_FORMATTER,
>              backup_dir,
>              last_backup: None,
> diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs
> index 026f1f106..ae61ff697 100644
> --- a/src/api2/backup/mod.rs
> +++ b/src/api2/backup/mod.rs
> @@ -53,6 +53,7 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
>              ("backup-time", false, &BACKUP_TIME_SCHEMA),
>              ("debug", true, &BooleanSchema::new("Enable verbose debug logging.").schema()),
>              ("benchmark", true, &BooleanSchema::new("Job is a benchmark (do not keep data).").schema()),
> +            ("no-cache", true, &BooleanSchema::new("Disable local datastore cache for network storages").schema()),
>          ]),
>      )
>  ).access(
> @@ -79,6 +80,7 @@ fn upgrade_to_backup_protocol(
>      async move {
>          let debug = param["debug"].as_bool().unwrap_or(false);
>          let benchmark = param["benchmark"].as_bool().unwrap_or(false);
> +        let no_cache = param["no-cache"].as_bool().unwrap_or(false);
>  
>          let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>  
> @@ -214,6 +216,7 @@ fn upgrade_to_backup_protocol(
>                      worker.clone(),
>                      datastore,
>                      backup_dir,
> +                    no_cache,
>                  )?;
>  
>                  env.debug = debug;
> diff --git a/src/api2/backup/upload_chunk.rs b/src/api2/backup/upload_chunk.rs
> index d97975b34..623b405dd 100644
> --- a/src/api2/backup/upload_chunk.rs
> +++ b/src/api2/backup/upload_chunk.rs
> @@ -262,6 +262,15 @@ async fn upload_to_backend(
>                  );
>              }
>  
> +            if env.no_cache {
> +                let object_key = pbs_datastore::s3::object_key_from_digest(&digest)?;
> +                let is_duplicate = s3_client
> +                    .upload_with_retry(object_key, data, false)
> +                    .await
> +                    .context("failed to upload chunk to s3 backend")?;
> +                return Ok((digest, size, encoded_size, is_duplicate));
> +            }
> +
>              // Avoid re-upload to S3 if the chunk is either present in the LRU cache or the chunk
>              // file exists on filesystem. The latter means that the chunk has been present in the
>              // past an was not cleaned up by garbage collection, so contained in the S3 object store.
> diff --git a/src/server/push.rs b/src/server/push.rs
> index e71012ed8..6a31d2abe 100644
> --- a/src/server/push.rs
> +++ b/src/server/push.rs
> @@ -828,6 +828,7 @@ pub(crate) async fn push_snapshot(
>          snapshot,
>          false,
>          false,
> +        false,

There is already a FIXME for this above the BackupWriter::start function, but now that a *third*
boolean parameter is being added, I think it is overdue to switch this function to a parameter
struct instead of a list of plain positional parameters.

>      )
>      .await?;
>  

-- 
- Lukas





More information about the pbs-devel mailing list