[pbs-devel] [PATCH v6 proxmox-backup 15/29] api: push: implement endpoint for sync in push direction

Christian Ebner c.ebner at proxmox.com
Thu Nov 7 10:18:02 CET 2024


On 11/6/24 16:10, Fabian Grünbichler wrote:
> Quoting Christian Ebner (2024-10-31 13:15:05)
>> Expose the sync job in push direction via a dedicated API endpoint,
>> analogous to the pull direction.
>>
>> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
>> ---
>> changes since version 5:
>> - Avoid double deserialization for backup namespaces
>> - Drop TryFrom<&SyncJobConfig> for PushParameters impl, as constructing
>>    them requires an api call to fetch the remote api version now
>>
>>   src/api2/mod.rs  |   2 +
>>   src/api2/push.rs | 183 +++++++++++++++++++++++++++++++++++++++++++++++
>>   2 files changed, 185 insertions(+)
>>   create mode 100644 src/api2/push.rs
>>
>> diff --git a/src/api2/mod.rs b/src/api2/mod.rs
>> index a83e4c205..03596326b 100644
>> --- a/src/api2/mod.rs
>> +++ b/src/api2/mod.rs
>> @@ -12,6 +12,7 @@ pub mod helpers;
>>   pub mod node;
>>   pub mod ping;
>>   pub mod pull;
>> +pub mod push;
>>   pub mod reader;
>>   pub mod status;
>>   pub mod tape;
>> @@ -29,6 +30,7 @@ const SUBDIRS: SubdirMap = &sorted!([
>>       ("nodes", &node::ROUTER),
>>       ("ping", &ping::ROUTER),
>>       ("pull", &pull::ROUTER),
>> +    ("push", &push::ROUTER),
>>       ("reader", &reader::ROUTER),
>>       ("status", &status::ROUTER),
>>       ("tape", &tape::ROUTER),
>> diff --git a/src/api2/push.rs b/src/api2/push.rs
>> new file mode 100644
>> index 000000000..28f4417d1
>> --- /dev/null
>> +++ b/src/api2/push.rs
>> @@ -0,0 +1,183 @@
>> +use anyhow::{format_err, Error};
>> +use futures::{future::FutureExt, select};
>> +use tracing::info;
>> +
>> +use pbs_api_types::{
>> +    Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA,
>> +    GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_READ,
>> +    PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, REMOTE_ID_SCHEMA,
>> +    REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA,
>> +};
>> +use proxmox_rest_server::WorkerTask;
>> +use proxmox_router::{Permission, Router, RpcEnvironment};
>> +use proxmox_schema::api;
>> +
>> +use pbs_config::CachedUserInfo;
>> +
>> +use crate::server::push::{push_store, PushParameters};
>> +
>> +/// Check if the provided user is allowed to read from the local source and act on the remote
>> +/// target for pushing content
>> +pub fn check_push_privs(
> 
> not used anywhere except here, could be private?

Acked!

> 
>> +    auth_id: &Authid,
>> +    store: &str,
>> +    namespace: &BackupNamespace,
>> +    remote: &str,
>> +    remote_store: &str,
>> +    remote_ns: Option<&BackupNamespace>,
> 
> since we don't actually need to support not setting the root namespace, the
> Option here can go away..


Acked!

> 
>> +    delete: bool,
>> +) -> Result<(), Error> {
>> +    let user_info = CachedUserInfo::new()?;
>> +
>> +    let target_acl_path = match remote_ns {
>> +        Some(ns) => ns.remote_acl_path(remote, remote_store),
>> +        None => vec!["remote", remote, remote_store],
>> +    };
> 
> which makes this simpler
> 
>> +
>> +    // Check user is allowed to backup to remote/<remote>/<datastore>/<namespace>
>> +    user_info.check_privs(
>> +        auth_id,
>> +        &target_acl_path,
>> +        PRIV_REMOTE_DATASTORE_BACKUP,
>> +        false,
>> +    )?;
>> +
>> +    if delete {
>> +        // Check user is allowed to prune remote datastore
>> +        user_info.check_privs(
>> +            auth_id,
>> +            &target_acl_path,
>> +            PRIV_REMOTE_DATASTORE_PRUNE,
>> +            false,
>> +        )?;
>> +    }
>> +
>> +    // Check user is allowed to read source datastore
>> +    user_info.check_privs(
>> +        auth_id,
>> +        &namespace.acl_path(store),
>> +        PRIV_DATASTORE_READ,
> 
> isn't this too restrictive? should be PRIV_DATASTORE_BACKUP *or* READ?
> 
> the push task will then filter the local namespaces/backup groups/.. by what
> the user is allowed to see..

Agreed, extended the check to also allow if user has PRIV_DATASTORE_BACKUP

> 
>> +        false,
>> +    )?;
>> +
>> +    Ok(())
>> +}
>> +
>> +#[api(
>> +    input: {
>> +        properties: {
>> +            store: {
>> +                schema: DATASTORE_SCHEMA,
>> +            },
>> +            ns: {
>> +                type: BackupNamespace,
>> +                optional: true,
>> +            },
>> +            remote: {
>> +                schema: REMOTE_ID_SCHEMA,
>> +            },
>> +            "remote-store": {
>> +                schema: DATASTORE_SCHEMA,
>> +            },
>> +            "remote-ns": {
>> +                type: BackupNamespace,
>> +                optional: true,
>> +            },
>> +            "remove-vanished": {
>> +                schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
>> +                optional: true,
>> +            },
>> +            "max-depth": {
>> +                schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
>> +                optional: true,
>> +            },
>> +            "group-filter": {
>> +                schema: GROUP_FILTER_LIST_SCHEMA,
>> +                optional: true,
>> +            },
>> +            limit: {
>> +                type: RateLimitConfig,
>> +                flatten: true,
>> +            },
>> +            "transfer-last": {
>> +                schema: TRANSFER_LAST_SCHEMA,
>> +                optional: true,
>> +            },
>> +        },
>> +    },
>> +    access: {
>> +        description: r###"The user needs Remote.Backup privilege on '/remote/{remote}/{remote-store}'
>> +and needs to own the backup group. Datastore.Read is required on '/datastore/{store}'.
>> +The delete flag additionally requires the Remote.Prune privilege on '/remote/{remote}/{remote-store}'.
> 
> this is partly wrong and/or weirdly phrased ;) maybe something like
> 
> The user needs (at least) Remote.DatastoreBackup on '/remote/{remote}/{remote-store}[/{remote-ns}]', and either Datastore.Backup or Datastore.Read on '/datastore/{store}[/{ns}]'. The 'remove-vanished' parameter might require additional privileges.

Yeah, that was indeed not adapted and is left over from a previous 
iteration; adapted it to your suggestions.

> 
>> +"###,
>> +        permission: &Permission::Anybody,
>> +    },
>> +)]
>> +/// Push store to other repository
>> +#[allow(clippy::too_many_arguments)]
>> +async fn push(
>> +    store: String,
>> +    ns: Option<BackupNamespace>,
>> +    remote: String,
>> +    remote_store: String,
>> +    remote_ns: Option<BackupNamespace>,
>> +    remove_vanished: Option<bool>,
>> +    max_depth: Option<usize>,
>> +    group_filter: Option<Vec<GroupFilter>>,
>> +    limit: RateLimitConfig,
>> +    transfer_last: Option<usize>,
>> +    rpcenv: &mut dyn RpcEnvironment,
>> +) -> Result<String, Error> {
>> +    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>> +    let delete = remove_vanished.unwrap_or(false);
>> +    let ns = ns.unwrap_or_default();
> 
> this could also be done for remote_ns

Acked!

> 
>> +
>> +    check_push_privs(
>> +        &auth_id,
>> +        &store,
>> +        &ns,
>> +        &remote,
>> +        &remote_store,
>> +        remote_ns.as_ref(),
>> +        delete,
>> +    )?;
>> +
>> +    let push_params = PushParameters::new(
>> +        &store,
>> +        ns,
>> +        &remote,
>> +        &remote_store,
>> +        remote_ns.unwrap_or_default(),
> 
> since we unwrap it here anyway ;)

Acked!

> 
>> +        auth_id.clone(),
>> +        remove_vanished,
>> +        max_depth,
>> +        group_filter,
>> +        limit,
>> +        transfer_last,
>> +    )
>> +    .await?;
>> +
>> +    let upid_str = WorkerTask::spawn(
>> +        "sync",
>> +        Some(store.clone()),
>> +        auth_id.to_string(),
>> +        true,
>> +        move |worker| async move {
>> +            info!("push datastore '{store}' to '{remote}/{remote_store}'");
> 
> this is a bit redundant (and incomplete), the push output will contain this
> correctly extended with namespace information..

Okay, agreed. Removed this and the log output below.

> 
>> +
>> +            let push_future = push_store(push_params);
>> +            (select! {
>> +                success = push_future.fuse() => success,
>> +                abort = worker.abort_future().map(|_| Err(format_err!("push aborted"))) => abort,
>> +            })?;
>> +
>> +            info!("push datastore '{store}' end");
> 
> same here
> 
>> +
>> +            Ok(())
>> +        },
>> +    )?;
>> +
>> +    Ok(upid_str)
>> +}
>> +
>> +pub const ROUTER: Router = Router::new().post(&API_METHOD_PUSH);
>> -- 
>> 2.39.5
>>
>>
>>
>> _______________________________________________
>> pbs-devel mailing list
>> pbs-devel at lists.proxmox.com
>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>>
>>
> Return-Path: <f.gruenbichler at proxmox.com>
> Delivered-To: c.ebner at proxmox.com
> Received: from ronja.mits.lan
> 	by ronja.mits.lan with LMTP
> 	id ANW1EEmJK2f3aQAAxxbTJA
> 	(envelope-from <f.gruenbichler at proxmox.com>)
> 	for <c.ebner at proxmox.com>; Wed, 06 Nov 2024 16:20:41 +0100
> Received: from localhost (unknown [192.168.16.37])
> 	by ronja.mits.lan (Postfix) with ESMTPSA id 36470F64912;
> 	Wed,  6 Nov 2024 16:20:41 +0100 (CET)
> Content-Type: text/plain; charset="utf-8"
> MIME-Version: 1.0
> Content-Transfer-Encoding: quoted-printable
> In-Reply-To: <20241031121519.434337-20-c.ebner at proxmox.com>
> References: <20241031121519.434337-1-c.ebner at proxmox.com> <20241031121519.434337-20-c.ebner at proxmox.com>
> Subject: Re: [pbs-devel] [PATCH v6 proxmox-backup 19/29] api: sync jobs: expose optional `sync-direction` parameter
> From: Fabian Grünbichler <f.gruenbichler at proxmox.com>
> To: Christian Ebner <c.ebner at proxmox.com>, pbs-devel at lists.proxmox.com
> Date: Wed, 06 Nov 2024 16:20:35 +0100
> Message-ID: <173090643519.79072.2923413753129715762 at yuna.proxmox.com>
> User-Agent: alot/0.10
> 
> Quoting Christian Ebner (2024-10-31 13:15:09)
>> Exposes and switch the config type for sync job operations based
>> on the `sync-direction` parameter, exposed on required api endpoints.
>>
>> If not set, the default config type is `sync` and the default sync
>> direction is `pull` for full backwards compatibility. Whenever
>> possible, deterimne the sync direction and config type from the sync
> 
> typo "determine"
> 
>> job config directly rather than requiring it as optional api
>> parameter.
>>
>> Further, extend read and modify access checks by sync direction to
>> conditionally check for the required permissions in pull and push
>> direction.
>>
>> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
>> ---
>> changes since version 5:
>> - Squashed permission check patches into this one, as they do not make
>>    much sense without this
>> - Only expose optional sync-direction parameter for api endpoints which
>>    require them, use the job config to determine sync-direction and/or
>>    config-type otherwise.
>>
>>   src/api2/admin/sync.rs               |  34 ++--
>>   src/api2/config/datastore.rs         |  11 +-
>>   src/api2/config/notifications/mod.rs |  19 +-
>>   src/api2/config/sync.rs              | 280 ++++++++++++++++++++-------
>>   src/bin/proxmox-backup-proxy.rs      |  11 +-
>>   5 files changed, 261 insertions(+), 94 deletions(-)
>>
>> diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs
>> index be324564c..8a242b1c3 100644
>> --- a/src/api2/admin/sync.rs
>> +++ b/src/api2/admin/sync.rs
>> @@ -1,6 +1,7 @@
>>   //! Datastore Synchronization Job Management
>>   
>>   use anyhow::{bail, format_err, Error};
>> +use serde::Deserialize;
>>   use serde_json::Value;
>>   
>>   use proxmox_router::{
>> @@ -29,6 +30,10 @@ use crate::{
>>                   schema: DATASTORE_SCHEMA,
>>                   optional: true,
>>               },
>> +            "sync-direction": {
>> +                type: SyncDirection,
>> +                optional: true,
>> +            },
>>           },
>>       },
>>       returns: {
>> @@ -44,6 +49,7 @@ use crate::{
>>   /// List all sync jobs
>>   pub fn list_sync_jobs(
>>       store: Option<String>,
>> +    sync_direction: Option<SyncDirection>,
>>       _param: Value,
>>       rpcenv: &mut dyn RpcEnvironment,
>>   ) -> Result<Vec<SyncJobStatus>, Error> {
>> @@ -52,8 +58,9 @@ pub fn list_sync_jobs(
>>   
>>       let (config, digest) = sync::config()?;
>>   
>> +    let sync_direction = sync_direction.unwrap_or_default();
>>       let job_config_iter = config
>> -        .convert_to_typed_array("sync")?
>> +        .convert_to_typed_array(sync_direction.as_config_type_str())?
>>           .into_iter()
>>           .filter(|job: &SyncJobConfig| {
>>               if let Some(store) = &store {
>> @@ -62,7 +69,9 @@ pub fn list_sync_jobs(
>>                   true
>>               }
>>           })
>> -        .filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job));
>> +        .filter(|job: &SyncJobConfig| {
>> +            check_sync_job_read_access(&user_info, &auth_id, job, sync_direction)
>> +        });
>>   
>>       let mut list = Vec::new();
>>   
>> @@ -106,24 +115,23 @@ pub fn run_sync_job(
>>       let user_info = CachedUserInfo::new()?;
>>   
>>       let (config, _digest) = sync::config()?;
>> -    let sync_job: SyncJobConfig = config.lookup("sync", &id)?;
>> +    let (config_type, config_section) = config
>> +        .sections
>> +        .get(&id)
>> +        .ok_or_else(|| format_err!("No sync job with id '{id}' found in config"))?;
>> +
>> +    let sync_direction = SyncDirection::from_config_type_str(config_type)?;
>> +    let sync_job = SyncJobConfig::deserialize(config_section)?;
>>   
>> -    if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) {
>> -        bail!("permission check failed");
>> +    if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job, sync_direction) {
>> +        bail!("permission check failed, '{auth_id}' is missing access");
>>       }
>>   
>>       let job = Job::new("syncjob", &id)?;
>>   
>>       let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
>>   
>> -    let upid_str = do_sync_job(
>> -        job,
>> -        sync_job,
>> -        &auth_id,
>> -        None,
>> -        SyncDirection::Pull,
>> -        to_stdout,
>> -    )?;
>> +    let upid_str = do_sync_job(job, sync_job, &auth_id, None, sync_direction, to_stdout)?;
>>   
>>       Ok(upid_str)
>>   }
>> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
>> index ca6edf05a..c151eda10 100644
>> --- a/src/api2/config/datastore.rs
>> +++ b/src/api2/config/datastore.rs
>> @@ -13,8 +13,9 @@ use proxmox_uuid::Uuid;
>>   
>>   use pbs_api_types::{
>>       Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
>> -    MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE,
>> -    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
>> +    MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
>> +    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
>> +    PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
>>   };
>>   use pbs_config::BackupLockGuard;
>>   use pbs_datastore::chunk_store::ChunkStore;
>> @@ -498,8 +499,10 @@ pub async fn delete_datastore(
>>           for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? {
>>               delete_verification_job(job.config.id, None, rpcenv)?
>>           }
>> -        for job in list_sync_jobs(Some(name.clone()), Value::Null, rpcenv)? {
>> -            delete_sync_job(job.config.id, None, rpcenv)?
>> +        for direction in [SyncDirection::Pull, SyncDirection::Push] {
>> +            for job in list_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? {
>> +                delete_sync_job(job.config.id, None, rpcenv)?
>> +            }
>>           }
>>           for job in list_prune_jobs(Some(name.clone()), Value::Null, rpcenv)? {
>>               delete_prune_job(job.config.id, None, rpcenv)?
>> diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs
>> index dfe82ed03..31c4851c1 100644
>> --- a/src/api2/config/notifications/mod.rs
>> +++ b/src/api2/config/notifications/mod.rs
>> @@ -9,7 +9,7 @@ use proxmox_schema::api;
>>   use proxmox_sortable_macro::sortable;
>>   
>>   use crate::api2::admin::datastore::get_datastore_list;
>> -use pbs_api_types::PRIV_SYS_AUDIT;
>> +use pbs_api_types::{SyncDirection, PRIV_SYS_AUDIT};
>>   
>>   use crate::api2::admin::prune::list_prune_jobs;
>>   use crate::api2::admin::sync::list_sync_jobs;
>> @@ -154,13 +154,15 @@ pub fn get_values(
>>           });
>>       }
>>   
>> -    let sync_jobs = list_sync_jobs(None, param.clone(), rpcenv)?;
>> -    for job in sync_jobs {
>> -        values.push(MatchableValue {
>> -            field: "job-id".into(),
>> -            value: job.config.id,
>> -            comment: job.config.comment,
>> -        });
>> +    for direction in [SyncDirection::Pull, SyncDirection::Push] {
>> +        let sync_jobs = list_sync_jobs(None, Some(direction), param.clone(), rpcenv)?;
>> +        for job in sync_jobs {
>> +            values.push(MatchableValue {
>> +                field: "job-id".into(),
>> +                value: job.config.id,
>> +                comment: job.config.comment,
>> +            });
>> +        }
>>       }
>>   
>>       let verify_jobs = list_verification_jobs(None, param.clone(), rpcenv)?;
>> @@ -184,6 +186,7 @@ pub fn get_values(
>>           "package-updates",
>>           "prune",
>>           "sync",
>> +        "sync-push",
>>           "system-mail",
>>           "tape-backup",
>>           "tape-load",
>> diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
>> index 3963049e9..2f32aaccb 100644
>> --- a/src/api2/config/sync.rs
>> +++ b/src/api2/config/sync.rs
>> @@ -1,6 +1,7 @@
>>   use ::serde::{Deserialize, Serialize};
>>   use anyhow::{bail, Error};
>>   use hex::FromHex;
>> +use pbs_api_types::SyncDirection;
>>   use serde_json::Value;
>>   
>>   use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
>> @@ -8,8 +9,9 @@ use proxmox_schema::{api, param_bail};
>>   
>>   use pbs_api_types::{
>>       Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
>> -    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
>> -    PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
>> +    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
>> +    PRIV_REMOTE_AUDIT, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY,
>> +    PRIV_REMOTE_DATASTORE_PRUNE, PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
>>   };
>>   use pbs_config::sync;
>>   
>> @@ -20,18 +22,35 @@ pub fn check_sync_job_read_access(
>>       user_info: &CachedUserInfo,
>>       auth_id: &Authid,
>>       job: &SyncJobConfig,
>> +    sync_direction: SyncDirection,
>>   ) -> bool {
>> +    // check for audit access on datastore/namespace, applies for pull and push direction
>>       let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
>>       if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
>>           return false;
>>       }
>>   
>> -    if let Some(remote) = &job.remote {
>> -        let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]);
>> -        remote_privs & PRIV_REMOTE_AUDIT != 0
>> -    } else {
>> -        let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
>> -        source_ds_privs & PRIV_DATASTORE_AUDIT != 0
>> +    match sync_direction {
>> +        SyncDirection::Pull => {
>> +            if let Some(remote) = &job.remote {
>> +                let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]);
>> +                remote_privs & PRIV_REMOTE_AUDIT != 0
>> +            } else {
>> +                let source_ds_privs =
>> +                    user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
>> +                source_ds_privs & PRIV_DATASTORE_AUDIT != 0
>> +            }
>> +        }
>> +        SyncDirection::Push => {
>> +            // check for audit access on remote/datastore/namespace
>> +            if let Some(target_acl_path) = job.remote_acl_path() {
>> +                let remote_privs = user_info.lookup_privs(auth_id, &target_acl_path);
>> +                remote_privs & PRIV_REMOTE_AUDIT != 0
> 
> the other two checks above check the source side, this checks the target
> side.. should we check both here?
> 
>> +            } else {
>> +                // Remote must always be present for sync in push direction, fail otherwise
>> +                false
>> +            }
>> +        }
>>       }
>>   }
>>   
>> @@ -43,41 +62,93 @@ fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool {
>>       }
>>   }
>>   
>> -/// checks whether user can run the corresponding pull job
>> +/// checks whether user can run the corresponding sync job, depending on sync direction
>>   ///
>> -/// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly.
>> +/// namespace creation/deletion ACL and backup group ownership checks happen in the pull/push code
>> +/// directly.
>>   /// remote side checks/filters remote datastore/namespace/group access.
>>   pub fn check_sync_job_modify_access(
>>       user_info: &CachedUserInfo,
>>       auth_id: &Authid,
>>       job: &SyncJobConfig,
>> +    sync_direction: SyncDirection,
>>   ) -> bool {
>> -    let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
>> -    if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
>> -        return false;
>> -    }
>> +    match sync_direction {
>> +        SyncDirection::Pull => {
>> +            let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
>> +            if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0
>> +                || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0
>> +            {
>> +                return false;
>> +            }
>> +
>> +            if let Some(true) = job.remove_vanished {
>> +                if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 {
>> +                    return false;
>> +                }
>> +            }
>>   
>> -    if let Some(true) = job.remove_vanished {
>> -        if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 {
>> -            return false;
>> +            // same permission as changing ownership after syncing
>> +            if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 {
>> +                return false;
>> +            }
>> +
>> +            if let Some(remote) = &job.remote {
>> +                let remote_privs =
>> +                    user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
>> +                return remote_privs & PRIV_REMOTE_READ != 0;
>> +            }
>> +            true
>>           }
>> -    }
>> +        SyncDirection::Push => {
>> +            // Remote must always be present for sync in push direction, fail otherwise
>> +            let target_privs = if let Some(target_acl_path) = job.remote_acl_path() {
>> +                user_info.lookup_privs(auth_id, &target_acl_path)
>> +            } else {
>> +                return false;
>> +            };
>> +
>> +            // check user is allowed to create backups on remote datastore
>> +            if target_privs & PRIV_REMOTE_DATASTORE_BACKUP == 0 {
>> +                return false;
>> +            }
>>   
>> -    // same permission as changing ownership after syncing
>> -    if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 {
>> -        return false;
>> -    }
>> +            if let Some(true) = job.remove_vanished {
>> +                // check user is allowed to prune backup snapshots on remote datastore
>> +                if target_privs & PRIV_REMOTE_DATASTORE_PRUNE == 0 {
>> +                    return false;
>> +                }
>> +            }
>> +
>> +            // check user is not the owner of the sync job, but has remote datastore modify permissions
>> +            if !is_correct_owner(auth_id, job) && target_privs & PRIV_REMOTE_DATASTORE_MODIFY == 0 {
>> +                return false;
>> +            }
> 
> isn't this wrong? if I am modifying/running a sync job "owned" by somebody
> else, then I need to have Datastore.Read or Datastore.Modify on the *local*
> source datastore+namespace.. else I could use such a sync job to exfiltrate
> backups I wouldn't otherwise have access to..
> 
>> +
>> +            // check user is allowed to read from (local) source datastore/namespace
>> +            let source_privs = user_info.lookup_privs(auth_id, &job.acl_path());
>> +            if source_privs & PRIV_DATASTORE_AUDIT == 0 {
>> +                return false;
>> +            }
>>   
>> -    if let Some(remote) = &job.remote {
>> -        let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
>> -        return remote_privs & PRIV_REMOTE_READ != 0;
>> +            // check for either datastore read or datastore backup access
>> +            // (the later implying read access for owned snapshot groups)
>> +            if source_privs & PRIV_DATASTORE_READ != 0 {
>> +                return true;
>> +            }
>> +            source_privs & PRIV_DATASTORE_BACKUP != 0
>> +        }
>>       }
>> -    true
>>   }
>>   
>>   #[api(
>>       input: {
>> -        properties: {},
>> +        properties: {
>> +            "sync-direction": {
>> +                type: SyncDirection,
>> +                optional: true,
>> +            },
>> +        },
>>       },
>>       returns: {
>>           description: "List configured jobs.",
>> @@ -92,6 +163,7 @@ pub fn check_sync_job_modify_access(
>>   /// List all sync jobs
>>   pub fn list_sync_jobs(
>>       _param: Value,
>> +    sync_direction: Option<SyncDirection>,
>>       rpcenv: &mut dyn RpcEnvironment,
>>   ) -> Result<Vec<SyncJobConfig>, Error> {
>>       let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>> @@ -99,13 +171,16 @@ pub fn list_sync_jobs(
>>   
>>       let (config, digest) = sync::config()?;
>>   
>> -    let list = config.convert_to_typed_array("sync")?;
>> +    let sync_direction = sync_direction.unwrap_or_default();
>> +    let list = config.convert_to_typed_array(sync_direction.as_config_type_str())?;
>>   
>>       rpcenv["digest"] = hex::encode(digest).into();
>>   
>>       let list = list
>>           .into_iter()
>> -        .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job))
>> +        .filter(|sync_job| {
>> +            check_sync_job_read_access(&user_info, &auth_id, sync_job, sync_direction)
>> +        })
>>           .collect();
>>       Ok(list)
>>   }
>> @@ -118,6 +193,10 @@ pub fn list_sync_jobs(
>>                   type: SyncJobConfig,
>>                   flatten: true,
>>               },
>> +            "sync-direction": {
>> +                type: SyncDirection,
>> +                optional: true,
>> +            },
>>           },
>>       },
>>       access: {
>> @@ -128,14 +207,16 @@ pub fn list_sync_jobs(
>>   /// Create a new sync job.
>>   pub fn create_sync_job(
>>       config: SyncJobConfig,
>> +    sync_direction: Option<SyncDirection>,
>>       rpcenv: &mut dyn RpcEnvironment,
>>   ) -> Result<(), Error> {
>>       let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>>       let user_info = CachedUserInfo::new()?;
>> +    let sync_direction = sync_direction.unwrap_or_default();
>>   
>>       let _lock = sync::lock_config()?;
>>   
>> -    if !check_sync_job_modify_access(&user_info, &auth_id, &config) {
>> +    if !check_sync_job_modify_access(&user_info, &auth_id, &config, sync_direction) {
>>           bail!("permission check failed");
>>       }
>>   
>> @@ -158,7 +239,7 @@ pub fn create_sync_job(
>>           param_bail!("id", "job '{}' already exists.", config.id);
>>       }
>>   
>> -    section_config.set_data(&config.id, "sync", &config)?;
>> +    section_config.set_data(&config.id, sync_direction.as_config_type_str(), &config)?;
>>   
>>       sync::save_config(&section_config)?;
>>   
>> @@ -188,8 +269,17 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Sync
>>   
>>       let (config, digest) = sync::config()?;
>>   
>> -    let sync_job = config.lookup("sync", &id)?;
>> -    if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) {
>> +    let (sync_job, sync_direction) =
>> +        if let Some((config_type, config_section)) = config.sections.get(&id) {
>> +            (
>> +                SyncJobConfig::deserialize(config_section)?,
>> +                SyncDirection::from_config_type_str(config_type)?,
>> +            )
>> +        } else {
>> +            http_bail!(NOT_FOUND, "job '{id}' does not exist.")
>> +        };
>> +
>> +    if !check_sync_job_read_access(&user_info, &auth_id, &sync_job, sync_direction) {
>>           bail!("permission check failed");
>>       }
>>   
>> @@ -284,7 +374,15 @@ pub fn update_sync_job(
>>           crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
>>       }
>>   
>> -    let mut data: SyncJobConfig = config.lookup("sync", &id)?;
>> +    let (mut data, sync_direction) =
>> +        if let Some((config_type, config_section)) = config.sections.get(&id) {
>> +            (
>> +                SyncJobConfig::deserialize(config_section)?,
>> +                SyncDirection::from_config_type_str(config_type)?,
>> +            )
>> +        } else {
>> +            http_bail!(NOT_FOUND, "job '{id}' does not exist.")
>> +        };
>>   
>>       if let Some(delete) = delete {
>>           for delete_prop in delete {
>> @@ -405,11 +503,11 @@ pub fn update_sync_job(
>>           }
>>       }
>>   
>> -    if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
>> +    if !check_sync_job_modify_access(&user_info, &auth_id, &data, sync_direction) {
>>           bail!("permission check failed");
>>       }
>>   
>> -    config.set_data(&id, "sync", &data)?;
>> +    config.set_data(&id, sync_direction.as_config_type_str(), &data)?;
>>   
>>       sync::save_config(&config)?;
>>   
>> @@ -456,17 +554,16 @@ pub fn delete_sync_job(
>>           crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
>>       }
>>   
>> -    match config.lookup("sync", &id) {
>> -        Ok(job) => {
>> -            if !check_sync_job_modify_access(&user_info, &auth_id, &job) {
>> -                bail!("permission check failed");
>> -            }
>> -            config.sections.remove(&id);
>> -        }
>> -        Err(_) => {
>> -            http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
>> +    if let Some((config_type, config_section)) = config.sections.get(&id) {
>> +        let sync_direction = SyncDirection::from_config_type_str(config_type)?;
>> +        let job = SyncJobConfig::deserialize(config_section)?;
>> +        if !check_sync_job_modify_access(&user_info, &auth_id, &job, sync_direction) {
>> +            bail!("permission check failed");
>>           }
>> -    };
>> +        config.sections.remove(&id);
>> +    } else {
>> +        http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
>> +    }
>>   
>>       sync::save_config(&config)?;
>>   
>> @@ -536,39 +633,67 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator
>>       };
>>   
>>       // should work without ACLs
>> -    assert!(check_sync_job_read_access(&user_info, root_auth_id, &job));
>> -    assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job));
>> +    assert!(check_sync_job_read_access(
>> +        &user_info,
>> +        root_auth_id,
>> +        &job,
>> +        SyncDirection::Pull,
>> +    ));
>> +    assert!(check_sync_job_modify_access(
>> +        &user_info,
>> +        root_auth_id,
>> +        &job,
>> +        SyncDirection::Pull,
>> +    ));
>>   
>>       // user without permissions must fail
>>       assert!(!check_sync_job_read_access(
>>           &user_info,
>>           &no_perm_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &no_perm_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // reading without proper read permissions on either remote or local must fail
>> -    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
>> +    assert!(!check_sync_job_read_access(
>> +        &user_info,
>> +        &read_auth_id,
>> +        &job,
>> +        SyncDirection::Pull,
>> +    ));
>>   
>>       // reading without proper read permissions on local end must fail
>>       job.remote = Some("remote1".to_string());
>> -    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
>> +    assert!(!check_sync_job_read_access(
>> +        &user_info,
>> +        &read_auth_id,
>> +        &job,
>> +        SyncDirection::Pull,
>> +    ));
>>   
>>       // reading without proper read permissions on remote end must fail
>>       job.remote = Some("remote0".to_string());
>>       job.store = "localstore1".to_string();
>> -    assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
>> +    assert!(!check_sync_job_read_access(
>> +        &user_info,
>> +        &read_auth_id,
>> +        &job,
>> +        SyncDirection::Pull,
>> +    ));
>>   
>>       // writing without proper write permissions on either end must fail
>>       job.store = "localstore0".to_string();
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // writing without proper write permissions on local end must fail
>> @@ -580,39 +705,54 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // reset remote to one where users have access
>>       job.remote = Some("remote1".to_string());
>>   
>>       // user with read permission can only read, but not modify/run
>> -    assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job));
>> +    assert!(check_sync_job_read_access(
>> +        &user_info,
>> +        &read_auth_id,
>> +        &job,
>> +        SyncDirection::Pull,
>> +    ));
>>       job.owner = Some(read_auth_id.clone());
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &read_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>       job.owner = None;
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &read_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>       job.owner = Some(write_auth_id.clone());
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &read_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // user with simple write permission can modify/run
>> -    assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job));
>> +    assert!(check_sync_job_read_access(
>> +        &user_info,
>> +        &write_auth_id,
>> +        &job,
>> +        SyncDirection::Pull,
>> +    ));
>>       assert!(check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // but can't modify/run with deletion
>> @@ -620,7 +760,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // unless they have Datastore.Prune as well
>> @@ -628,7 +769,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>>       assert!(check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // changing owner is not possible
>> @@ -636,7 +778,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // also not to the default 'root@pam'
>> @@ -644,7 +787,8 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>>       assert!(!check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       // unless they have Datastore.Modify as well
>> @@ -653,13 +797,15 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
>>       assert!(check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>       job.owner = None;
>>       assert!(check_sync_job_modify_access(
>>           &user_info,
>>           &write_auth_id,
>> -        &job
>> +        &job,
>> +        SyncDirection::Pull,
>>       ));
>>   
>>       Ok(())
>> diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
>> index 6f19a3fbd..70283510d 100644
>> --- a/src/bin/proxmox-backup-proxy.rs
>> +++ b/src/bin/proxmox-backup-proxy.rs
>> @@ -589,7 +589,14 @@ async fn schedule_datastore_sync_jobs() {
>>           Ok((config, _digest)) => config,
>>       };
>>   
>> -    for (job_id, (_, job_config)) in config.sections {
>> +    for (job_id, (job_type, job_config)) in config.sections {
>> +        let sync_direction = match SyncDirection::from_config_type_str(&job_type) {
>> +            Ok(direction) => direction,
>> +            Err(err) => {
>> +                eprintln!("unexpected config type in sync job config - {err}");
>> +                continue;
>> +            }
>> +        };
>>           let job_config: SyncJobConfig = match serde_json::from_value(job_config) {
>>               Ok(c) => c,
>>               Err(err) => {
>> @@ -616,7 +623,7 @@ async fn schedule_datastore_sync_jobs() {
>>                   job_config,
>>                   &auth_id,
>>                   Some(event_str),
>> -                SyncDirection::Pull,
>> +                sync_direction,
>>                   false,
>>               ) {
>>                   eprintln!("unable to start datastore sync job {job_id} - {err}");
>> -- 
>> 2.39.5
>>
>>
>>
>> _______________________________________________
>> pbs-devel mailing list
>> pbs-devel at lists.proxmox.com
>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>>
>>





More information about the pbs-devel mailing list