[pbs-devel] [PATCH v6 proxmox-backup 15/29] api: push: implement endpoint for sync in push direction
Fabian Grünbichler
f.gruenbichler at proxmox.com
Wed Nov 6 16:10:39 CET 2024
Quoting Christian Ebner (2024-10-31 13:15:05)
> Expose the sync job in push direction via a dedicated API endpoint,
> analogous to the pull direction.
>
> Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
> ---
> changes since version 5:
> - Avoid double deserialization for backup namespaces
> - Drop TryFrom<&SyncJobConfig> for PushParameters impl, as constructing
> them requires an api call to fetch the remote api version now
>
> src/api2/mod.rs | 2 +
> src/api2/push.rs | 183 +++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 185 insertions(+)
> create mode 100644 src/api2/push.rs
>
> diff --git a/src/api2/mod.rs b/src/api2/mod.rs
> index a83e4c205..03596326b 100644
> --- a/src/api2/mod.rs
> +++ b/src/api2/mod.rs
> @@ -12,6 +12,7 @@ pub mod helpers;
> pub mod node;
> pub mod ping;
> pub mod pull;
> +pub mod push;
> pub mod reader;
> pub mod status;
> pub mod tape;
> @@ -29,6 +30,7 @@ const SUBDIRS: SubdirMap = &sorted!([
> ("nodes", &node::ROUTER),
> ("ping", &ping::ROUTER),
> ("pull", &pull::ROUTER),
> + ("push", &push::ROUTER),
> ("reader", &reader::ROUTER),
> ("status", &status::ROUTER),
> ("tape", &tape::ROUTER),
> diff --git a/src/api2/push.rs b/src/api2/push.rs
> new file mode 100644
> index 000000000..28f4417d1
> --- /dev/null
> +++ b/src/api2/push.rs
> @@ -0,0 +1,183 @@
> +use anyhow::{format_err, Error};
> +use futures::{future::FutureExt, select};
> +use tracing::info;
> +
> +use pbs_api_types::{
> + Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA,
> + GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_READ,
> + PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, REMOTE_ID_SCHEMA,
> + REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA,
> +};
> +use proxmox_rest_server::WorkerTask;
> +use proxmox_router::{Permission, Router, RpcEnvironment};
> +use proxmox_schema::api;
> +
> +use pbs_config::CachedUserInfo;
> +
> +use crate::server::push::{push_store, PushParameters};
> +
> +/// Check if the provided user is allowed to read from the local source and act on the remote
> +/// target for pushing content
> +pub fn check_push_privs(
this is not used anywhere else — could it be made private?
> + auth_id: &Authid,
> + store: &str,
> + namespace: &BackupNamespace,
> + remote: &str,
> + remote_store: &str,
> + remote_ns: Option<&BackupNamespace>,
since we don't actually need to support leaving the root namespace unset, the
Option here can go away.
> + delete: bool,
> +) -> Result<(), Error> {
> + let user_info = CachedUserInfo::new()?;
> +
> + let target_acl_path = match remote_ns {
> + Some(ns) => ns.remote_acl_path(remote, remote_store),
> + None => vec!["remote", remote, remote_store],
> + };
which makes this simpler
> +
> + // Check user is allowed to backup to remote/<remote>/<datastore>/<namespace>
> + user_info.check_privs(
> + auth_id,
> + &target_acl_path,
> + PRIV_REMOTE_DATASTORE_BACKUP,
> + false,
> + )?;
> +
> + if delete {
> + // Check user is allowed to prune remote datastore
> + user_info.check_privs(
> + auth_id,
> + &target_acl_path,
> + PRIV_REMOTE_DATASTORE_PRUNE,
> + false,
> + )?;
> + }
> +
> + // Check user is allowed to read source datastore
> + user_info.check_privs(
> + auth_id,
> + &namespace.acl_path(store),
> + PRIV_DATASTORE_READ,
isn't this too restrictive? shouldn't it be PRIV_DATASTORE_BACKUP *or* PRIV_DATASTORE_READ?
the push task will then filter the local namespaces/backup groups/snapshots by what
the user is allowed to see.
> + false,
> + )?;
> +
> + Ok(())
> +}
> +
> +#[api(
> + input: {
> + properties: {
> + store: {
> + schema: DATASTORE_SCHEMA,
> + },
> + ns: {
> + type: BackupNamespace,
> + optional: true,
> + },
> + remote: {
> + schema: REMOTE_ID_SCHEMA,
> + },
> + "remote-store": {
> + schema: DATASTORE_SCHEMA,
> + },
> + "remote-ns": {
> + type: BackupNamespace,
> + optional: true,
> + },
> + "remove-vanished": {
> + schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
> + optional: true,
> + },
> + "max-depth": {
> + schema: NS_MAX_DEPTH_REDUCED_SCHEMA,
> + optional: true,
> + },
> + "group-filter": {
> + schema: GROUP_FILTER_LIST_SCHEMA,
> + optional: true,
> + },
> + limit: {
> + type: RateLimitConfig,
> + flatten: true,
> + },
> + "transfer-last": {
> + schema: TRANSFER_LAST_SCHEMA,
> + optional: true,
> + },
> + },
> + },
> + access: {
> + description: r###"The user needs Remote.Backup privilege on '/remote/{remote}/{remote-store}'
> +and needs to own the backup group. Datastore.Read is required on '/datastore/{store}'.
> +The delete flag additionally requires the Remote.Prune privilege on '/remote/{remote}/{remote-store}'.
this is partly wrong and/or weirdly phrased ;) maybe something like
The user needs (at least) Remote.DatastoreBackup on '/remote/{remote}/{remote-store}[/{remote-ns}]', and either Datastore.Backup or Datastore.Read on '/datastore/{store}[/{ns}]'. The 'remove-vanished' parameter might require additional privileges.
> +"###,
> + permission: &Permission::Anybody,
> + },
> +)]
> +/// Push store to other repository
> +#[allow(clippy::too_many_arguments)]
> +async fn push(
> + store: String,
> + ns: Option<BackupNamespace>,
> + remote: String,
> + remote_store: String,
> + remote_ns: Option<BackupNamespace>,
> + remove_vanished: Option<bool>,
> + max_depth: Option<usize>,
> + group_filter: Option<Vec<GroupFilter>>,
> + limit: RateLimitConfig,
> + transfer_last: Option<usize>,
> + rpcenv: &mut dyn RpcEnvironment,
> +) -> Result<String, Error> {
> + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
> + let delete = remove_vanished.unwrap_or(false);
> + let ns = ns.unwrap_or_default();
this could also be done for remote_ns
> +
> + check_push_privs(
> + &auth_id,
> + &store,
> + &ns,
> + &remote,
> + &remote_store,
> + remote_ns.as_ref(),
> + delete,
> + )?;
> +
> + let push_params = PushParameters::new(
> + &store,
> + ns,
> + &remote,
> + &remote_store,
> + remote_ns.unwrap_or_default(),
since we unwrap it here anyway ;)
> + auth_id.clone(),
> + remove_vanished,
> + max_depth,
> + group_filter,
> + limit,
> + transfer_last,
> + )
> + .await?;
> +
> + let upid_str = WorkerTask::spawn(
> + "sync",
> + Some(store.clone()),
> + auth_id.to_string(),
> + true,
> + move |worker| async move {
> + info!("push datastore '{store}' to '{remote}/{remote_store}'");
this is a bit redundant (and incomplete); the push output will contain this,
correctly extended with namespace information.
> +
> + let push_future = push_store(push_params);
> + (select! {
> + success = push_future.fuse() => success,
> + abort = worker.abort_future().map(|_| Err(format_err!("push aborted"))) => abort,
> + })?;
> +
> + info!("push datastore '{store}' end");
same here
> +
> + Ok(())
> + },
> + )?;
> +
> + Ok(upid_str)
> +}
> +
> +pub const ROUTER: Router = Router::new().post(&API_METHOD_PUSH);
> --
> 2.39.5
>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>
More information about the pbs-devel
mailing list