[pbs-devel] [PATCH proxmox-backup 1/4] api2: make remote for sync-jobs optional
Hannes Laimer
h.laimer at proxmox.com
Mon Feb 13 16:45:52 CET 2023
... and update the places where it is used.
A SyncJob without a remote pulls from a local
datastore.
Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
---
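Note (not part of the commit message): with `remote` now an
Option<String>, call sites tell the two pull flavours apart roughly as
sketched below. This is a minimal, self-contained illustration; the
helper name `sync_source` is made up and does not exist in the tree:

    // Hypothetical sketch, mirroring the Option handling in this patch.
    fn sync_source(remote: Option<&str>, remote_store: &str) -> String {
        match remote {
            // remote set: pull from a datastore behind that remote
            Some(remote) => format!("remote '{}/{}'", remote, remote_store),
            // remote unset: pull from a datastore on the local node
            None => format!("local datastore '{}'", remote_store),
        }
    }

    fn main() {
        // as stored in SyncJobConfig::remote after this patch
        let remote: Option<String> = None;
        println!("syncing from {}", sync_source(remote.as_deref(), "store0"));
    }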
pbs-api-types/src/jobs.rs | 4 +-
src/api2/config/remote.rs | 2 +-
src/api2/config/sync.rs | 41 ++++++++++------
src/api2/node/tasks.rs | 4 +-
src/api2/pull.rs | 78 +++++++++++++++++++++++--------
src/server/email_notifications.rs | 16 +++----
6 files changed, 101 insertions(+), 44 deletions(-)
diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index cf7618c4..68db6cb8 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -462,6 +462,7 @@ pub const GROUP_FILTER_LIST_SCHEMA: Schema =
},
remote: {
schema: REMOTE_ID_SCHEMA,
+ optional: true,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
@@ -506,7 +507,8 @@ pub struct SyncJobConfig {
pub ns: Option<BackupNamespace>,
#[serde(skip_serializing_if = "Option::is_none")]
pub owner: Option<Authid>,
- pub remote: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub remote: Option<String>,
pub remote_store: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub remote_ns: Option<BackupNamespace>,
diff --git a/src/api2/config/remote.rs b/src/api2/config/remote.rs
index 2f02d121..aa74bdc0 100644
--- a/src/api2/config/remote.rs
+++ b/src/api2/config/remote.rs
@@ -268,7 +268,7 @@ pub fn delete_remote(name: String, digest: Option<String>) -> Result<(), Error>
let job_list: Vec<SyncJobConfig> = sync_jobs.convert_to_typed_array("sync")?;
for job in job_list {
- if job.remote == name {
+ if job.remote.map_or(false, |id| id == name) {
param_bail!(
"name",
"remote '{}' is used by sync job '{}' (datastore '{}')",
diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index bd7373df..4c5d06e2 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -8,8 +8,8 @@ use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
- PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT,
- PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
+ PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
+ PRIV_REMOTE_AUDIT, PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::sync;
@@ -25,8 +25,13 @@ pub fn check_sync_job_read_access(
return false;
}
- let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
- remote_privs & PRIV_REMOTE_AUDIT != 0
+ if let Some(remote) = &job.remote {
+ let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]);
+ remote_privs & PRIV_REMOTE_AUDIT != 0
+ } else {
+ let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
+ source_ds_privs & PRIV_DATASTORE_AUDIT != 0
+ }
}
/// checks whether user can run the corresponding pull job
@@ -63,8 +68,13 @@ pub fn check_sync_job_modify_access(
return false;
}
- let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote, &job.remote_store]);
- remote_privs & PRIV_REMOTE_READ != 0
+ if let Some(remote) = &job.remote {
+ let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
+ remote_privs & PRIV_REMOTE_READ != 0
+ } else {
+ let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]);
+ source_ds_privs & PRIV_DATASTORE_READ != 0
+ }
}
#[api(
@@ -191,6 +201,8 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Sync
#[serde(rename_all = "kebab-case")]
/// Deletable property name
pub enum DeletableProperty {
+ /// Delete the remote property (-> pull from a local datastore).
+ Remote,
/// Delete the owner property.
Owner,
/// Delete the comment property.
@@ -273,6 +285,9 @@ pub fn update_sync_job(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
+ DeletableProperty::Remote => {
+ data.remote = None;
+ }
DeletableProperty::Owner => {
data.owner = None;
}
@@ -329,7 +344,7 @@ pub fn update_sync_job(
data.ns = Some(ns);
}
if let Some(remote) = update.remote {
- data.remote = remote;
+ data.remote = Some(remote);
}
if let Some(remote_store) = update.remote_store {
data.remote_store = remote_store;
@@ -495,7 +510,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
let mut job = SyncJobConfig {
id: "regular".to_string(),
- remote: "remote0".to_string(),
+ remote: Some("remote0".to_string()),
remote_store: "remotestore1".to_string(),
remote_ns: None,
store: "localstore0".to_string(),
@@ -529,11 +544,11 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
// reading without proper read permissions on local end must fail
- job.remote = "remote1".to_string();
+ job.remote = Some("remote1".to_string());
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
// reading without proper read permissions on remote end must fail
- job.remote = "remote0".to_string();
+ job.remote = Some("remote0".to_string());
job.store = "localstore1".to_string();
assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job));
@@ -546,10 +561,10 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
));
// writing without proper write permissions on local end must fail
- job.remote = "remote1".to_string();
+ job.remote = Some("remote1".to_string());
// writing without proper write permissions on remote end must fail
- job.remote = "remote0".to_string();
+ job.remote = Some("remote0".to_string());
job.store = "localstore1".to_string();
assert!(!check_sync_job_modify_access(
&user_info,
@@ -558,7 +573,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
));
// reset remote to one where users have access
- job.remote = "remote1".to_string();
+ job.remote = Some("remote1".to_string());
// user with read permission can only read, but not modify/run
assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job));
diff --git a/src/api2/node/tasks.rs b/src/api2/node/tasks.rs
index d386f805..780cb6d1 100644
--- a/src/api2/node/tasks.rs
+++ b/src/api2/node/tasks.rs
@@ -75,14 +75,14 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
let local_store = captures.get(3);
let local_ns = captures.get(4).map(|m| m.as_str());
- if let (Some(remote), Some(remote_store), Some(local_store)) =
+ if let (remote, Some(remote_store), Some(local_store)) =
(remote, remote_store, local_store)
{
return check_pull_privs(
auth_id,
local_store.as_str(),
local_ns,
- remote.as_str(),
+ remote.map(|remote| remote.as_str()),
remote_store.as_str(),
false,
);
diff --git a/src/api2/pull.rs b/src/api2/pull.rs
index b2473ec8..c4255254 100644
--- a/src/api2/pull.rs
+++ b/src/api2/pull.rs
@@ -9,7 +9,8 @@ use proxmox_sys::task_log;
use pbs_api_types::{
Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP,
- PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
+ PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA,
+ REMOVE_VANISHED_BACKUPS_SCHEMA,
};
use pbs_config::CachedUserInfo;
use proxmox_rest_server::WorkerTask;
@@ -21,7 +22,7 @@ pub fn check_pull_privs(
auth_id: &Authid,
store: &str,
ns: Option<&str>,
- remote: &str,
+ remote: Option<&str>,
remote_store: &str,
delete: bool,
) -> Result<(), Error> {
@@ -38,12 +39,22 @@ pub fn check_pull_privs(
PRIV_DATASTORE_BACKUP,
false,
)?;
- user_info.check_privs(
- auth_id,
- &["remote", remote, remote_store],
- PRIV_REMOTE_READ,
- false,
- )?;
+
+ if let Some(remote) = remote {
+ user_info.check_privs(
+ auth_id,
+ &["remote", remote, remote_store],
+ PRIV_REMOTE_READ,
+ false,
+ )?;
+ } else {
+ user_info.check_privs(
+ auth_id,
+ &["datastore", remote_store],
+ PRIV_DATASTORE_READ,
+ false,
+ )?;
+ }
if delete {
user_info.check_privs(
@@ -64,7 +75,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
PullParameters::new(
&sync_job.store,
sync_job.ns.clone().unwrap_or_default(),
- &sync_job.remote,
+ sync_job.remote.as_deref(),
&sync_job.remote_store,
sync_job.remote_ns.clone().unwrap_or_default(),
sync_job
@@ -89,7 +100,7 @@ pub fn do_sync_job(
) -> Result<String, Error> {
let job_id = format!(
"{}:{}:{}:{}:{}",
- sync_job.remote,
+ sync_job.remote.as_deref().unwrap_or("localhost"),
sync_job.remote_store,
sync_job.store,
sync_job.ns.clone().unwrap_or_default(),
@@ -112,7 +123,6 @@ pub fn do_sync_job(
let worker_future = async move {
let pull_params = PullParameters::try_from(&sync_job)?;
- let client = pull_params.client().await?;
task_log!(worker, "Starting datastore sync job '{}'", job_id);
if let Some(event_str) = schedule {
@@ -122,11 +132,35 @@ pub fn do_sync_job(
worker,
"sync datastore '{}' from '{}/{}'",
sync_job.store,
- sync_job.remote,
+ sync_job.remote.as_deref().unwrap_or("local"),
sync_job.remote_store,
);
- pull_store(&worker, &client, pull_params).await?;
+ if sync_job.remote.is_some() {
+ let client = pull_params.client().await?;
+ pull_store(&worker, Some(&client), pull_params).await?;
+ } else {
+ match (sync_job.ns, sync_job.remote_ns) {
+ (Some(target_ns), Some(source_ns))
+ if target_ns.path().starts_with(source_ns.path())
+ && sync_job.store == sync_job.remote_store =>
+ {
+ task_log!(
+ worker,
+ "Can't sync namespace into one of its sub-namespaces, skipping"
+ );
+ }
+ (_, None) if sync_job.store == sync_job.remote_store => {
+ task_log!(
+ worker,
+ "Can't sync root namespace into same datastore, skipping"
+ );
+ }
+ _ => {
+ pull_store(&worker, None, pull_params).await?;
+ }
+ }
+ }
task_log!(worker, "sync job '{}' end", &job_id);
@@ -178,6 +212,7 @@ pub fn do_sync_job(
},
remote: {
schema: REMOTE_ID_SCHEMA,
+ optional: true,
},
"remote-store": {
schema: DATASTORE_SCHEMA,
@@ -218,7 +253,7 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
async fn pull(
store: String,
ns: Option<BackupNamespace>,
- remote: String,
+ remote: Option<String>,
remote_store: String,
remote_ns: Option<BackupNamespace>,
remove_vanished: Option<bool>,
@@ -241,7 +276,7 @@ async fn pull(
&auth_id,
&store,
ns_str.as_deref(),
- &remote,
+ remote.as_deref(),
&remote_store,
delete,
)?;
@@ -249,7 +284,7 @@ async fn pull(
let pull_params = PullParameters::new(
&store,
ns,
- &remote,
+ remote.as_deref(),
&remote_store,
remote_ns.unwrap_or_default(),
auth_id.clone(),
@@ -258,7 +293,12 @@ async fn pull(
group_filter,
limit,
)?;
- let client = pull_params.client().await?;
+
+ let client = if remote.is_some() {
+ Some(pull_params.client().await?)
+ } else {
+ None
+ };
// fixme: set to_stdout to false?
// FIXME: add namespace to worker id?
@@ -272,11 +312,11 @@ async fn pull(
worker,
"pull datastore '{}' from '{}/{}'",
store,
- remote,
+ remote.as_deref().unwrap_or("localhost"),
remote_store,
);
- let pull_future = pull_store(&worker, &client, pull_params);
+ let pull_future = pull_store(&worker, client.as_ref(), pull_params);
(select! {
success = pull_future.fuse() => success,
abort = worker.abort_future().map(|_| Err(format_err!("pull aborted"))) => abort,
diff --git a/src/server/email_notifications.rs b/src/server/email_notifications.rs
index b3298cf9..31a46b0f 100644
--- a/src/server/email_notifications.rs
+++ b/src/server/email_notifications.rs
@@ -486,15 +486,15 @@ pub fn send_sync_status(
}
};
+ let source_str = if let Some(remote) = &job.remote {
+ format!("Sync remote '{}'", remote)
+ } else {
+ "Sync local".to_string()
+ };
+
let subject = match result {
- Ok(()) => format!(
- "Sync remote '{}' datastore '{}' successful",
- job.remote, job.remote_store,
- ),
- Err(_) => format!(
- "Sync remote '{}' datastore '{}' failed",
- job.remote, job.remote_store,
- ),
+ Ok(()) => format!("{} datastore '{}' successful", source_str, job.remote_store),
+ Err(_) => format!("{} datastore '{}' failed", source_str, job.remote_store),
};
send_job_status_mail(email, &subject, &text)?;
--
2.30.2