From c.ebner at proxmox.com Mon Nov 4 11:58:29 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 11:58:29 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/3] sync: pull: do not resync currently newest snapshot on target In-Reply-To: <20241104105830.85612-1-c.ebner@proxmox.com> References: <20241104105830.85612-1-c.ebner@proxmox.com> Message-ID: <20241104105830.85612-2-c.ebner@proxmox.com> The currently newest snapshot of a group on the sync target is not excluded from the list of already synced snapshots, leading to a re-sync. Filter out the snapshot as well. Signed-off-by: Christian Ebner --- Might be ignored if the re-sync is intentional. Implementation already present since commit: de8ec041 ("src/api2/sync.rs: implement remote sync") src/server/pull.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/pull.rs b/src/server/pull.rs index cc1427196..7aa191d96 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -528,7 +528,7 @@ async fn pull_group( .enumerate() .filter(|&(pos, ref dir)| { source_snapshots.insert(dir.time); - if last_sync_time > dir.time { + if last_sync_time >= dir.time { already_synced_skip_info.update(dir.time); return false; } else if already_synced_skip_info.count > 0 { @@ -536,7 +536,7 @@ async fn pull_group( already_synced_skip_info.reset(); } - if pos < cutoff && last_sync_time != dir.time { + if pos < cutoff { transfer_last_skip_info.update(dir.time); return false; } else if transfer_last_skip_info.count > 0 { -- 2.39.5 From c.ebner at proxmox.com Mon Nov 4 11:58:30 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 11:58:30 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/3] sync: pull: simplify logic for source snapshot filtering In-Reply-To: <20241104105830.85612-1-c.ebner@proxmox.com> References: <20241104105830.85612-1-c.ebner@proxmox.com> Message-ID: <20241104105830.85612-3-c.ebner@proxmox.com> Decouple the actual filter logic from the skip reason output logic by pulling the latter out of the filter closure. Makes the filtering logic more intuitive. 
Signed-off-by: Christian Ebner --- src/server/pull.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/server/pull.rs b/src/server/pull.rs index 7aa191d96..8f00ae0af 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -531,23 +531,25 @@ async fn pull_group( if last_sync_time >= dir.time { already_synced_skip_info.update(dir.time); return false; - } else if already_synced_skip_info.count > 0 { - info!("{already_synced_skip_info}"); - already_synced_skip_info.reset(); } - if pos < cutoff { transfer_last_skip_info.update(dir.time); return false; - } else if transfer_last_skip_info.count > 0 { - info!("{transfer_last_skip_info}"); - transfer_last_skip_info.reset(); } true }) .map(|(_, dir)| dir) .collect(); + if already_synced_skip_info.count > 0 { + info!("{already_synced_skip_info}"); + already_synced_skip_info.reset(); + } + if transfer_last_skip_info.count > 0 { + info!("{transfer_last_skip_info}"); + transfer_last_skip_info.reset(); + } + // start with 65536 chunks (up to 256 GiB) let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 64))); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 4 11:58:28 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 11:58:28 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/3] sync: fix premature return in snapshot skip filter logic Message-ID: <20241104105830.85612-1-c.ebner@proxmox.com> While checking which snapshots to sync, the filter logic incorrectly included the first snapshot newer than the last synced one unconditionally, bypassing the transfer-last check for that one snapshot. Subsequent snapshots are handled correctly again. E.g. an incorrect sync shown by an excerpt of a task log provided by a user in the community forum [0], with transfer-last set to 1: ``` skipped: 2 snapshot(s) (2024-09-29T18:00:28Z .. 2024-10-20T18:00:29Z) - older than the newest local snapshot skipped: 5 snapshot(s) (2024-10-28T19:00:28Z .. 2024-11-01T19:00:32Z) - due to transfer-last sync snapshot vm/110/2024-10-27T19:00:25Z ... sync snapshot vm/110/2024-11-02T19:00:23Z ``` Not only the last snapshot, but also the first snapshot newer than the newest local one was incorrectly synced. By dropping the early return, which led to the incorrect inclusion of that snapshot, the transfer-last condition is now correctly checked as well. Link to the issue reported in the community forum: [0] https://forum.proxmox.com/threads/156873/ Signed-off-by: Christian Ebner --- src/server/pull.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/server/pull.rs b/src/server/pull.rs index 3117f7d2c..cc1427196 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -534,7 +534,6 @@ async fn pull_group( } else if already_synced_skip_info.count > 0 { info!("{already_synced_skip_info}"); already_synced_skip_info.reset(); - return true; } if pos < cutoff && last_sync_time != dir.time { -- 2.39.5 From f.gruenbichler at proxmox.com Mon Nov 4 12:51:12 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Mon, 04 Nov 2024 12:51:12 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <20241018090909.103952-2-g.goller@proxmox.com> References: <20241018090909.103952-1-g.goller@proxmox.com> <20241018090909.103952-2-g.goller@proxmox.com> Message-ID: <1730717237.hc5rhqwdje.astroid@yuna.none> this doesn't really do what it says on the tin, see below. 
On October 18, 2024 11:09 am, Gabriel Goller wrote: > This option allows us to "fix" corrupt snapshots (and/or their chunks) > by pulling them from another remote. When traversing the remote > snapshots, we check if it exists locally, and if it is, we check if the > last verification of it failed. If the local snapshot is broken and the > `resync-corrupt` option is turned on, we pull in the remote snapshot, > overwriting the local one. > > This is very useful and has been requested a lot, as there is currently > no way to "fix" corrupt chunks/snapshots even if the user has a healthy > version of it on their offsite instance. > > Originally-by: Shannon Sterz > Signed-off-by: Gabriel Goller > --- > pbs-api-types/src/jobs.rs | 10 ++++++ > pbs-datastore/src/backup_info.rs | 13 +++++++- > src/api2/config/sync.rs | 4 +++ > src/api2/pull.rs | 9 +++++- > src/bin/proxmox-backup-manager.rs | 4 +-- > src/server/pull.rs | 52 ++++++++++++++++++++++++++----- > 6 files changed, 80 insertions(+), 12 deletions(-) > > diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs > index 868702bc059e..58f739ad00b5 100644 > --- a/pbs-api-types/src/jobs.rs > +++ b/pbs-api-types/src/jobs.rs > @@ -498,6 +498,10 @@ pub const TRANSFER_LAST_SCHEMA: Schema = > .minimum(1) > .schema(); > > +pub const RESYNC_CORRUPT_SCHEMA: Schema = > + BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") > + .schema(); > + > #[api( > properties: { > id: { > @@ -552,6 +556,10 @@ pub const TRANSFER_LAST_SCHEMA: Schema = > schema: TRANSFER_LAST_SCHEMA, > optional: true, > }, > + "resync-corrupt": { > + schema: RESYNC_CORRUPT_SCHEMA, > + optional: true, > + } > } > )] > #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] > @@ -585,6 +593,8 @@ pub struct SyncJobConfig { > pub limit: RateLimitConfig, > #[serde(skip_serializing_if = "Option::is_none")] > pub transfer_last: Option, > + #[serde(skip_serializing_if = "Option::is_none")] > + pub resync_corrupt: Option, > } > > impl SyncJobConfig { > diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs > index 414ec878d01a..c86fbb7568ab 100644 > --- a/pbs-datastore/src/backup_info.rs > +++ b/pbs-datastore/src/backup_info.rs > @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error}; > use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; > > use pbs_api_types::{ > - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, > + Authid, BackupNamespace, BackupType, GroupFilter, SnapshotVerifyState, VerifyState, > + BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, > }; > use pbs_config::{open_backup_lockfile, BackupLockGuard}; > > @@ -583,6 +584,16 @@ impl BackupDir { > > Ok(()) > } > + > + /// Load the verify state from the manifest. > + pub fn verify_state(&self) -> Result { > + self.load_manifest().and_then(|(m, _)| { > + let verify = m.unprotected["verify_state"].clone(); > + serde_json::from_value::(verify) > + .map(|svs| svs.state) > + .map_err(Into::into) > + }) > + } wouldn't it make more sense to have this as a getter for an optional SnapshotVerifyState on the BackupManifest? then it could go into its own commit, other call sites that load the verify state from a manifest could be adapted to it, and then this commit can also start using it? also see the comment further below about how the current implementation is very noisy if snapshots are newly synced as opposed to resynced.. 
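something like this rough sketch is what I have in mind - a getter on BackupManifest returning an Option, so a missing verify state (e.g. a freshly synced or brand-new snapshot) is not treated as an error (assuming `unprotected` stays a plain serde_json::Value on BackupManifest; name and signature are illustrative only):

impl BackupManifest {
    /// Get the verify state stored in the manifest, if any was recorded.
    pub fn verify_state(&self) -> Result<Option<SnapshotVerifyState>, anyhow::Error> {
        let verify = self.unprotected["verify_state"].clone();
        if verify.is_null() {
            // never verified so far, e.g. a newly created or freshly synced snapshot
            return Ok(None);
        }
        Ok(Some(serde_json::from_value(verify)?))
    }
}

call sites can then match on Ok(Some(..)) with VerifyState::Failed instead of logging an error for every snapshot that simply hasn't been verified yet..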
> } > > impl AsRef for BackupDir { > diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs > index 6fdc69a9e645..fa9db92f3d11 100644 > --- a/src/api2/config/sync.rs > +++ b/src/api2/config/sync.rs > @@ -368,6 +368,9 @@ pub fn update_sync_job( > if let Some(transfer_last) = update.transfer_last { > data.transfer_last = Some(transfer_last); > } > + if let Some(resync_corrupt) = update.resync_corrupt { > + data.resync_corrupt = Some(resync_corrupt); > + } > > if update.limit.rate_in.is_some() { > data.limit.rate_in = update.limit.rate_in; > @@ -527,6 +530,7 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > ns: None, > owner: Some(write_auth_id.clone()), > comment: None, > + resync_corrupt: None, > remove_vanished: None, > max_depth: None, > group_filter: None, > diff --git a/src/api2/pull.rs b/src/api2/pull.rs > index e733c9839e3a..0d4be0e2d228 100644 > --- a/src/api2/pull.rs > +++ b/src/api2/pull.rs > @@ -10,7 +10,7 @@ use pbs_api_types::{ > Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, > GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, > PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, > - TRANSFER_LAST_SCHEMA, > + RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, > }; > use pbs_config::CachedUserInfo; > use proxmox_human_byte::HumanByte; > @@ -89,6 +89,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters { > sync_job.group_filter.clone(), > sync_job.limit.clone(), > sync_job.transfer_last, > + sync_job.resync_corrupt, > ) > } > } > @@ -240,6 +241,10 @@ pub fn do_sync_job( > schema: TRANSFER_LAST_SCHEMA, > optional: true, > }, > + "resync-corrupt": { > + schema: RESYNC_CORRUPT_SCHEMA, > + optional: true, > + }, > }, > }, > access: { > @@ -264,6 +269,7 @@ async fn pull( > group_filter: Option>, > limit: RateLimitConfig, > transfer_last: Option, > + resync_corrupt: Option, > rpcenv: &mut dyn RpcEnvironment, > ) -> Result { > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > @@ -301,6 +307,7 @@ async fn pull( > group_filter, > limit, > transfer_last, > + resync_corrupt, > )?; > > // fixme: set to_stdout to false? 
> diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs > index 420e96665662..38a1cf0f5881 100644 > --- a/src/bin/proxmox-backup-manager.rs > +++ b/src/bin/proxmox-backup-manager.rs > @@ -14,8 +14,8 @@ use pbs_api_types::percent_encoding::percent_encode_component; > use pbs_api_types::{ > BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, > GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, > - REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA, > - VERIFICATION_OUTDATED_AFTER_SCHEMA, > + REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, > + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, > }; > use pbs_client::{display_task_log, view_task_result}; > use pbs_config::sync; > diff --git a/src/server/pull.rs b/src/server/pull.rs > index 3117f7d2c960..b2dd15d9d6db 100644 > --- a/src/server/pull.rs > +++ b/src/server/pull.rs > @@ -7,12 +7,14 @@ use std::sync::{Arc, Mutex}; > use std::time::SystemTime; > > use anyhow::{bail, format_err, Error}; > +use nom::combinator::verify; I think this snuck in ;) > use proxmox_human_byte::HumanByte; > use tracing::info; > > use pbs_api_types::{ > print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, > - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, > + RateLimitConfig, Remote, VerifyState, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, > + PRIV_DATASTORE_BACKUP, > }; > use pbs_client::BackupRepository; > use pbs_config::CachedUserInfo; > @@ -55,6 +57,8 @@ pub(crate) struct PullParameters { > group_filter: Vec, > /// How many snapshots should be transferred at most (taking the newest N snapshots) > transfer_last: Option, > + /// Whether to re-sync corrupted snapshots > + resync_corrupt: bool, > } > > impl PullParameters { > @@ -72,12 +76,14 @@ impl PullParameters { > group_filter: Option>, > limit: RateLimitConfig, > transfer_last: Option, > + resync_corrupt: Option, > ) -> Result { > if let Some(max_depth) = max_depth { > ns.check_max_depth(max_depth)?; > remote_ns.check_max_depth(max_depth)?; > }; > let remove_vanished = remove_vanished.unwrap_or(false); > + let resync_corrupt = resync_corrupt.unwrap_or(false); > > let source: Arc = if let Some(remote) = remote { > let (remote_config, _digest) = pbs_config::remote::config()?; > @@ -116,6 +122,7 @@ impl PullParameters { > max_depth, > group_filter, > transfer_last, > + resync_corrupt, > }) > } > } > @@ -175,9 +182,10 @@ async fn pull_index_chunks( > target.cond_touch_chunk(&info.digest, false) > })?; > if chunk_exists { > - //info!("chunk {} exists {}", pos, hex::encode(digest)); > + //info!("chunk exists {}", hex::encode(info.digest)); this > return Ok::<_, Error>(()); > } > + and this as well? 
> //info!("sync {} chunk {}", pos, hex::encode(digest)); > let chunk = chunk_reader.read_raw_chunk(&info.digest).await?; > let raw_size = chunk.raw_size() as usize; > @@ -325,13 +333,15 @@ async fn pull_single_archive<'a>( > /// - (Re)download the manifest > /// -- if it matches, only download log and treat snapshot as already synced > /// - Iterate over referenced files > -/// -- if file already exists, verify contents > +/// -- if file already exists, verify contents or pull again if last > +/// verification failed and `resync_corrupt` is true > /// -- if not, pull it from the remote > /// - Download log if not already existing > async fn pull_snapshot<'a>( > reader: Arc, > snapshot: &'a pbs_datastore::BackupDir, > downloaded_chunks: Arc>>, > + resync_corrupt: bool, > ) -> Result { > let mut sync_stats = SyncStats::default(); > let mut manifest_name = snapshot.full_path(); > @@ -352,6 +362,14 @@ async fn pull_snapshot<'a>( > return Ok(sync_stats); > } > I think this part here is somewhat wrong ordering wise, or at least, unnecessarily expensive.. if resync_corrupt is enabled, we want to (in this order!) - check the local snapshot for corruption, if it exists - if it is corrupt, we proceed with resyncing - if not, we only proceed with resyncing if it is the last snapshot in this group, and return early otherwise that way, we avoid redownloading all the manifests.. but see further below for another issue with the current implementation.. > + let must_resync_existing = resync_corrupt > + && snapshot > + .verify_state() > + .inspect_err(|err| { > + tracing::error!("Failed to check verification state of snapshot: {err:?}") 2024-11-04T12:34:57+01:00: Failed to check verification state of snapshot: unable to load blob '"/tank/pbs/ns/foobar/ns/test/ns/another_test/vm/900/2023-04-06T14:36:00Z/index.json.blob"' - No such file or directory (os error 2) this seems to be very noisy for newly synced snapshots, because the helper is implemented on BackupInfo instead of on BackupManifest.. > + }) > + .is_ok_and(|state| state == VerifyState::Failed); > + > if manifest_name.exists() { > let manifest_blob = proxmox_lang::try_block!({ > let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { > @@ -365,7 +383,7 @@ async fn pull_snapshot<'a>( > format_err!("unable to read local manifest {manifest_name:?} - {err}") > })?; > > - if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() { > + if manifest_blob.raw_data() == tmp_manifest_blob.raw_data() && !must_resync_existing { > if !client_log_name.exists() { > reader.try_download_client_log(&client_log_name).await?; > }; > @@ -377,11 +395,18 @@ async fn pull_snapshot<'a>( > > let manifest = BackupManifest::try_from(tmp_manifest_blob)?; > > + if must_resync_existing { > + info!( > + "re-syncing snapshot {} due to bad verification result", > + snapshot.dir() > + ); > + } > + > for item in manifest.files() { > let mut path = snapshot.full_path(); > path.push(&item.filename); > > - if path.exists() { > + if !must_resync_existing && path.exists() { > match ArchiveType::from_path(&item.filename)? 
{ > ArchiveType::DynamicIndex => { > let index = DynamicIndexReader::open(&path)?; > @@ -443,6 +468,7 @@ async fn pull_snapshot_from<'a>( > reader: Arc, > snapshot: &'a pbs_datastore::BackupDir, > downloaded_chunks: Arc>>, > + resync_corrupt: bool, > ) -> Result { > let (_path, is_new, _snap_lock) = snapshot > .datastore() > @@ -451,7 +477,7 @@ async fn pull_snapshot_from<'a>( > let sync_stats = if is_new { > info!("sync snapshot {}", snapshot.dir()); > > - match pull_snapshot(reader, snapshot, downloaded_chunks).await { > + match pull_snapshot(reader, snapshot, downloaded_chunks, resync_corrupt).await { > Err(err) => { > if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( > snapshot.backup_ns(), > @@ -469,7 +495,7 @@ async fn pull_snapshot_from<'a>( > } > } else { > info!("re-sync snapshot {}", snapshot.dir()); > - pull_snapshot(reader, snapshot, downloaded_chunks).await? > + pull_snapshot(reader, snapshot, downloaded_chunks, resync_corrupt).await? > }; > > Ok(sync_stats) > @@ -528,6 +554,10 @@ async fn pull_group( > .enumerate() > .filter(|&(pos, ref dir)| { > source_snapshots.insert(dir.time); > + // If resync_corrupt is set, we go through all the remote snapshots > + if params.resync_corrupt { > + return true; > + } alternatively, we could check the local manifest here, and only include existing snapshots with a failed verification state, the last one and new ones? that way, we'd get more meaningful progress stats as well.. because right now, this will not only resync existing corrupt snapshots, but also ones that have been pruned locally, but not on the source (i.e., the other proposed "fixing" sync mode that syncs "missing" old snapshots, not just corrupt ones). > if last_sync_time > dir.time { > already_synced_skip_info.update(dir.time); > return false; > @@ -566,7 +596,13 @@ async fn pull_group( > .source > .reader(source_namespace, &from_snapshot) > .await?; > - let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await; > + let result = pull_snapshot_from( > + reader, > + &to_snapshot, > + downloaded_chunks.clone(), > + params.resync_corrupt, > + ) > + .await; > > progress.done_snapshots = pos as u64 + 1; > info!("percentage done: {progress}"); > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From c.ebner at proxmox.com Mon Nov 4 12:56:14 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 12:56:14 +0100 Subject: [pbs-devel] [PATCH v3 proxmox-backup 2/5] api types: introduce `BackupArchiveName` type In-Reply-To: <1729857821.37okdbzjnk.astroid@yuna.none> References: <20241024080150.30200-1-c.ebner@proxmox.com> <20241024080150.30200-3-c.ebner@proxmox.com> <1729857821.37okdbzjnk.astroid@yuna.none> Message-ID: <5db8c8fe-7730-48ec-b4b2-e9d671434994@proxmox.com> On 10/25/24 14:15, Fabian Gr?nbichler wrote: > On October 24, 2024 10:01 am, Christian Ebner wrote: >> Introduces a dedicated wrapper type to be used for backup archive >> names instead of plain strings and associated helper methods for >> archive type checks and archive name mappings. 
>> >> Signed-off-by: Christian Ebner >> --- >> changes since version 2: >> - reworded commit message >> >> pbs-api-types/src/datastore.rs | 107 ++++++++++++++++++++++++++++++++- >> 1 file changed, 106 insertions(+), 1 deletion(-) >> >> diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs >> index dfa6bb259..62e4f7345 100644 >> --- a/pbs-api-types/src/datastore.rs >> +++ b/pbs-api-types/src/datastore.rs >> @@ -1,5 +1,7 @@ >> +use std::convert::{AsRef, TryFrom}; >> use std::fmt; >> use std::path::{Path, PathBuf}; >> +use std::str::FromStr; >> >> use anyhow::{bail, format_err, Error}; >> use const_format::concatcp; >> @@ -1570,7 +1572,7 @@ pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { >> } >> } >> >> -#[derive(PartialEq, Eq)] >> +#[derive(Clone, PartialEq, Eq)] >> /// Allowed variants of backup archives to be contained in a snapshot's manifest >> pub enum ArchiveType { >> FixedIndex, >> @@ -1590,3 +1592,106 @@ impl ArchiveType { >> Ok(archive_type) >> } >> } >> + >> +#[derive(Clone, PartialEq, Eq)] >> +/// Name of archive files contained in snapshot's manifest >> +pub struct BackupArchiveName { >> + // archive name including the `.fidx`, `.didx` or `.blob` extension >> + name: String, >> + // type parsed based on given extension >> + ty: ArchiveType, >> +} >> + >> +impl fmt::Display for BackupArchiveName { >> + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { >> + write!(f, "{name}", name = self.name) >> + } >> +} >> + >> +serde_plain::derive_deserialize_from_fromstr!(BackupArchiveName, "archive name"); >> + >> +impl FromStr for BackupArchiveName { >> + type Err = Error; >> + >> + fn from_str(name: &str) -> Result { >> + Self::try_from(name) >> + } >> +} >> + >> +serde_plain::derive_serialize_from_display!(BackupArchiveName); >> + >> +impl TryFrom<&str> for BackupArchiveName { >> + type Error = anyhow::Error; >> + >> + fn try_from(value: &str) -> Result { >> + let (name, ty) = Self::parse_archive_type(value)?; >> + Ok(Self { name, ty }) >> + } >> +} >> + >> +impl AsRef for BackupArchiveName { >> + fn as_ref(&self) -> &str { >> + &self.name >> + } >> +} >> + >> +impl BackupArchiveName { >> + pub fn from_path(path: impl AsRef) -> Result { >> + let path = path.as_ref(); >> + if path.as_os_str().as_encoded_bytes().last() == Some(&b'/') { >> + bail!("invalid archive name, got directory"); >> + } >> + let file_name = path >> + .file_name() >> + .ok_or_else(|| format_err!("invalid archive name"))?; >> + let name = file_name >> + .to_str() >> + .ok_or_else(|| format_err!("archive name not valid UTF-8"))?; >> + >> + Self::try_from(name) >> + } >> + >> + pub fn archive_type(&self) -> ArchiveType { >> + self.ty.clone() >> + } >> + >> + pub fn ends_with(&self, postfix: &str) -> bool { >> + self.name.ends_with(postfix) >> + } >> + >> + pub fn has_pxar_filename_extension(&self) -> bool { >> + self.name.ends_with(".pxar.didx") >> + || self.name.ends_with(".mpxar.didx") >> + || self.name.ends_with(".ppxar.didx") >> + } >> + >> + pub fn without_type_extension(&self) -> String { >> + match self.ty { >> + ArchiveType::DynamicIndex => self.name.strip_suffix(".didx").unwrap().into(), >> + ArchiveType::FixedIndex => self.name.strip_suffix(".fidx").unwrap().into(), >> + ArchiveType::Blob => self.name.strip_suffix(".blob").unwrap().into(), > > if ArchiveType would have a getter for the corresponding extension, then > this could just become > > self.name.strip_suffix(self.ty.extension()).unwrap().into() Acked, will adapt for version 4 of the patch series. 
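Something along these lines is what I have in mind for the getter - a minimal sketch, where the method name and the leading-dot convention are illustrative only:

impl ArchiveType {
    /// File name extension for this archive type, including the leading dot.
    pub fn extension(&self) -> &'static str {
        match self {
            ArchiveType::FixedIndex => ".fidx",
            ArchiveType::DynamicIndex => ".didx",
            ArchiveType::Blob => ".blob",
        }
    }
}

with that, `without_type_extension()` reduces to the suggested `self.name.strip_suffix(self.ty.extension()).unwrap().into()`, and the unwrap stays safe since the name is only ever constructed with a matching extension.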
> >> + } >> + } >> + >> + fn parse_archive_type(archive_name: &str) -> Result<(String, ArchiveType), Error> { >> + if archive_name.ends_with(".didx") >> + || archive_name.ends_with(".fidx") >> + || archive_name.ends_with(".blob") >> + { >> + Ok((archive_name.into(), ArchiveType::from_path(archive_name)?)) > > and this here could maybe also be turned around -> get the extension > from archive_name: > > if let Ok(ty) = ArchiveType::from_path(..) { > Ok((archive_name.into(), ty)) > } else if .. Agreed, will also incorporate this. > >> + } else if archive_name.ends_with(".pxar") >> + || archive_name.ends_with(".mpxar") >> + || archive_name.ends_with(".ppxar") >> + { >> + Ok((format!("{archive_name}.didx"), ArchiveType::DynamicIndex)) >> + } else if archive_name.ends_with(".img") { >> + Ok((format!("{archive_name}.fidx"), ArchiveType::FixedIndex)) > > not sure whether we want these associations (between ArchiveType and > contained files) to live somewhere more declarative? Could be moved to the `ArchiveType`, as `archive_extension` mimicking the `extension` getter as suggested above. > >> + } else { >> + Ok((format!("{archive_name}.blob"), ArchiveType::Blob)) > > this last catchall here might be a bit dangerous? it basically makes the > introduction of a new archive type collide with any existing blobs that > happen to have a file name that ends with that new archive type.. This is true, but we already have that exact same mapping currently in use (see patch 4, which drops the pre-existing helper). But that was arguably more limited in scope. So maybe we might keep the pre-existing helper here instead of implementing `parse_archive_type` for the API type directly? > > e.g., qemu-server.conf.blob > > with this patch, qemu-server.conf will automatically expand to > qemu-server.conf.blob > > if we then at some point introduce an archive type `conf`, then it > wouldn't anymore, but instead be interpreted as archive of that type.. > > similarly, all of the above "inner" extensions are burned for archive > type usage already.. > > this was basically the only reason I didn't apply this right now leaving > the rest above as follow-up material ;) > >> + } >> + } >> +} >> + >> +impl ApiType for BackupArchiveName { >> + const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; >> +} >> -- >> 2.39.5 >> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >> >> >> > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Mon Nov 4 13:15:11 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 04 Nov 2024 13:15:11 +0100 Subject: [pbs-devel] partially-applied: [PATCH proxmox-backup 1/3] sync: fix premature return in snapshot skip filter logic In-Reply-To: <20241104105830.85612-1-c.ebner@proxmox.com> References: <20241104105830.85612-1-c.ebner@proxmox.com> Message-ID: <1730722421.eutmwxhuvw.astroid@yuna.none> applied this one! On November 4, 2024 11:58 am, Christian Ebner wrote: > While checking which snapshots to sync, the filter logic incorrectly > included the first snapshot newer that the last synced one > unconditionally, bypassing the transfer last check for that one > snapshot. Following snapshots are correctly handled again. > > E.g. 
an incorrect sync shown by an excerpt of a task log provided by a user > in the community forum [0], with transfer-last set to 1: > > ``` > skipped: 2 snapshot(s) (2024-09-29T18:00:28Z .. 2024-10-20T18:00:29Z) - older than the newest local snapshot > skipped: 5 snapshot(s) (2024-10-28T19:00:28Z .. 2024-11-01T19:00:32Z) - due to transfer-last > sync snapshot vm/110/2024-10-27T19:00:25Z > ... > sync snapshot vm/110/2024-11-02T19:00:23Z > ``` > > Not only the last snapshot, but also the first snapshot newer than the > newest local one was incorrectly synced. > > By dropping the early return, which led to the incorrect inclusion of > that snapshot, the transfer-last condition is now correctly checked as > well. > > Link to the issue reported in the community forum: > [0] https://forum.proxmox.com/threads/156873/ > > Signed-off-by: Christian Ebner > --- > src/server/pull.rs | 1 - > 1 file changed, 1 deletion(-) > > diff --git a/src/server/pull.rs b/src/server/pull.rs > index 3117f7d2c..cc1427196 100644 > --- a/src/server/pull.rs > +++ b/src/server/pull.rs > @@ -534,7 +534,6 @@ async fn pull_group( > } else if already_synced_skip_info.count > 0 { > info!("{already_synced_skip_info}"); > already_synced_skip_info.reset(); > - return true; > } > > if pos < cutoff && last_sync_time != dir.time { > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Mon Nov 4 13:15:15 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Mon, 04 Nov 2024 13:15:15 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/3] sync: pull: simplify logic for source snapshot filtering In-Reply-To: <20241104105830.85612-3-c.ebner@proxmox.com> References: <20241104105830.85612-1-c.ebner@proxmox.com> <20241104105830.85612-3-c.ebner@proxmox.com> Message-ID: <1730722463.m98lxjw06x.astroid@yuna.none> Reviewed-by: Fabian Grünbichler but needs a rebase cause of patch#2 ;) On November 4, 2024 11:58 am, Christian Ebner wrote: > Decouple the actual filter logic from the skip reason output logic by > pulling the latter out of the filter closure. > > Makes the filtering logic more intuitive. 
> > Signed-off-by: Christian Ebner > --- > src/server/pull.rs | 16 +++++++++------- > 1 file changed, 9 insertions(+), 7 deletions(-) > > diff --git a/src/server/pull.rs b/src/server/pull.rs > index 7aa191d96..8f00ae0af 100644 > --- a/src/server/pull.rs > +++ b/src/server/pull.rs > @@ -531,23 +531,25 @@ async fn pull_group( > if last_sync_time >= dir.time { > already_synced_skip_info.update(dir.time); > return false; > - } else if already_synced_skip_info.count > 0 { > - info!("{already_synced_skip_info}"); > - already_synced_skip_info.reset(); > } > - if pos < cutoff { > transfer_last_skip_info.update(dir.time); > return false; > - } else if transfer_last_skip_info.count > 0 { > - info!("{transfer_last_skip_info}"); > - transfer_last_skip_info.reset(); > } > true > }) > .map(|(_, dir)| dir) > .collect(); > > + if already_synced_skip_info.count > 0 { > + info!("{already_synced_skip_info}"); > + already_synced_skip_info.reset(); > + } > + if transfer_last_skip_info.count > 0 { > + info!("{transfer_last_skip_info}"); > + transfer_last_skip_info.reset(); > + } > + > // start with 65536 chunks (up to 256 GiB) > let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 64))); > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Mon Nov 4 13:15:21 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Mon, 04 Nov 2024 13:15:21 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/3] sync: pull: do not resync currently newest snapshot on target In-Reply-To: <20241104105830.85612-2-c.ebner@proxmox.com> References: <20241104105830.85612-1-c.ebner@proxmox.com> <20241104105830.85612-2-c.ebner@proxmox.com> Message-ID: <1730722172.za02ajxxfy.astroid@yuna.none> On November 4, 2024 11:58 am, Christian Ebner wrote: > The currently newest snapshot of a group on the sync target is not > excluded from the list of already synced snapshots, leading to a > re-sync. this is intentional, the last snapshot might not have been completely done on the source side when we last synced it (e.g., backup log still missing, post-backup verification not done yet, ..). > > Filter out the snapshot as well. > > Signed-off-by: Christian Ebner > --- > Might be ignored if the re-sync is intentional. 
> > Implementation already present since commit: > de8ec041 ("src/api2/sync.rs: implement remote sync") > > src/server/pull.rs | 4 ++-- > 1 file changed, 2 insertions(+), 2 deletions(-) > > diff --git a/src/server/pull.rs b/src/server/pull.rs > index cc1427196..7aa191d96 100644 > --- a/src/server/pull.rs > +++ b/src/server/pull.rs > @@ -528,7 +528,7 @@ async fn pull_group( > .enumerate() > .filter(|&(pos, ref dir)| { > source_snapshots.insert(dir.time); > - if last_sync_time > dir.time { > + if last_sync_time >= dir.time { > already_synced_skip_info.update(dir.time); > return false; > } else if already_synced_skip_info.count > 0 { > @@ -536,7 +536,7 @@ async fn pull_group( > already_synced_skip_info.reset(); > } > > - if pos < cutoff && last_sync_time != dir.time { > + if pos < cutoff { > transfer_last_skip_info.update(dir.time); > return false; > } else if transfer_last_skip_info.count > 0 { > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From c.ebner at proxmox.com Mon Nov 4 13:20:30 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 13:20:30 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/3] sync: pull: do not resync currently newest snapshot on target In-Reply-To: <1730722172.za02ajxxfy.astroid@yuna.none> References: <20241104105830.85612-1-c.ebner@proxmox.com> <20241104105830.85612-2-c.ebner@proxmox.com> <1730722172.za02ajxxfy.astroid@yuna.none> Message-ID: <23f19edf-7865-4d32-b185-a93458b40f83@proxmox.com> On 11/4/24 13:15, Fabian Gr?nbichler wrote: > On November 4, 2024 11:58 am, Christian Ebner wrote: >> The currently newest snapshot of a group on the sync target is not >> excluded from the list of already synced snapshots, leading to a >> re-sync. > > this is intentional, the last snapshot might not have been completely > done on the source side when we last synced it (e.g., backup log still > missing, post-backup verification not done yet, ..). Ah I see, thanks for clarification: was already expecting this to be more subtle. Will amend the patch to explicitly mention this as a comment. From f.gruenbichler at proxmox.com Mon Nov 4 13:23:08 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 04 Nov 2024 13:23:08 +0100 Subject: [pbs-devel] [PATCH v3 proxmox-backup 2/5] api types: introduce `BackupArchiveName` type In-Reply-To: <5db8c8fe-7730-48ec-b4b2-e9d671434994@proxmox.com> References: <20241024080150.30200-1-c.ebner@proxmox.com> <20241024080150.30200-3-c.ebner@proxmox.com> <1729857821.37okdbzjnk.astroid@yuna.none> <5db8c8fe-7730-48ec-b4b2-e9d671434994@proxmox.com> Message-ID: <1730722831.b39roa1gvp.astroid@yuna.none> On November 4, 2024 12:56 pm, Christian Ebner wrote: > On 10/25/24 14:15, Fabian Gr?nbichler wrote: >> On October 24, 2024 10:01 am, Christian Ebner wrote: >>> Introduces a dedicated wrapper type to be used for backup archive >>> names instead of plain strings and associated helper methods for >>> archive type checks and archive name mappings. >>> >>> Signed-off-by: Christian Ebner >> >>> + } else { >>> + Ok((format!("{archive_name}.blob"), ArchiveType::Blob)) >> >> this last catchall here might be a bit dangerous? it basically makes the >> introduction of a new archive type collide with any existing blobs that >> happen to have a file name that ends with that new archive type.. 
> > This is true, but we already have that exact same mapping currently in > use (see patch 4, which drops the pre-existing helper). But that was > arguably more limited in scope. > > So maybe we might keep the pre-existing helper here instead of > implementing `parse_archive_type` for the API type directly? yeah, that old helper was only used in a single place - the `proxmox-backup-client restore ..` command. so I guess we need to special case this there if we want to avoid extending that catchall fallback to everywhere? From c.ebner at proxmox.com Mon Nov 4 13:56:13 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 13:56:13 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/2] sync: pull: mention why last snapshot of previous sync is resynced Message-ID: <20241104125614.162491-1-c.ebner@proxmox.com> The last snapshot synced during the previous sync job might not have been fully completed just yet (e.g. backup log still missing, verification still ongoing, ...). Explicitly mention the reason and that the resync is therefore intentional by a comment in the filter logic. Suggested-by: Fabian Grünbichler Signed-off-by: Christian Ebner --- changes since version 1: - not present in previous version src/server/pull.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/server/pull.rs b/src/server/pull.rs index cc1427196..8d8461cb2 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -528,6 +528,8 @@ async fn pull_group( .enumerate() .filter(|&(pos, ref dir)| { source_snapshots.insert(dir.time); + // Note: The last sync's final snapshot might not have been completely + // done yet on the source side, keep it included for a resync. if last_sync_time > dir.time { already_synced_skip_info.update(dir.time); return false; -- 2.39.5 From c.ebner at proxmox.com Mon Nov 4 13:56:14 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 13:56:14 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/2] sync: pull: simplify logic for source snapshot filtering In-Reply-To: <20241104125614.162491-1-c.ebner@proxmox.com> References: <20241104125614.162491-1-c.ebner@proxmox.com> Message-ID: <20241104125614.162491-2-c.ebner@proxmox.com> Decouple the actual filter logic from the skip reason output logic by pulling the latter out of the filter closure. Makes the filtering logic more intuitive. 
Reviewed-by: Fabian Gr?nbichler Signed-off-by: Christian Ebner --- changes since version 1: - no changes src/server/pull.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/src/server/pull.rs b/src/server/pull.rs index 8d8461cb2..0afb16bc0 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -533,23 +533,26 @@ async fn pull_group( if last_sync_time > dir.time { already_synced_skip_info.update(dir.time); return false; - } else if already_synced_skip_info.count > 0 { - info!("{already_synced_skip_info}"); - already_synced_skip_info.reset(); } if pos < cutoff && last_sync_time != dir.time { transfer_last_skip_info.update(dir.time); return false; - } else if transfer_last_skip_info.count > 0 { - info!("{transfer_last_skip_info}"); - transfer_last_skip_info.reset(); } true }) .map(|(_, dir)| dir) .collect(); + if already_synced_skip_info.count > 0 { + info!("{already_synced_skip_info}"); + already_synced_skip_info.reset(); + } + if transfer_last_skip_info.count > 0 { + info!("{transfer_last_skip_info}"); + transfer_last_skip_info.reset(); + } + // start with 65536 chunks (up to 256 GiB) let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 64))); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 4 13:57:49 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 4 Nov 2024 13:57:49 +0100 Subject: [pbs-devel] partially-applied: [PATCH proxmox-backup 1/3] sync: fix premature return in snapshot skip filter logic In-Reply-To: <1730722421.eutmwxhuvw.astroid@yuna.none> References: <20241104105830.85612-1-c.ebner@proxmox.com> <1730722421.eutmwxhuvw.astroid@yuna.none> Message-ID: superseded-by version 2: https://lore.proxmox.com/pbs-devel/20241104125614.162491-1-c.ebner at proxmox.com/T/ From f.gruenbichler at proxmox.com Mon Nov 4 14:06:13 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 04 Nov 2024 14:06:13 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v4 2/6] add support for bulk import of a dump directory In-Reply-To: <20241030135537.92595-3-f.schauer@proxmox.com> References: <20241030135537.92595-1-f.schauer@proxmox.com> <20241030135537.92595-3-f.schauer@proxmox.com> Message-ID: <1730724842.agk2is6zq8.astroid@yuna.none> On October 30, 2024 2:55 pm, Filip Schauer wrote: > When a path to a directory is provided in the vma_file argument, try to > upload all VMA backups in the directory. This also handles compressed > VMA files, notes and logs. If a vmid is specified with --vmid, only the > backups of that particular vmid are uploaded. 
> > This is intended for use on a dump directory: > > PBS_FINGERPRINT='PBS_FINGERPRINT' vma-to-pbs \ > --repository 'user at realm!token at server:port:datastore' \ > /var/lib/vz/dump > > Signed-off-by: Filip Schauer > --- > Cargo.toml | 3 + > src/main.rs | 161 +++++++++++++++++++++++++++++++++++++++++++++---- > src/vma2pbs.rs | 64 +++++++++++++++++--- > 3 files changed, 209 insertions(+), 19 deletions(-) > > diff --git a/Cargo.toml b/Cargo.toml > index cd13426..5c6a175 100644 > --- a/Cargo.toml > +++ b/Cargo.toml > @@ -7,9 +7,12 @@ edition = "2021" > [dependencies] > anyhow = "1.0" > bincode = "1.3" > +chrono = "0.4" > hyper = "0.14.5" > +itertools = "0.13" not needed, see below > pico-args = "0.5" > md5 = "0.7.0" > +regex = "1.7" > scopeguard = "1.1.0" > serde = "1.0" > serde_json = "1.0" > diff --git a/src/main.rs b/src/main.rs > index 3e25591..4c5135b 100644 > --- a/src/main.rs > +++ b/src/main.rs > @@ -1,26 +1,35 @@ > use std::ffi::OsString; > +use std::fs::read_dir; > +use std::io::{BufRead, BufReader}; > +use std::path::PathBuf; > > use anyhow::{bail, Context, Error}; > +use chrono::NaiveDateTime; > +use itertools::Itertools; > use proxmox_sys::linux::tty; > use proxmox_time::epoch_i64; > +use regex::Regex; > > mod vma; > mod vma2pbs; > -use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, PbsArgs, VmaBackupArgs}; > +use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, Compression, PbsArgs, VmaBackupArgs}; > > const CMD_HELP: &str = "\ > Usage: vma-to-pbs [OPTIONS] --repository --vmid [vma_file] > > Arguments: > - [vma_file] > + [vma_file | dump_directory] > > Options: > --repository > Repository URL > [--ns ] > Namespace > - --vmid > + [--vmid ] > Backup ID > + This is required if a single VMA file is provided. > + If not specified, bulk import all VMA backups in the provided directory. > + If specified with a dump directory, only import backups of the specified vmid. > [--backup-time ] > Backup timestamp > --fingerprint > @@ -41,6 +50,8 @@ Options: > File containing a comment/notes > [--log-file ] > Log file > + -y, --yes > + Automatic yes to prompts > -h, --help > Print help > -V, --version > @@ -52,7 +63,16 @@ fn parse_args() -> Result { > args.remove(0); // remove the executable path. 
> > let mut first_later_args_index = 0; > - let options = ["-h", "--help", "-c", "--compress", "-e", "--encrypt"]; > + let options = [ > + "-h", > + "--help", > + "-c", > + "--compress", > + "-e", > + "--encrypt", > + "-y", > + "--yes", > + ]; > > for (i, arg) in args.iter().enumerate() { > if let Some(arg) = arg.to_str() { > @@ -87,7 +107,7 @@ fn parse_args() -> Result { > > let pbs_repository = args.value_from_str("--repository")?; > let namespace = args.opt_value_from_str("--ns")?; > - let vmid = args.value_from_str("--vmid")?; > + let vmid = args.opt_value_from_str("--vmid")?; > let backup_time: Option = args.opt_value_from_str("--backup-time")?; > let backup_time = backup_time.unwrap_or_else(epoch_i64); > let fingerprint = args.opt_value_from_str("--fingerprint")?; > @@ -99,6 +119,7 @@ fn parse_args() -> Result { > let key_password_file: Option = args.opt_value_from_str("--key-password-file")?; > let notes_file: Option = args.opt_value_from_str("--notes-file")?; > let log_file_path: Option = args.opt_value_from_str("--log-file")?; > + let yes = args.contains(["-y", "--yes"]); > > match (encrypt, keyfile.is_some()) { > (true, false) => bail!("--encrypt requires a --keyfile!"), > @@ -196,15 +217,131 @@ fn parse_args() -> Result { > encrypt, > }; > > - let vma_args = VmaBackupArgs { > - vma_file_path: vma_file_path.cloned(), > - backup_id: vmid, > - backup_time, > - notes, > - log_file_path, > + let bulk = > + vma_file_path > + .map(PathBuf::from) > + .and_then(|path| if path.is_dir() { Some(path) } else { None }); > + > + let grouped_vmas = if let Some(dump_dir_path) = bulk { grouped_vmas should still be a map, not a vec of vec.. e.g., something like this (requires some more adaptation - while this could use itertools, I don't think it's worth to pull that in if the same can be had with a single fold invocation): @@ -298,12 +298,16 @@ fn parse_args() -> Result { vmas.sort_by_key(|d| d.backup_time); let total_vma_count = vmas.len(); - let mut grouped_vmas: Vec<_> = vmas - .into_iter() - .into_group_map_by(|d| d.backup_id.clone()) - .into_values() - .collect(); - grouped_vmas.sort_by_key(|d| d[0].backup_id.clone()); + let grouped_vmas = vmas.into_iter().fold( + HashMap::new(), + |mut grouped: HashMap>, vma_args| { + grouped + .entry(vma_args.backup_id.clone()) + .or_default() + .push(vma_args); + grouped + }, + ); log::info!( "Found {} backup archive(s) of {} different VMID(s):", @@ -311,12 +315,8 @@ fn parse_args() -> Result { grouped_vmas.len() ); - for vma_group in &grouped_vmas { - log::info!( - "- VMID {}: {} backups", - vma_group[0].backup_id, - vma_group.len() - ); + for (vma_group, vma_args) in &grouped_vmas { + log::info!("- VMID {}: {} backups", vma_group, vma_args.len()); } if !yes { > + let re = Regex::new( > + r"vzdump-qemu-(\d+)-(\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2}).vma(|.zst|.lzo|.gz)$", > + )?; > + > + let mut vmas = Vec::new(); > + > + for entry in read_dir(dump_dir_path)? 
{ > + let entry = entry?; > + let path = entry.path(); > + > + if !path.is_file() { > + continue; > + } > + > + if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) { > + let Some((_, [backup_id, timestr, ext])) = > + re.captures(file_name).map(|c| c.extract()) > + else { > + // Skip the file, since it is not a VMA backup > + continue; > + }; > + > + if let Some(ref vmid) = vmid { > + if backup_id != vmid { > + // Skip the backup, since it does not match the specified vmid > + continue; > + } > + } > + > + let compression = match ext { > + "" => None, > + ".zst" => Some(Compression::Zstd), > + ".lzo" => Some(Compression::Lzo), > + ".gz" => Some(Compression::GZip), > + _ => bail!("Unexpected file extension: {ext}"), > + }; > + > + let backup_time = NaiveDateTime::parse_from_str(timestr, "%Y_%m_%d-%H_%M_%S")? > + .and_utc() > + .timestamp(); > + > + let notes_path = path.with_file_name(format!("{}.notes", file_name)); > + let notes = proxmox_sys::fs::file_read_optional_string(notes_path)?; > + > + let log_path = path.with_file_name(format!("{}.log", file_name)); > + let log_file_path = if log_path.exists() { > + Some(log_path.to_path_buf().into_os_string()) > + } else { > + None > + }; > + > + let backup_args = VmaBackupArgs { > + vma_file_path: Some(path.clone().into()), > + compression, > + backup_id: backup_id.to_string(), > + backup_time, > + notes, > + log_file_path, > + }; > + vmas.push(backup_args); > + } > + } > + > + vmas.sort_by_key(|d| d.backup_time); > + let total_vma_count = vmas.len(); > + let mut grouped_vmas: Vec<_> = vmas > + .into_iter() > + .into_group_map_by(|d| d.backup_id.clone()) > + .into_values() > + .collect(); > + grouped_vmas.sort_by_key(|d| d[0].backup_id.clone()); > + > + println!( > + "Found {} backup archive(s) of {} different VMID(s):", > + total_vma_count, > + grouped_vmas.len() > + ); if we don't find any, we should print something else here and exit? > + > + for vma_group in &grouped_vmas { > + println!( > + "- VMID {}: {} backups", > + vma_group[0].backup_id, > + vma_group.len() > + ); > + } > + > + if !yes { > + loop { > + eprint!("Proceed with the bulk import? (y/n): "); > + let mut line = String::new(); > + > + BufReader::new(std::io::stdin()).read_line(&mut line)?; > + let trimmed = line.trim(); > + if trimmed == "y" || trimmed == "Y" { > + break; > + } else if trimmed == "n" || trimmed == "N" { > + bail!("Bulk import was not confirmed."); > + } this maybe should mimic what we do in proxmox_router when prompting for confirmation? e.g., flush stdout, have a default value, ..? should we abort after a few loops? > + } > + } > + > + grouped_vmas From f.gruenbichler at proxmox.com Mon Nov 4 14:09:22 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 04 Nov 2024 14:09:22 +0100 Subject: [pbs-devel] partially-applied: [PATCH vma-to-pbs v4 0/6] add support for bulk import of a dump directory In-Reply-To: <20241030135537.92595-1-f.schauer@proxmox.com> References: <20241030135537.92595-1-f.schauer@proxmox.com> Message-ID: <1730725578.754woz7k97.astroid@yuna.none> applied patches 1 and 4 consider patches 3, 5 and 6 Reviewed-by: Fabian Gr?nbichler they were not possible to apply now because patch context relies on patch #2 being applied as well. On October 30, 2024 2:55 pm, Filip Schauer wrote: > When a path to a directory is provided in the vma_file argument, try to > upload all VMA backups in the directory. This also handles compressed > VMA files, notes and logs. 
If a vmid is specified with --vmid, only the > backups of that particular vmid are uploaded. > > Also improve the readability of the log messages to keep track of all > imported backups. > > Changed since v3: > * Mention in the description of the --vmid argument that it is required > if a single VMA file is provided > * Construct grouped_vmas in place > * Add debug logs when gathering files for bulk import > * Log a summary of the files gathered for bulk import > * Remove the "confusing VMA file path" error message in the second > commit > * Switch chunk_stats from Arc> to > Arc<[AtomicU64; 256]> and use fetch_add to atomically increment and > fetch the chunk stat > * Ask for confirmation before bulk import > * Add --yes option to skip the confirmation prompt > > Changed since v2: > * Make skipping a VMID on error optional with the --skip-failed option > * Switch log output from stderr to stdout > * Bump itertools to 0.13 > > Changed since v1: > * Do not recurse through dump directory > * Compile regex once before iterating over the files in the dump > directory > * Use extract on regex capture groups > * Do not use deprecated method `chrono::NaiveDateTime::timestamp` > * Use proxmox_sys::fs::file_read_optional_string > * Group VMA files by VMID and continue with next VMID on error > * Move the BackupVmaToPbsArgs split into its own commit > * Remove hard coded occurrences of 255 > * Use level-based logging instead of println > > Filip Schauer (6): > split BackupVmaToPbsArgs into PbsArgs and VmaBackupArgs > add support for bulk import of a dump directory > add option to skip vmids whose backups failed to upload > remove hard coded values > use level-based logging instead of println > log device upload progress as a percentage > > Cargo.toml | 5 + > src/main.rs | 192 +++++++++++++++++++++++++++--- > src/vma.rs | 2 +- > src/vma2pbs.rs | 310 ++++++++++++++++++++++++++++++++----------------- > 4 files changed, 388 insertions(+), 121 deletions(-) > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Mon Nov 4 15:14:05 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Mon, 04 Nov 2024 15:14:05 +0100 Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 1/2] sync: pull: mention why last snapshot of previous sync is resynced In-Reply-To: <20241104125614.162491-1-c.ebner@proxmox.com> References: <20241104125614.162491-1-c.ebner@proxmox.com> Message-ID: <1730729637.ob58e6gfbc.astroid@yuna.none> On November 4, 2024 1:56 pm, Christian Ebner wrote: > The last snapshot synced during the previous sync job might not have > been fully completed just yet (e.g. backup log still missing, > verification still ongoing, ...). > Explicitly mention the reason and that the resync is therefore > intentional by a comment in the filter logic. 
> > Suggested-by: Fabian Grünbichler > Signed-off-by: Christian Ebner > --- > changes since version 1: > - not present in previous version > > src/server/pull.rs | 2 ++ > 1 file changed, 2 insertions(+) > > diff --git a/src/server/pull.rs b/src/server/pull.rs > index cc1427196..8d8461cb2 100644 > --- a/src/server/pull.rs > +++ b/src/server/pull.rs > @@ -528,6 +528,8 @@ async fn pull_group( > .enumerate() > .filter(|&(pos, ref dir)| { > source_snapshots.insert(dir.time); > + // Note: The last sync's final snapshot might not have been completely > + // done yet on the source side, keep it included for a resync. > if last_sync_time > dir.time { > already_synced_skip_info.update(dir.time); > return false; > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > From g.goller at proxmox.com Mon Nov 4 17:02:21 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Mon, 4 Nov 2024 17:02:21 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <1730717237.hc5rhqwdje.astroid@yuna.none> References: <20241018090909.103952-1-g.goller@proxmox.com> <20241018090909.103952-2-g.goller@proxmox.com> <1730717237.hc5rhqwdje.astroid@yuna.none> Message-ID: On 04.11.2024 12:51, Fabian Grünbichler wrote: >this doesn't really do what it says on the tin, see below. > >On October 18, 2024 11:09 am, Gabriel Goller wrote: >> [snip] >> diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs >> index 414ec878d01a..c86fbb7568ab 100644 >> --- a/pbs-datastore/src/backup_info.rs >> +++ b/pbs-datastore/src/backup_info.rs >> @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error}; >> use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; >> >> use pbs_api_types::{ >> - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, >> + Authid, BackupNamespace, BackupType, GroupFilter, SnapshotVerifyState, VerifyState, >> + BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, >> }; >> use pbs_config::{open_backup_lockfile, BackupLockGuard}; >> >> @@ -583,6 +584,16 @@ impl BackupDir { >> >> Ok(()) >> } >> + >> + /// Load the verify state from the manifest. >> + pub fn verify_state(&self) -> Result { >> + self.load_manifest().and_then(|(m, _)| { >> + let verify = m.unprotected["verify_state"].clone(); >> + serde_json::from_value::(verify) >> + .map(|svs| svs.state) >> + .map_err(Into::into) >> + }) >> + } > >wouldn't it make more sense to have this as a getter for an optional >SnapshotVerifyState on the BackupManifest? > >then it could go into its own commit, other call sites that load the >verify state from a manifest could be adapted to it, and then this >commit can also start using it? You're right, the BackupManifest is loaded here twice actually. So I could move this function to a getter in BackupManifest and then move the construction of it (src/server/pull.rs:396): let manifest = BackupManifest::try_from(tmp_manifest_blob)?; up to (src/server/pull.rs:365). And instead of having the try_block to try to read from the manifest, just call BackupManifest::try_from. I'll see if I can make this work... >also see the comment further below about how the current implementation >is very noisy if snapshots are newly synced as opposed to resynced.. 
> >> } >> >> impl AsRef for BackupDir { >> diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs >> index 6fdc69a9e645..fa9db92f3d11 100644 >> --- a/src/api2/config/sync.rs >> +++ b/src/api2/config/sync.rs >> [snip] >> diff --git a/src/server/pull.rs b/src/server/pull.rs >> index 3117f7d2c960..b2dd15d9d6db 100644 >> --- a/src/server/pull.rs >> +++ b/src/server/pull.rs >> @@ -7,12 +7,14 @@ use std::sync::{Arc, Mutex}; >> use std::time::SystemTime; >> >> use anyhow::{bail, format_err, Error}; >> +use nom::combinator::verify; > >I think this snuck in ;) Oops, my fault :) >> [snip] >> @@ -175,9 +182,10 @@ async fn pull_index_chunks( >> target.cond_touch_chunk(&info.digest, false) >> })?; >> if chunk_exists { >> - //info!("chunk {} exists {}", pos, hex::encode(digest)); >> + //info!("chunk exists {}", hex::encode(info.digest)); > >this > >> return Ok::<_, Error>(()); >> } >> + > >and this as well? Yep, removed both, thanks for the heads-up! >> [snip] >> @@ -325,13 +333,15 @@ async fn pull_single_archive<'a>( >> /// - (Re)download the manifest >> /// -- if it matches, only download log and treat snapshot as already synced >> /// - Iterate over referenced files >> -/// -- if file already exists, verify contents >> +/// -- if file already exists, verify contents or pull again if last >> +/// verification failed and `resync_corrupt` is true >> /// -- if not, pull it from the remote >> /// - Download log if not already existing >> async fn pull_snapshot<'a>( >> reader: Arc, >> snapshot: &'a pbs_datastore::BackupDir, >> downloaded_chunks: Arc>>, >> + resync_corrupt: bool, >> ) -> Result { >> let mut sync_stats = SyncStats::default(); >> let mut manifest_name = snapshot.full_path(); >> @@ -352,6 +362,14 @@ async fn pull_snapshot<'a>( >> return Ok(sync_stats); >> } >> > >I think this part here is somewhat wrong ordering wise, or at least, >unnecessarily expensive.. > >if resync_corrupt is enabled, we want to (in this order!) >- check the local snapshot for corruption, if it exists >- if it is corrupt, we proceed with resyncing >- if not, we only proceed with resyncing if it is the last snapshot in > this group, and return early otherwise > >that way, we avoid redownloading all the manifests.. but see further >below for another issue with the current implementation.. > >> + let must_resync_existing = resync_corrupt >> + && snapshot >> + .verify_state() >> + .inspect_err(|err| { >> + tracing::error!("Failed to check verification state of snapshot: {err:?}") > >2024-11-04T12:34:57+01:00: Failed to check verification state of snapshot: unable to load blob '"/tank/pbs/ns/foobar/ns/test/ns/another_test/vm/900/2023-04-06T14:36:00Z/index.json.blob"' - No such file or directory (os error 2) > >this seems to be very noisy for newly synced snapshots, because the >helper is implemented on BackupInfo instead of on BackupManifest.. True, but I think this is mostly because new backups don't have a verify_state yet (and that doesn't necessarily mean == bad). 
Removed the log line as it's silly anyway :) >> + }) >> + .is_ok_and(|state| state == VerifyState::Failed); >> + >> if manifest_name.exists() { >> let manifest_blob = proxmox_lang::try_block!({ >> let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { >> [snip] >> @@ -528,6 +554,10 @@ async fn pull_group( >> .enumerate() >> .filter(|&(pos, ref dir)| { >> source_snapshots.insert(dir.time); >> + // If resync_corrupt is set, we go through all the remote snapshots >> + if params.resync_corrupt { >> + return true; >> + } > >alternatively, we could check the local manifest here, and only include >existing snapshots with a failed verification state, the last one and >new ones? that way, we'd get more meaningful progress stats as well.. That's true, that would be cleaner. The downside is that we would have open/parse the BackupManifest twice. I could write something like: if params.resync_corrupt { let local_dir = params.target.store.backup_dir(target_ns.clone(), dir.clone()); if let Ok(dir) = local_dir { let verify_state = dir.verify_state(); if verify_state == Some(VerifyState::Failed) { return true; } } } >because right now, this will not only resync existing corrupt snapshots, >but also ones that have been pruned locally, but not on the source >(i.e., the other proposed "fixing" sync mode that syncs "missing" >old snapshots, not just corrupt ones). I'm too stupid to find the mail where this was mentioned/discussed, I'm quite sure we said to just pull both, and then maybe separate them in a future iteration/feature. But now that I think about the flag is named `resync_corrupt` so I'd expect it to only pull in the corrupt snapshots. I actually agree with this change, it probably is also more performant (reading backup_manifest twice is probably faster than pulling lots of unneeded manifests from the remote). >> if last_sync_time > dir.time { >> already_synced_skip_info.update(dir.time); >> return false; >> @@ -566,7 +596,13 @@ async fn pull_group( >> .source >> .reader(source_namespace, &from_snapshot) >> .await?; >> - let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await; >> + let result = pull_snapshot_from( >> + reader, >> + &to_snapshot, >> + downloaded_chunks.clone(), >> + params.resync_corrupt, >> + ) >> + .await; >> >> progress.done_snapshots = pos as u64 + 1; >> info!("percentage done: {progress}"); From f.gruenbichler at proxmox.com Tue Nov 5 08:20:14 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Tue, 5 Nov 2024 08:20:14 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup v2 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: References: <20241018090909.103952-1-g.goller@proxmox.com> <20241018090909.103952-2-g.goller@proxmox.com> <1730717237.hc5rhqwdje.astroid@yuna.none> Message-ID: <2106894461.7273.1730791214926@webmail.proxmox.com> > Gabriel Goller wrote on 04.11.2024 at 17:02 CET: > > > On 04.11.2024 12:51, Fabian Grünbichler wrote: > >this doesn't really do what it says on the tin, see below.
> > > >On October 18, 2024 11:09 am, Gabriel Goller wrote: > >> @@ -528,6 +554,10 @@ async fn pull_group( > >> .enumerate() > >> .filter(|&(pos, ref dir)| { > >> source_snapshots.insert(dir.time); > >> + // If resync_corrupt is set, we go through all the remote snapshots > >> + if params.resync_corrupt { > >> + return true; > >> + } > > > >alternatively, we could check the local manifest here, and only include > >existing snapshots with a failed verification state, the last one and > >new ones? that way, we'd get more meaningful progress stats as well.. > > That's true, that would be cleaner. The downside is that we would have > open/parse the BackupManifest twice. > > I could write something like: > > if params.resync_corrupt { > let local_dir = params.target.store.backup_dir(target_ns.clone(), dir.clone()); > if let Ok(dir) = local_dir { > let verify_state = dir.verify_state(); > if verify_state == Some(VerifyState::Failed) { > return true; > } > } > } > > >because right now, this will not only resync existing corrupt snapshots, > >but also ones that have been pruned locally, but not on the source > >(i.e., the other proposed "fixing" sync mode that syncs "missing" > >old snapshots, not just corrupt ones). > > I'm too stupid to find the mail where this was mentioned/discussed, I'm > quite sure we said to just pull both, and then maybe separate them in a > future iteration/feature. But now that I think about the flag is named > `resync_corrupt` so I'd expect it to only pull in the corrupt snapshots. > > I actually agree with this change, it probably is also more performant > (reading backup_manifest twice is probably faster than pulling lots of > unneeded manifests from the remote). I think we do want both of these, but they are not a single feature, since the "sync missing snapshots" option would effectively undo any pruning you do on the target side. Their implementation is of course somewhat intertwined, as they both affect the snapshot selection logic. They might even be both enabled and combined with remove_vanished to have a sort of 1:1 repairing replication going on (with all the pros and cons/danger that comes with it). For syncing missing snapshots we might also want to require additional privileges to prevent lesser privileged users from stuffing backup groups (i.e, at least require Prune privs?). From g.goller at proxmox.com Tue Nov 5 11:39:40 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Tue, 5 Nov 2024 11:39:40 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <2106894461.7273.1730791214926@webmail.proxmox.com> References: <20241018090909.103952-1-g.goller@proxmox.com> <20241018090909.103952-2-g.goller@proxmox.com> <1730717237.hc5rhqwdje.astroid@yuna.none> <2106894461.7273.1730791214926@webmail.proxmox.com> Message-ID: I Agree! Sent a v3! From g.goller at proxmox.com Tue Nov 5 11:40:12 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Tue, 5 Nov 2024 11:40:12 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 0/3] fix #3786: resync corrupt chunks in sync-job Message-ID: <20241105104015.162094-1-g.goller@proxmox.com> Add an option `resync-corrupt` that resyncs corrupt snapshots when running sync-job. This option checks if the local snapshot failed the last verification and if it did, overwrites the local snapshot with the remote one. This is quite useful, as we currently don't have an option to "fix" broken chunks/snapshots in any way, even if a healthy version is on another (e.g. 
offsite) instance. Also important to note: this has a slight performance penalty, as all the manifests have to be looked through, and a verification job has to be run beforehand, otherwise we do not know if the snapshot is healthy. Note: This series was originally written by Shannon! I just picked it up, rebased, and fixed the obvious comments on the last series. Changelog v3 (thanks @Fabian): - filter out snapshots earlier in the pull_group function - move verify_state to BackupManifest and fixed invocations - reverted the verify_state Option -> Result change (it doesn't matter if we get an error, we get that quite often, e.g. in new backups) - removed some unnecessary log lines - removed some unnecessary imports and modifications - rebase to current master Changelog v2 (thanks @Thomas): - order git trailers - adjusted schema description to include broken indexes - change verify_state to return a Result<_,_> - print error if verify_state is not able to read the state - update docs on pull_snapshot function - simplify logic by combining flags - move log line out of loop to only print once that we resync the snapshot Changelog since RFC (Shannon's work): - rename option from deep-sync to resync-corrupt - rebase on latest master (and change implementation details, as a lot has changed around sync-jobs) proxmox-backup: Gabriel Goller (3): fix #3786: api: add resync-corrupt option to sync jobs fix #3786: ui/cli: add resync-corrupt option on sync-jobs fix #3786: docs: add resync-corrupt option to sync-job docs/managing-remotes.rst | 6 +++ pbs-api-types/src/jobs.rs | 10 +++++ pbs-datastore/src/backup_info.rs | 12 +++++- pbs-datastore/src/manifest.rs | 13 ++++++- src/api2/config/sync.rs | 4 ++ src/api2/pull.rs | 9 ++++- src/bin/proxmox-backup-manager.rs | 13 ++++++- src/server/pull.rs | 62 +++++++++++++++++++++++-------- www/window/SyncJobEdit.js | 11 ++++++ 9 files changed, 119 insertions(+), 21 deletions(-) Summary over all repositories: 9 files changed, 119 insertions(+), 21 deletions(-) -- Generated by git-murpp 0.7.1 From g.goller at proxmox.com Tue Nov 5 11:40:14 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Tue, 5 Nov 2024 11:40:14 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 2/3] fix #3786: ui/cli: add resync-corrupt option on sync-jobs In-Reply-To: <20241105104015.162094-1-g.goller@proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> Message-ID: <20241105104015.162094-3-g.goller@proxmox.com> Add the `resync-corrupt` option to the ui and the `proxmox-backup-manager` cli. It is listed in the `Advanced` section because it slows the sync-job down and is useless if no verification job was run beforehand.
Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller --- src/bin/proxmox-backup-manager.rs | 9 +++++++++ www/window/SyncJobEdit.js | 11 +++++++++++ 2 files changed, 20 insertions(+) diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index 38a1cf0f5881..08728e9d7250 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -339,6 +339,10 @@ fn task_mgmt_cli() -> CommandLineInterface { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + }, } } )] @@ -355,6 +359,7 @@ async fn pull_datastore( group_filter: Option<Vec<GroupFilter>>, limit: RateLimitConfig, transfer_last: Option<usize>, + resync_corrupt: Option<bool>, param: Value, ) -> Result<Value, Error> { let output_format = get_output_format(&param); @@ -391,6 +396,10 @@ async fn pull_datastore( args["transfer-last"] = json!(transfer_last) } + if let Some(resync_corrupt) = resync_corrupt { + args["resync-corrupt"] = Value::from(resync_corrupt); + } + let mut limit_json = json!(limit); let limit_map = limit_json .as_object_mut() diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js index 6543995e8800..a3c497fc2185 100644 --- a/www/window/SyncJobEdit.js +++ b/www/window/SyncJobEdit.js @@ -321,6 +321,17 @@ Ext.define('PBS.window.SyncJobEdit', { deleteEmpty: '{!isCreate}', }, }, + { + fieldLabel: gettext('Resync corrupt snapshots'), + xtype: 'proxmoxcheckbox', + name: 'resync-corrupt', + autoEl: { + tag: 'div', + 'data-qtip': gettext('Re-sync snapshots whose verification failed.'), + }, + uncheckedValue: false, + value: false, + }, ], }, { -- 2.39.5 From g.goller at proxmox.com Tue Nov 5 11:40:13 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Tue, 5 Nov 2024 11:40:13 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <20241105104015.162094-1-g.goller@proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> Message-ID: <20241105104015.162094-2-g.goller@proxmox.com> This option allows us to "fix" corrupt snapshots (and/or their chunks) by pulling them from another remote. When traversing the remote snapshots, we check if it exists locally, and if it does, we check if its last verification failed. If the local snapshot is broken and the `resync-corrupt` option is turned on, we pull in the remote snapshot, overwriting the local one. This is very useful and has been requested a lot, as there is currently no way to "fix" corrupt chunks/snapshots even if the user has a healthy version of it on their offsite instance.
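The selection logic boils down to the following decision; this is an illustrative sketch only (the real filter additionally updates the skip-info counters and works on the enumerated snapshot list):

```
// None = skip, Some(corrupt) = pull, remembering whether this is a corrupt re-sync
fn select(corrupt_locally: bool, time: i64, last_sync: i64, pos: usize, cutoff: usize) -> Option<bool> {
    if corrupt_locally {
        return Some(true); // local snapshot failed verification, pull it again
    }
    if last_sync > time {
        return None; // already synced
    }
    if pos < cutoff && last_sync != time {
        return None; // excluded by transfer-last
    }
    Some(false)
}
```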
Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller --- pbs-api-types/src/jobs.rs | 10 +++++ pbs-datastore/src/backup_info.rs | 12 +++++- pbs-datastore/src/manifest.rs | 13 ++++++- src/api2/config/sync.rs | 4 ++ src/api2/pull.rs | 9 ++++- src/bin/proxmox-backup-manager.rs | 4 +- src/server/pull.rs | 62 +++++++++++++++++++++++-------- 7 files changed, 93 insertions(+), 21 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 868702bc059e..58f739ad00b5 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -498,6 +498,10 @@ pub const TRANSFER_LAST_SCHEMA: Schema = .minimum(1) .schema(); +pub const RESYNC_CORRUPT_SCHEMA: Schema = + BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") + .schema(); + #[api( properties: { id: { @@ -552,6 +556,10 @@ pub const TRANSFER_LAST_SCHEMA: Schema = schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + } } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -585,6 +593,8 @@ pub struct SyncJobConfig { pub limit: RateLimitConfig, #[serde(skip_serializing_if = "Option::is_none")] pub transfer_last: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub resync_corrupt: Option, } impl SyncJobConfig { diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 414ec878d01a..e6174322dad6 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error}; use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, + Authid, BackupNamespace, BackupType, GroupFilter, VerifyState, BACKUP_DATE_REGEX, + BACKUP_FILE_REGEX, }; use pbs_config::{open_backup_lockfile, BackupLockGuard}; @@ -583,6 +584,15 @@ impl BackupDir { Ok(()) } + + /// Load the verify state from the manifest. + pub fn verify_state(&self) -> Option { + if let Ok(manifest) = self.load_manifest() { + manifest.0.verify_state() + } else { + None + } + } } impl AsRef for BackupDir { diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index c3df014272a0..623c1499c0bb 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; -use pbs_api_types::{BackupType, CryptMode, Fingerprint}; +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState, VerifyState}; use pbs_tools::crypt_config::CryptConfig; pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; @@ -242,6 +242,17 @@ impl BackupManifest { let manifest: BackupManifest = serde_json::from_value(json)?; Ok(manifest) } + + /// Get the verify state of the snapshot + /// + /// Note: New snapshots, which have not been verified yet, do not have a status and this + /// function will return `None`. 
+ pub fn verify_state(&self) -> Option { + let verify = self.unprotected["verify_state"].clone(); + serde_json::from_value::(verify) + .map(|svs| svs.state) + .ok() + } } impl TryFrom for BackupManifest { diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 6fdc69a9e645..fa9db92f3d11 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -368,6 +368,9 @@ pub fn update_sync_job( if let Some(transfer_last) = update.transfer_last { data.transfer_last = Some(transfer_last); } + if let Some(resync_corrupt) = update.resync_corrupt { + data.resync_corrupt = Some(resync_corrupt); + } if update.limit.rate_in.is_some() { data.limit.rate_in = update.limit.rate_in; @@ -527,6 +530,7 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator ns: None, owner: Some(write_auth_id.clone()), comment: None, + resync_corrupt: None, remove_vanished: None, max_depth: None, group_filter: None, diff --git a/src/api2/pull.rs b/src/api2/pull.rs index e733c9839e3a..0d4be0e2d228 100644 --- a/src/api2/pull.rs +++ b/src/api2/pull.rs @@ -10,7 +10,7 @@ use pbs_api_types::{ Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, - TRANSFER_LAST_SCHEMA, + RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, }; use pbs_config::CachedUserInfo; use proxmox_human_byte::HumanByte; @@ -89,6 +89,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters { sync_job.group_filter.clone(), sync_job.limit.clone(), sync_job.transfer_last, + sync_job.resync_corrupt, ) } } @@ -240,6 +241,10 @@ pub fn do_sync_job( schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + }, }, }, access: { @@ -264,6 +269,7 @@ async fn pull( group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -301,6 +307,7 @@ async fn pull( group_filter, limit, transfer_last, + resync_corrupt, )?; // fixme: set to_stdout to false? 
diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index 420e96665662..38a1cf0f5881 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -14,8 +14,8 @@ use pbs_api_types::percent_encoding::percent_encode_component; use pbs_api_types::{ BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, - REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA, - VERIFICATION_OUTDATED_AFTER_SCHEMA, + REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::{display_task_log, view_task_result}; use pbs_config::sync; diff --git a/src/server/pull.rs b/src/server/pull.rs index d9584776ee7f..11a0a9d74cf3 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -12,7 +12,8 @@ use tracing::info; use pbs_api_types::{ print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + RateLimitConfig, Remote, VerifyState, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, + PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -55,6 +56,8 @@ pub(crate) struct PullParameters { group_filter: Vec, /// How many snapshots should be transferred at most (taking the newest N snapshots) transfer_last: Option, + /// Whether to re-sync corrupted snapshots + resync_corrupt: bool, } impl PullParameters { @@ -72,12 +75,14 @@ impl PullParameters { group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, ) -> Result { if let Some(max_depth) = max_depth { ns.check_max_depth(max_depth)?; remote_ns.check_max_depth(max_depth)?; }; let remove_vanished = remove_vanished.unwrap_or(false); + let resync_corrupt = resync_corrupt.unwrap_or(false); let source: Arc = if let Some(remote) = remote { let (remote_config, _digest) = pbs_config::remote::config()?; @@ -116,6 +121,7 @@ impl PullParameters { max_depth, group_filter, transfer_last, + resync_corrupt, }) } } @@ -323,7 +329,7 @@ async fn pull_single_archive<'a>( /// /// Pulling a snapshot consists of the following steps: /// - (Re)download the manifest -/// -- if it matches, only download log and treat snapshot as already synced +/// -- if it matches and is not corrupt, only download log and treat snapshot as already synced /// - Iterate over referenced files /// -- if file already exists, verify contents /// -- if not, pull it from the remote @@ -332,6 +338,7 @@ async fn pull_snapshot<'a>( reader: Arc, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc>>, + corrupt: bool, ) -> Result { let mut sync_stats = SyncStats::default(); let mut manifest_name = snapshot.full_path(); @@ -352,7 +359,7 @@ async fn pull_snapshot<'a>( return Ok(sync_stats); } - if manifest_name.exists() { + if manifest_name.exists() && !corrupt { let manifest_blob = proxmox_lang::try_block!({ let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { format_err!("unable to open local manifest {manifest_name:?} - {err}") @@ -381,7 +388,7 @@ async fn pull_snapshot<'a>( let mut path = snapshot.full_path(); path.push(&item.filename); - if path.exists() { + if !corrupt && path.exists() { match ArchiveType::from_path(&item.filename)? 
{ ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path)?; @@ -443,6 +450,7 @@ async fn pull_snapshot_from<'a>( reader: Arc<dyn SyncSourceReader + 'a>, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, + corrupt: bool, ) -> Result<SyncStats, Error> { let (_path, is_new, _snap_lock) = snapshot .datastore() @@ -451,7 +459,7 @@ async fn pull_snapshot_from<'a>( let sync_stats = if is_new { info!("sync snapshot {}", snapshot.dir()); - match pull_snapshot(reader, snapshot, downloaded_chunks).await { + match pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await { Err(err) => { if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( snapshot.backup_ns(), @@ -468,8 +476,15 @@ async fn pull_snapshot_from<'a>( } } } else { - info!("re-sync snapshot {}", snapshot.dir()); - pull_snapshot(reader, snapshot, downloaded_chunks).await? + if corrupt { + info!( + "re-sync snapshot {} due to bad verification result", + snapshot.dir() + ); + } else { + info!("re-sync snapshot {}", snapshot.dir()); + } + pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await? }; Ok(sync_stats) @@ -523,26 +538,40 @@ async fn pull_group( .last_successful_backup(&target_ns, group)? .unwrap_or(i64::MIN); - let list: Vec<BackupDir> = raw_list + // Filter remote BackupDirs to include in pull + // Also stores if the snapshot is corrupt (verification job failed) + let list: Vec<(BackupDir, bool)> = raw_list .into_iter() .enumerate() - .filter(|&(pos, ref dir)| { + .filter_map(|(pos, dir)| { source_snapshots.insert(dir.time); + // If resync_corrupt is set, check if the corresponding local snapshot failed + // verification + if params.resync_corrupt { + let local_dir = params + .target + .store + .backup_dir(target_ns.clone(), dir.clone()); + if let Ok(local_dir) = local_dir { + let verify_state = local_dir.verify_state(); + if verify_state == Some(VerifyState::Failed) { + return Some((dir, true)); + } + } + } // Note: the snapshot represented by `last_sync_time` might be missing its backup log // or post-backup verification state if those were not yet available during the last // sync run, always resync it if last_sync_time > dir.time { already_synced_skip_info.update(dir.time); - return false; + return None; } - if pos < cutoff && last_sync_time != dir.time { transfer_last_skip_info.update(dir.time); - return false; + return None; } - true + Some((dir, false)) }) - .map(|(_, dir)| dir) .collect(); if already_synced_skip_info.count > 0 { @@ -561,7 +590,7 @@ async fn pull_group( let mut sync_stats = SyncStats::default(); - for (pos, from_snapshot) in list.into_iter().enumerate() { + for (pos, (from_snapshot, corrupt)) in list.into_iter().enumerate() { let to_snapshot = params .target .store @@ -571,7 +600,8 @@ async fn pull_group( .source .reader(source_namespace, &from_snapshot) .await?; - let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await; + let result = + pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone(), corrupt).await; progress.done_snapshots = pos as u64 + 1; info!("percentage done: {progress}"); -- 2.39.5 From g.goller at proxmox.com Tue Nov 5 11:40:15 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Tue, 5 Nov 2024 11:40:15 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 3/3] fix #3786: docs: add resync-corrupt option to sync-job In-Reply-To: <20241105104015.162094-1-g.goller@proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> Message-ID: <20241105104015.162094-4-g.goller@proxmox.com> Add a short section explaining the
`resync-corrupt` option on the sync-job. Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller --- docs/managing-remotes.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/managing-remotes.rst b/docs/managing-remotes.rst index dd43ccd2b79b..e8013b6e2113 100644 --- a/docs/managing-remotes.rst +++ b/docs/managing-remotes.rst @@ -135,6 +135,12 @@ For mixing include and exclude filter, following rules apply: .. note:: The ``protected`` flag of remote backup snapshots will not be synced. +Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have +failed to verify during the last :ref:`maintenance_verification`. Hence, a verification +job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware +that a 'resync-corrupt' job needs to check the manifests of all snapshots in a datastore +and might take much longer than regular sync jobs. + Namespace Support ^^^^^^^^^^^^^^^^^ -- 2.39.5 From c.ebner at proxmox.com Tue Nov 5 15:01:49 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 5 Nov 2024 15:01:49 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] fix #5853: ignore stale files Message-ID: <20241105140153.282980-1-c.ebner@proxmox.com> When files and their associated metadata get invalidated, I/O operations on network filesystems return ESTALE to indicate that the file handle does not reference a valid file anymore. Currently, the proxmox-backup-client does not cover such cases; it will fail with a hard error when a stale file handle is encountered. Any concurrent operation invalidating file handles has the potential to lead to the backups failing if timed accordingly. For local filesystems this is not an issue, as the file remains accessible until the file handle is closed. Make the backup client more resilient by handling the ESTALE errors gracefully, warning the user about the vanished/invalidated files, while generating a valid and consistent backup archive nevertheless. Christian Ebner (4): client: pxar: skip directories on stale file handle client: pxar: skip directory entries on stale file handle client: pxar: warn user and ignore stale file handles on file open fix #5853: client: pxar: exclude stale files on metadata read pbs-client/src/pxar/create.rs | 155 +++++++++++++++++++++++----------- 1 file changed, 108 insertions(+), 47 deletions(-) -- 2.39.5 From c.ebner at proxmox.com Tue Nov 5 15:01:50 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 5 Nov 2024 15:01:50 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/4] client: pxar: skip directories on stale file handle In-Reply-To: <20241105140153.282980-1-c.ebner@proxmox.com> References: <20241105140153.282980-1-c.ebner@proxmox.com> Message-ID: <20241105140153.282980-2-c.ebner@proxmox.com> Skip over the whole directory in case the file handle was invalidated and therefore the filesystem type check returns with ESTALE. Encode the directory start entry in the archive and the catalog only after the filesystem type check, so the directory can be fully skipped. At this point it is still possible to ignore the invalidated directory. If the directory is invalidated afterwards, it will be backed up only partially.
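The pattern used throughout the series is to map ESTALE to a warn-and-skip instead of propagating the error. As a self-contained sketch (skip_if_stale is a hypothetical helper; the actual patches match on Errno::ESTALE inline):

```
use nix::errno::Errno;

// Turn ESTALE into `None`, so callers can skip the entry with a warning
// instead of failing the whole backup run.
fn skip_if_stale<T>(res: Result<T, Errno>, path: &std::path::Path) -> Result<Option<T>, Errno> {
    match res {
        Ok(value) => Ok(Some(value)),
        Err(Errno::ESTALE) => {
            log::warn!("encountered stale file handle, skipping {path:?}");
            Ok(None)
        }
        Err(other) => Err(other),
    }
}
```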
Signed-off-by: Christian Ebner --- pbs-client/src/pxar/create.rs | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index c48524c4c..a2b9b3e30 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -72,7 +72,7 @@ pub struct PxarPrevRef { pub archive_name: String, } -fn detect_fs_type(fd: RawFd) -> Result { +fn detect_fs_type(fd: RawFd) -> Result { let mut fs_stat = std::mem::MaybeUninit::uninit(); let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) }; Errno::result(res)?; @@ -1169,20 +1169,23 @@ impl Archiver { ) -> Result<(), Error> { let dir_name = OsStr::from_bytes(c_dir_name.to_bytes()); - if !self.cache.caching_enabled() { - if let Some(ref catalog) = self.catalog { - catalog.lock().unwrap().start_directory(c_dir_name)?; - } - encoder.create_directory(dir_name, metadata).await?; - } - let old_fs_magic = self.fs_magic; let old_fs_feature_flags = self.fs_feature_flags; let old_st_dev = self.current_st_dev; let mut skip_contents = false; if old_st_dev != stat.st_dev { - self.fs_magic = detect_fs_type(dir.as_raw_fd())?; + match detect_fs_type(dir.as_raw_fd()) { + Ok(fs_magic) => self.fs_magic = fs_magic, + Err(Errno::ESTALE) => { + log::warn!( + "encountered stale file handle, skipping directory: {:?}", + self.path + ); + return Ok(()); + } + Err(err) => return Err(err.into()), + } self.fs_feature_flags = Flags::from_magic(self.fs_magic); self.current_st_dev = stat.st_dev; @@ -1193,6 +1196,13 @@ impl Archiver { } } + if !self.cache.caching_enabled() { + if let Some(ref catalog) = self.catalog { + catalog.lock().unwrap().start_directory(c_dir_name)?; + } + encoder.create_directory(dir_name, metadata).await?; + } + let result = if skip_contents { log::info!("skipping mount point: {:?}", self.path); Ok(()) -- 2.39.5 From c.ebner at proxmox.com Tue Nov 5 15:01:53 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 5 Nov 2024 15:01:53 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 4/4] fix #5853: client: pxar: exclude stale files on metadata read In-Reply-To: <20241105140153.282980-1-c.ebner@proxmox.com> References: <20241105140153.282980-1-c.ebner@proxmox.com> Message-ID: <20241105140153.282980-5-c.ebner@proxmox.com> Skip and warn the user for files which returned a stale file handle error while reading the metadata associated to that file. Instead of returning with an error when getting the metadata, return a boolean flag signaling if a stale file handle has been encountered. 
Link to issue in bugtracker: https://bugzilla.proxmox.com/show_bug.cgi?id=5853 Link to thread in community forum: https://forum.proxmox.com/threads/156822/ Signed-off-by: Christian Ebner --- pbs-client/src/pxar/create.rs | 100 ++++++++++++++++++++++------------ 1 file changed, 66 insertions(+), 34 deletions(-) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index 2a844922c..85be00db4 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -228,7 +228,7 @@ where let mut fs_feature_flags = Flags::from_magic(fs_magic); let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?; - let metadata = get_metadata( + let (metadata, stale_fd) = get_metadata( source_dir.as_raw_fd(), &stat, feature_flags & fs_feature_flags, @@ -744,7 +744,7 @@ impl Archiver { return Ok(()); } - let metadata = get_metadata( + let (metadata, stale_fd) = get_metadata( fd.as_raw_fd(), stat, self.flags(), @@ -753,6 +753,11 @@ impl Archiver { self.skip_e2big_xattr, )?; + if stale_fd { + log::warn!("Stale filehandle encountered, skip {:?}", self.path); + return Ok(()); + } + if self.previous_payload_index.is_none() { return self .add_entry_to_archive(encoder, &mut None, c_file_name, stat, fd, &metadata, None) @@ -1301,7 +1306,14 @@ impl Archiver { file_name: &Path, metadata: &Metadata, ) -> Result<(), Error> { - let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?; + let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) { + Ok(dest) => dest, + Err(Errno::ESTALE) => { + log::warn!("Stale file handle encountered, skip {file_name:?}"); + return Ok(()); + } + Err(err) => return Err(err.into()), + }; encoder.add_symlink(metadata, file_name, dest).await?; Ok(()) } @@ -1397,9 +1409,10 @@ fn get_metadata( fs_magic: i64, fs_feature_flags: &mut Flags, skip_e2big_xattr: bool, -) -> Result { +) -> Result<(Metadata, bool), Error> { // required for some of these let proc_path = Path::new("/proc/self/fd/").join(fd.to_string()); + let mut stale_fd = false; let mut meta = Metadata { stat: pxar::Stat { @@ -1412,18 +1425,27 @@ fn get_metadata( ..Default::default() }; - get_xattr_fcaps_acl( + if get_xattr_fcaps_acl( &mut meta, fd, &proc_path, flags, fs_feature_flags, skip_e2big_xattr, - )?; - get_chattr(&mut meta, fd)?; + )? { + stale_fd = true; + log::warn!("Stale filehandle, xattrs incomplete"); + } + if get_chattr(&mut meta, fd)? { + stale_fd = true; + log::warn!("Stale filehandle, chattr incomplete"); + } get_fat_attr(&mut meta, fd, fs_magic)?; - get_quota_project_id(&mut meta, fd, flags, fs_magic)?; - Ok(meta) + if get_quota_project_id(&mut meta, fd, flags, fs_magic)? 
{ + stale_fd = true; + log::warn!("Stale filehandle, quota project id incomplete"); + } + Ok((meta, stale_fd)) } fn get_fcaps( @@ -1431,22 +1453,23 @@ fn get_fcaps( fd: RawFd, flags: Flags, fs_feature_flags: &mut Flags, -) -> Result<(), Error> { +) -> Result { if !flags.contains(Flags::WITH_FCAPS) { - return Ok(()); + return Ok(false); } match xattr::fgetxattr(fd, xattr::XATTR_NAME_FCAPS) { Ok(data) => { meta.fcaps = Some(pxar::format::FCaps { data }); - Ok(()) + Ok(false) } - Err(Errno::ENODATA) => Ok(()), + Err(Errno::ENODATA) => Ok(false), Err(Errno::EOPNOTSUPP) => { fs_feature_flags.remove(Flags::WITH_FCAPS); - Ok(()) + Ok(false) } - Err(Errno::EBADF) => Ok(()), // symlinks + Err(Errno::EBADF) => Ok(false), // symlinks + Err(Errno::ESTALE) => Ok(true), Err(err) => Err(err).context("failed to read file capabilities"), } } @@ -1458,32 +1481,35 @@ fn get_xattr_fcaps_acl( flags: Flags, fs_feature_flags: &mut Flags, skip_e2big_xattr: bool, -) -> Result<(), Error> { +) -> Result { if !flags.contains(Flags::WITH_XATTRS) { - return Ok(()); + return Ok(false); } let xattrs = match xattr::flistxattr(fd) { Ok(names) => names, Err(Errno::EOPNOTSUPP) => { fs_feature_flags.remove(Flags::WITH_XATTRS); - return Ok(()); + return Ok(false); } Err(Errno::E2BIG) => { match skip_e2big_xattr { - true => return Ok(()), + true => return Ok(false), false => { bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); } }; } - Err(Errno::EBADF) => return Ok(()), // symlinks + Err(Errno::EBADF) => return Ok(false), // symlinks + Err(Errno::ESTALE) => return Ok(true), Err(err) => return Err(err).context("failed to read xattrs"), }; for attr in &xattrs { if xattr::is_security_capability(attr) { - get_fcaps(meta, fd, flags, fs_feature_flags)?; + if get_fcaps(meta, fd, flags, fs_feature_flags)? 
{ + return Ok(true); + } continue; } @@ -1505,35 +1531,37 @@ fn get_xattr_fcaps_acl( Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either Err(Errno::E2BIG) => { match skip_e2big_xattr { - true => return Ok(()), + true => return Ok(false), false => { bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); } }; } + Err(Errno::ESTALE) => return Ok(true), // symlinks Err(err) => { return Err(err).context(format!("error reading extended attribute {attr:?}")) } } } - Ok(()) + Ok(false) } -fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> { +fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result { let mut attr: libc::c_long = 0; match unsafe { fs::read_attr_fd(fd, &mut attr) } { Ok(_) => (), + Err(Errno::ESTALE) => return Ok(true), Err(errno) if errno_is_unsupported(errno) => { - return Ok(()); + return Ok(false); } Err(err) => return Err(err).context("failed to read file attributes"), } metadata.stat.flags |= Flags::from_chattr(attr).bits(); - Ok(()) + Ok(false) } fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> { @@ -1564,30 +1592,34 @@ fn get_quota_project_id( fd: RawFd, flags: Flags, magic: i64, -) -> Result<(), Error> { +) -> Result { if !(metadata.is_dir() || metadata.is_regular_file()) { - return Ok(()); + return Ok(false); } if !flags.contains(Flags::WITH_QUOTA_PROJID) { - return Ok(()); + return Ok(false); } use proxmox_sys::linux::magic::*; match magic { EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (), - _ => return Ok(()), + _ => return Ok(false), } let mut fsxattr = fs::FSXAttr::default(); let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) }; + if let Err(Errno::ESTALE) = res { + return Ok(true); + } + // On some FUSE filesystems it can happen that ioctl is not supported. // For these cases projid is set to 0 while the error is ignored. if let Err(errno) = res { if errno_is_unsupported(errno) { - return Ok(()); + return Ok(false); } else { return Err(errno).context("error while reading quota project id"); } @@ -1597,7 +1629,7 @@ fn get_quota_project_id( if projid != 0 { metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid }); } - Ok(()) + Ok(false) } fn get_acl( @@ -1840,7 +1872,7 @@ mod tests { let fs_magic = detect_fs_type(dir.as_raw_fd()).unwrap(); let stat = nix::sys::stat::fstat(dir.as_raw_fd()).unwrap(); let mut fs_feature_flags = Flags::from_magic(fs_magic); - let metadata = get_metadata( + let (metadata, _) = get_metadata( dir.as_raw_fd(), &stat, fs_feature_flags, @@ -1937,7 +1969,7 @@ mod tests { let stat = nix::sys::stat::fstat(source_dir.as_raw_fd()).unwrap(); let mut fs_feature_flags = Flags::from_magic(fs_magic); - let metadata = get_metadata( + let (metadata, _) = get_metadata( source_dir.as_raw_fd(), &stat, fs_feature_flags, -- 2.39.5 From c.ebner at proxmox.com Tue Nov 5 15:01:52 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 5 Nov 2024 15:01:52 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/4] client: pxar: warn user and ignore stale file handles on file open In-Reply-To: <20241105140153.282980-1-c.ebner@proxmox.com> References: <20241105140153.282980-1-c.ebner@proxmox.com> Message-ID: <20241105140153.282980-4-c.ebner@proxmox.com> Do not fail hard if a file open fails because of a stale file handle. Warn the user and ignore the file, just like the client already does in case of missing privileges to access the file. 
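Conceptually, stale file handles simply join the set of open errors that degrade to "file skipped, user warned" instead of aborting the run (sketch, assuming the Errno type from the nix crate):

```
use nix::errno::Errno;

// Open errors that skip the file instead of failing the backup.
fn is_ignorable_open_error(errno: Errno) -> bool {
    matches!(errno, Errno::EACCES | Errno::ESTALE)
}
```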
Signed-off-by: Christian Ebner --- pbs-client/src/pxar/create.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index 8685e8d42..2a844922c 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -484,6 +484,10 @@ impl Archiver { log::warn!("failed to open file: {:?}: access denied", file_name); Ok(None) } + Err(Errno::ESTALE) => { + log::warn!("failed to open file: {file_name:?}: stale file handle"); + Ok(None) + } Err(Errno::EPERM) if !noatime.is_empty() => { // Retry without O_NOATIME: noatime = OFlag::empty(); -- 2.39.5 From c.ebner at proxmox.com Tue Nov 5 15:01:51 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 5 Nov 2024 15:01:51 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/4] client: pxar: skip directory entries on stale file handle In-Reply-To: <20241105140153.282980-1-c.ebner@proxmox.com> References: <20241105140153.282980-1-c.ebner@proxmox.com> Message-ID: <20241105140153.282980-3-c.ebner@proxmox.com> Skip over the entries when a stale file handle is encountered during generation of the entry list of a directory entry. This will lead to the directory not being backed up if the directory itself was invalidated, as then reading all child entries will fail also, or the directory is backed up without entries which have been invalidated. Signed-off-by: Christian Ebner --- pbs-client/src/pxar/create.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index a2b9b3e30..8685e8d42 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -638,15 +638,30 @@ impl Archiver { Ok(Some(MatchType::Exclude)) => continue, Ok(_) => (), Err(err) if err.not_found() => continue, + Err(Errno::ESTALE) => { + log::warn!("stale file handle, skip {full_path:?}"); + continue; + } Err(err) => { return Err(err).with_context(|| format!("stat failed on {full_path:?}")) } } - let stat = stat_results - .map(Ok) - .unwrap_or_else(get_file_mode) - .with_context(|| format!("stat failed on {full_path:?}"))?; + let stat = match stat_results { + Some(mode) => mode, + None => match get_file_mode() { + Ok(mode) => mode, + Err(Errno::ESTALE) => { + log::warn!("stale file handle, skip {full_path:?}"); + continue; + } + Err(err) => { + return Err( + Error::from(err).context(format!("stat failed on {full_path:?}")) + ) + } + }, + }; self.entry_counter += 1; if self.entry_counter > self.entry_limit { -- 2.39.5 From h.laimer at proxmox.com Wed Nov 6 11:45:12 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 6 Nov 2024 11:45:12 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] api: tape: add permission to move_tape endpoint Message-ID: <20241106104512.41479-1-h.laimer@proxmox.com> ... so it is usable by non-root users, this came up in support. 
Signed-off-by: Hannes Laimer --- This came up in enterprise support, but it also makes sense generally src/api2/tape/media.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api2/tape/media.rs b/src/api2/tape/media.rs index a7c8483a..2ed3e961 100644 --- a/src/api2/tape/media.rs +++ b/src/api2/tape/media.rs @@ -9,7 +9,8 @@ use proxmox_uuid::Uuid; use pbs_api_types::{ Authid, MediaContentEntry, MediaContentListFilter, MediaListEntry, MediaPoolConfig, MediaSetListEntry, MediaStatus, CHANGER_NAME_SCHEMA, MEDIA_LABEL_SCHEMA, - MEDIA_POOL_NAME_SCHEMA, MEDIA_UUID_SCHEMA, PRIV_TAPE_AUDIT, VAULT_NAME_SCHEMA, + MEDIA_POOL_NAME_SCHEMA, MEDIA_UUID_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, + VAULT_NAME_SCHEMA, }; use pbs_config::CachedUserInfo; @@ -305,6 +306,9 @@ pub async fn list_media( }, }, }, + access: { + permission: &Permission::Privilege(&["tape"], PRIV_TAPE_MODIFY, false), + }, )] /// Change Tape location to vault (if given), or offline. pub fn move_tape( -- 2.39.5 From f.gruenbichler at proxmox.com Wed Nov 6 12:57:45 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Wed, 06 Nov 2024 12:57:45 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 13/29] fix #3044: server: implement push support for sync operations In-Reply-To: <20241031121519.434337-14-c.ebner@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-14-c.ebner@proxmox.com> Message-ID: <173089426545.79072.10424056569024402158@yuna.proxmox.com> Quoting Christian Ebner (2024-10-31 13:15:03) > Adds the functionality required to push datastore contents from a > source to a remote target. > This includes syncing of the namespaces, backup groups and snapshots > based on the provided filters as well as removing vanished contents > from the target when requested. > > While trying to mimic the pull direction of sync jobs, the > implementation is different as access to the remote must be performed > via the REST API, not needed for the pull job which can access the > local datastore via the filesystem directly. > > Signed-off-by: Christian Ebner > --- > changes since version 5: > - fetch backup groups split by owned and not owned, only allow to push > to owned groups, don't allow to prune not owned groups. > - store remote api version unconditionally > - check for supported feature instead of api version to include conditional > parameters for api calls > - directly use delete stats from api calls, since these are not followup > patches anymore > > src/server/mod.rs | 1 + > src/server/push.rs | 980 +++++++++++++++++++++++++++++++++++++++++++++ > 2 files changed, 981 insertions(+) > create mode 100644 src/server/push.rs > > diff --git a/src/server/mod.rs b/src/server/mod.rs > index 2e40bde3c..7c14ed4b8 100644 > --- a/src/server/mod.rs > +++ b/src/server/mod.rs > @@ -36,6 +36,7 @@ pub mod auth; > pub mod metric_collection; > > pub(crate) mod pull; > +pub(crate) mod push; > pub(crate) mod sync; > > pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> { > diff --git a/src/server/push.rs b/src/server/push.rs > new file mode 100644 > index 000000000..c38e9c96b > --- /dev/null > +++ b/src/server/push.rs > @@ -0,0 +1,980 @@ > +//! 
Sync datastore by pushing contents to remote server > + > +use std::cmp::Ordering; > +use std::collections::HashSet; > +use std::sync::{Arc, Mutex}; > + > +use anyhow::{bail, format_err, Error}; > +use futures::stream::{self, StreamExt, TryStreamExt}; > +use tokio::sync::mpsc; > +use tokio_stream::wrappers::ReceiverStream; > +use tracing::{info, warn}; > + > +use pbs_api_types::{ > + print_store_and_ns, ApiVersion, ApiVersionInfo, Authid, BackupDir, BackupGroup, > + BackupGroupDeleteStats, BackupNamespace, CryptMode, GroupFilter, GroupListItem, > + NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, > + PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE, > +}; > +use pbs_client::{BackupRepository, BackupWriter, HttpClient, MergedChunkInfo, UploadOptions}; > +use pbs_config::CachedUserInfo; > +use pbs_datastore::data_blob::ChunkInfo; > +use pbs_datastore::dynamic_index::DynamicIndexReader; > +use pbs_datastore::fixed_index::FixedIndexReader; > +use pbs_datastore::index::IndexFile; > +use pbs_datastore::manifest::{ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; > +use pbs_datastore::read_chunk::AsyncReadChunk; > +use pbs_datastore::{BackupManifest, DataStore, StoreProgress}; > + > +use super::sync::{ > + check_namespace_depth_limit, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason, > + SyncSource, SyncStats, > +}; > +use crate::api2::config::remote; > + > +/// Target for backups to be pushed to > +pub(crate) struct PushTarget { > + // Remote as found in remote.cfg > + remote: Remote, > + // Target repository on remote > + repo: BackupRepository, > + // Target namespace on remote > + ns: BackupNamespace, > + // Http client to connect to remote > + client: HttpClient, > + // Api version reported by the target > + api_version: ApiVersion, > +} > + > +impl PushTarget { > + fn remote_user(&self) -> Authid { > + self.remote.config.auth_id.clone() > + } > +} > + > +/// Parameters for a push operation > +pub(crate) struct PushParameters { > + /// Source of backups to be pushed to remote > + source: Arc, > + /// Target for backups to be pushed to > + target: PushTarget, > + /// Local user limiting the accessible source contents, makes sure that the sync job sees the > + /// same source content when executed by different users with different privileges > + /// User as which the job gets executed, requires the permissions on the remote this now has two sentences which looks kinda weird. maybe they could be combined into a single one, with the longer explanation of the semantics and caveats in the documentation? e.g., User used for permission checks on the source side, including potentially filtering visible namespaces and backup groups. > + local_user: Authid, > + /// Whether to remove groups which exist locally, but not on the remote end > + remove_vanished: bool, groups and namespaces? > + /// How many levels of sub-namespaces to push (0 == no recursion, None == maximum recursion) > + max_depth: Option, > + /// Filters for reducing the push scope > + group_filter: Vec, > + /// How many snapshots should be transferred at most (taking the newest N snapshots) > + transfer_last: Option, > +} > + > +impl PushParameters { > + /// Creates a new instance of `PushParameters`. 
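e.g., something like this as a sketch:

```
/// User used for permission checks on the source side, including potentially
/// filtering visible namespaces and backup groups.
local_user: Authid,
/// Whether to remove groups/namespaces which exist locally, but not on the remote end
remove_vanished: bool,
```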
> + #[allow(clippy::too_many_arguments)] > + pub(crate) async fn new( > + store: &str, > + ns: BackupNamespace, > + remote_id: &str, > + remote_store: &str, > + remote_ns: BackupNamespace, > + local_user: Authid, > + remove_vanished: Option, > + max_depth: Option, > + group_filter: Option>, > + limit: RateLimitConfig, > + transfer_last: Option, > + ) -> Result { > + if let Some(max_depth) = max_depth { > + ns.check_max_depth(max_depth)?; > + remote_ns.check_max_depth(max_depth)?; > + }; > + let remove_vanished = remove_vanished.unwrap_or(false); > + > + let source = Arc::new(LocalSource { > + store: DataStore::lookup_datastore(store, Some(Operation::Read))?, > + ns, > + }); > + > + let (remote_config, _digest) = pbs_config::remote::config()?; > + let remote: Remote = remote_config.lookup("remote", remote_id)?; > + > + let repo = BackupRepository::new( > + Some(remote.config.auth_id.clone()), > + Some(remote.config.host.clone()), > + remote.config.port, > + remote_store.to_string(), > + ); > + > + let client = remote::remote_client_config(&remote, Some(limit))?; > + > + let mut result = client.get("api2/json/version", None).await?; > + let data = result["data"].take(); > + let version_info: ApiVersionInfo = serde_json::from_value(data)?; > + let api_version = ApiVersion::try_from(version_info)?; > + let target = PushTarget { > + remote, > + repo, > + ns: remote_ns, > + client, > + api_version, > + }; > + let group_filter = group_filter.unwrap_or_default(); > + > + Ok(Self { > + source, > + target, > + local_user, > + remove_vanished, > + max_depth, > + group_filter, > + transfer_last, > + }) > + } > + > + // Map the given namespace from source to target by adapting the prefix > + fn map_to_target(&self, namespace: &BackupNamespace) -> Result { > + namespace.map_prefix(&self.source.ns, &self.target.ns) > + } > +} > + > +// Check if the job user given in the push parameters has the provided privs on the remote > +// datastore namespace > +fn check_ns_remote_datastore_privs( > + params: &PushParameters, > + namespace: &BackupNamespace, what is this namespace referring to? shouldn't it be the actual namespace on the target side? if so, I'd rename it to `target_namespace` and adapt the call sites accordingly to pass in already mapped namespaces.. i.e., if I push a local namespace foo/bar into a remote namespace bar/baz, it doesn't really make sense to make the remote ACL path about the local namespace? > + privs: u64, > +) -> Result<(), Error> { > + let user_info = CachedUserInfo::new()?; > + let mut acl_path: Vec<&str> = vec![ > + "remote", > + ¶ms.target.remote.name, > + params.target.repo.store(), > + ]; > + > + if !namespace.is_root() { > + let ns_components: Vec<&str> = namespace.components().collect(); > + acl_path.extend(ns_components); > + } let acl_path = namespace.remote_acl_path(..) 
;) > + user_info.check_privs(¶ms.local_user, &acl_path, privs, false)?; > + > + Ok(()) > +} > + > +// Fetch the list of namespaces found on target > +async fn fetch_target_namespaces(params: &PushParameters) -> Result, Error> { > + let api_path = format!( > + "api2/json/admin/datastore/{store}/namespace", > + store = params.target.repo.store(), > + ); > + let mut result = params.target.client.get(&api_path, None).await?; > + let namespaces: Vec = serde_json::from_value(result["data"].take())?; > + let mut namespaces: Vec = namespaces > + .into_iter() > + .map(|namespace| namespace.ns) > + .collect(); > + namespaces.sort_unstable_by_key(|a| a.name_len()); > + > + Ok(namespaces) so this fails for servers not yet supporting namespaces.. which is fine I guess (those versions are EOL in the meantime), but we might want to - call that out in the docs - check that up front based on the ApiVersion? (this one would actually need to go with the version as base, not a new extra feature, since we can't add that retroactively to all versions supporting namespaces..) > +} > + > +// Remove the provided namespace from the target > +async fn remove_target_namespace( > + params: &PushParameters, > + namespace: &BackupNamespace, should be `target_namespace` as well, based on the other comments above, since all we do with it is check the "remote ACL" and map it for the actual API call.. > +) -> Result { > + if namespace.is_root() { > + bail!("cannot remove root namespace from target"); > + } > + > + check_ns_remote_datastore_privs(params, namespace, PRIV_REMOTE_DATASTORE_MODIFY) > + .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; > + > + let api_path = format!( > + "api2/json/admin/datastore/{store}/namespace", > + store = params.target.repo.store(), > + ); > + > + let target_ns = params.map_to_target(namespace)?; and this mapping should happen at the call site.. > + let mut args = serde_json::json!({ > + "ns": target_ns.name(), > + "delete-groups": true, > + }); > + > + let api_feature_supported = params.target.api_version.supports_feature("prune-delete-stats"); this is done a few times, would it make sense to do it once when initializing the parameters/target and just have a boolean? or even a "PushFeatures" struct/bitmap/.. ? then it could just be logged once at the start of the sync job as well.. > + if api_feature_supported { > + args["error-on-protected"] = serde_json::to_value(false)?; > + } > + > + let mut result = params.target.client.delete(&api_path, Some(args)).await?; > + let data = result["data"].take(); > + let delete_stats: BackupGroupDeleteStats = if api_feature_supported { > + serde_json::from_value(data)? should we add context to a deserialization error here? > + } else { > + serde_json::from_value(data).unwrap_or_else(|_| BackupGroupDeleteStats::default()) isn't this wrong? if the other end doesn't support DeleteStats, how could it return one? this should just return empty stats.. > + }; > + > + Ok(delete_stats) this return could just be part of the if, dropping the corresponding let, just > +} > + > +// Fetch the list of groups found on target in given namespace > +// Returns sorted list of owned groups and a hashset containing not owned backup groups on target. 
> +async fn fetch_target_groups( > + params: &PushParameters, > + namespace: &BackupNamespace, should be `target_namespace` as well > +) -> Result<(Vec, HashSet), Error> { > + let api_path = format!( > + "api2/json/admin/datastore/{store}/groups", > + store = params.target.repo.store(), > + ); > + > + let args = if !namespace.is_root() { else this check here is wrong, we can only skip setting "ns" if the *mapped* namespace is the root one.. but also, I think we don't need this at all - if we don't want to support servers without namespace support, then passing in an empty string for the (mapped) root namespace should be fine? after all, that's what it serializes to and can be deserialized from .. > + let target_ns = params.map_to_target(namespace)?; > + Some(serde_json::json!({ "ns": target_ns.name() })) > + } else { > + None > + }; > + > + let mut result = params.target.client.get(&api_path, args).await?; > + let groups: Vec = serde_json::from_value(result["data"].take())?; > + > + let (mut owned, not_owned) = groups.iter().fold( > + (Vec::new(), HashSet::new()), > + |(mut owned, mut not_owned), group| { > + if let Some(ref owner) = group.owner { > + if params.target.remote_user() == *owner { > + owned.push(group.backup.clone()); > + return (owned, not_owned); > + } > + } > + not_owned.insert(group.backup.clone()); > + (owned, not_owned) > + }, > + ); > + > + owned.sort_unstable_by(|a, b| { > + let type_order = a.ty.cmp(&b.ty); > + if type_order == Ordering::Equal { > + a.id.cmp(&b.id) > + } else { > + type_order > + } > + }); this is copied from pull code, but actually, BackGroup implements cmp::Ord in a more meaningful manner, and both could be switched over to that? > + > + Ok((owned, not_owned)) > +} > + > +// Remove the provided backup group in given namespace from the target > +async fn remove_target_group( > + params: &PushParameters, > + namespace: &BackupNamespace, `target_namespace` again.. because all we do here is check the remote ACL (which should use the mapped namespace) and mapping it to do the API call.. > + backup_group: &BackupGroup, > +) -> Result { > + check_ns_remote_datastore_privs(params, namespace, PRIV_REMOTE_DATASTORE_PRUNE) > + .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; > + > + let api_path = format!( > + "api2/json/admin/datastore/{store}/groups", > + store = params.target.repo.store(), > + ); > + > + let mut args = serde_json::json!({ > + "backup-id": backup_group.id, > + "backup-type": backup_group.ty, > + }); > + > + let api_feature_supported = params.target.api_version.supports_feature("prune-delete-stats"); > + > + if api_feature_supported { > + args["error-on-protected"] = serde_json::to_value(false)?; > + } > + if !namespace.is_root() { this again checks in the wrong order, but the same comment as in fetch_target_groups applies here as well.. > + let target_ns = params.map_to_target(namespace)?; > + args["ns"] = serde_json::to_value(target_ns.name())?; > + } > + > + let mut result = params.target.client.delete(&api_path, Some(args)).await?; > + let data = result["data"].take(); > + let delete_stats: BackupGroupDeleteStats = if api_feature_supported { > + serde_json::from_value(data)? > + } else { > + serde_json::from_value(data).unwrap_or_else(|_| BackupGroupDeleteStats::default()) and here the same comment as with removing namespaces, just return the default stats right away, the server can't have returned one if it doesn't have the feature.. 
> + }; > + Ok(delete_stats) this return could just be part of the if, dropping the corresponding let, just like in remove_target_namespace.. > +} > + > +// Check if the namespace is already present on the target, create it otherwise > +async fn check_or_create_target_namespace( > + params: &PushParameters, > + target_namespaces: &[BackupNamespace], > + namespace: &BackupNamespace, this actually already contains the target_namespace, but it's named like it doesn't.. > +) -> Result { > + let mut created = false; > + > + if !namespace.is_root() && !target_namespaces.contains(namespace) { > + // Namespace not present on target, create namespace. > + // Sub-namespaces have to be created by creating parent components first. > + > + check_ns_remote_datastore_privs(params, namespace, PRIV_REMOTE_DATASTORE_MODIFY) > + .map_err(|err| format_err!("Creating namespace not allowed - {err}"))?; which means that this priv check here operated on different namespace semantics than the others.. > + > + let mut parent = BackupNamespace::root(); > + for component in namespace.components() { > + let current = BackupNamespace::from_parent_ns(&parent, component.to_string())?; > + // Skip over pre-existing parent namespaces on target > + if target_namespaces.contains(¤t) { > + parent = current; > + continue; > + } > + let api_path = format!( > + "api2/json/admin/datastore/{store}/namespace", > + store = params.target.repo.store(), > + ); > + let mut args = serde_json::json!({ "name": component.to_string() }); > + if !parent.is_root() { > + args["parent"] = serde_json::to_value(parent.clone())?; > + } > + if let Err(err) = params.target.client.post(&api_path, Some(args)).await { > + let target_store_and_ns = print_store_and_ns(params.target.repo.store(), ¤t); > + bail!("sync into {target_store_and_ns} failed - namespace creation failed: {err}"); > + } should we add a log line here for created intermediate namespaces? namespace creation won't happen too often, but might be important information.. > + created = true; > + parent = current; > + } > + } > + > + Ok(created) > +} > + > +/// Push contents of source datastore matched by given push parameters to target. > +pub(crate) async fn push_store(mut params: PushParameters) -> Result { > + let mut errors = false; > + if !params.target.api_version.supports_feature("prune-delete-stats") && params.remove_vanished { > + info!("Older api version on remote detected, delete stats might be incomplete"); I think this might be a bit more prominent, but not sure.. or maybe, set a flag in the removedstats and print another info line at the end of the push if it is set? making it a warning is probably overkill, since then every task pushing to an older server would have a warning.. 
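e.g. something roughly like this at the end of push_store (sketch, reusing names from the quoted patch):

```rust
// remember once whether the remote can report delete stats at all, then
// surface a single summary line at the end of the push task instead
let delete_stats_incomplete = params.remove_vanished
    && !params.target.api_version.supports_feature("prune-delete-stats");

// ... sync namespaces, groups and snapshots ...

if delete_stats_incomplete {
    info!("Note: delete statistics may be incomplete, remote runs an older API version");
}
```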
> +    }
> +
> +    // Generate list of source namespaces to push to target, limited by max-depth
> +    let mut namespaces = params.source.list_namespaces(&mut params.max_depth).await?;
> +
> +    check_namespace_depth_limit(&params.source.get_ns(), &params.target.ns, &namespaces)?;
> +
> +    namespaces.sort_unstable_by_key(|a| a.name_len());
> +
> +    // Fetch all accessible namespaces already present on the target
> +    let target_namespaces = fetch_target_namespaces(&params).await?;
> +    // Remember synced namespaces, removing non-synced ones when remove vanished flag is set
> +    let mut synced_namespaces = HashSet::with_capacity(namespaces.len());
> +
> +    let (mut groups, mut snapshots) = (0, 0);
> +    let mut stats = SyncStats::default();
> +    for namespace in namespaces {
> +        let source_store_and_ns = print_store_and_ns(params.source.store.name(), &namespace);
> +        let target_namespace = params.map_to_target(&namespace)?;
> +        let target_store_and_ns = print_store_and_ns(params.target.repo.store(), &target_namespace);
> +
> +        info!("----");
> +        info!("Syncing {source_store_and_ns} into {target_store_and_ns}");
> +
> +        synced_namespaces.insert(target_namespace.clone());
> +
> +        match check_or_create_target_namespace(&params, &target_namespaces, &target_namespace).await
> +        {
> +            Ok(true) => info!("Created namespace {target_namespace}"),
> +            Ok(false) => {}
> +            Err(err) => {
> +                info!("Cannot sync {source_store_and_ns} into {target_store_and_ns} - {err}");
> +                errors = true;
> +                continue;
> +            }
> +        }
> +
> +        match push_namespace(&namespace, &params).await {
> +            Ok((sync_progress, sync_stats, sync_errors)) => {
> +                errors |= sync_errors;
> +                stats.add(sync_stats);
> +
> +                if params.max_depth != Some(0) {
> +                    groups += sync_progress.done_groups;
> +                    snapshots += sync_progress.done_snapshots;
> +
> +                    let ns = if namespace.is_root() {
> +                        "root namespace".into()
> +                    } else {
> +                        format!("namespace {namespace}")
> +                    };
> +                    info!(
> +                        "Finished syncing {ns}, current progress: {groups} groups, {snapshots} snapshots"
> +                    );
> +                }
> +            }
> +            Err(err) => {
> +                errors = true;
> +                info!("Encountered errors while syncing namespace {namespace} - {err}");
> +            }
> +        }
> +    }
> +
> +    if params.remove_vanished {
> +        for target_namespace in target_namespaces {

target_namespaces contains *all* target namespaces as they are called on the target side, not just those below our target "anchor".. this needs additional filtering, else you might remove entirely unrelated namespaces here..

> +            if synced_namespaces.contains(&target_namespace) {
> +                continue;
> +            }
> +            match remove_target_namespace(&params, &target_namespace).await {

see above w.r.t. remove_target_namespace, this is actually wrong with the current code (where remove_target_namespace does yet another mapping), but becomes right once the changes I suggested are incorporated..

> +                Ok(delete_stats) => {
> +                    stats.add(SyncStats::from(RemovedVanishedStats {
> +                        snapshots: delete_stats.removed_snapshots(),
> +                        groups: delete_stats.removed_groups(),
> +                        namespaces: 1,
> +                    }));
> +                    if delete_stats.protected_snapshots() > 0 {
> +                        warn!(
> +                            "kept {protected_count} protected snapshots of namespace '{target_namespace}'",

most output refers to the namespaces as they are called on the source side, should we keep this?

> +                            protected_count = delete_stats.protected_snapshots(),
> +                        );
> +                        continue;
> +                    }
> +                }
> +                Err(err) => {
> +                    warn!("failed to remove vanished namespace {target_namespace} - {err}");

same here..

> +                    continue;
> +                }
> +            }
> +            info!("removed vanished namespace {target_namespace}");

and here..
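coming back to the anchor filtering above, a minimal sketch (assumes a `contains`-style prefix check on BackupNamespace; a complete version probably also has to honor max-depth):

```rust
for target_namespace in target_namespaces {
    // skip namespaces outside of the target anchor namespace entirely,
    // they were never in the scope of this job and must not be pruned
    if !params.target.ns.contains(&target_namespace) {
        continue;
    }
    if synced_namespaces.contains(&target_namespace) {
        continue;
    }
    // ... remove_target_namespace() as before ...
}
```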
> + } > + } > + > + if errors { > + bail!("sync failed with some errors."); > + } > + > + Ok(stats) > +} > + > +/// Push namespace including all backup groups to target > +/// > +/// Iterate over all backup groups in the namespace and push them to the target. > +pub(crate) async fn push_namespace( > + namespace: &BackupNamespace, > + params: &PushParameters, > +) -> Result<(StoreProgress, SyncStats, bool), Error> { > + // Check if user is allowed to perform backups on remote datastore > + check_ns_remote_datastore_privs(params, namespace, PRIV_REMOTE_DATASTORE_BACKUP) this needs to be mapped.. > + .map_err(|err| format_err!("Pushing to remote not allowed - {err}"))?; > + > + let mut list: Vec = params > + .source > + .list_groups(namespace, ¶ms.local_user) > + .await?; > + > + list.sort_unstable_by(|a, b| { > + let type_order = a.ty.cmp(&b.ty); > + if type_order == Ordering::Equal { > + a.id.cmp(&b.id) > + } else { > + type_order > + } > + }); once more, this could just use BackupGroup's impl of Ord.. > + > + let total = list.len(); > + let list: Vec = list > + .into_iter() > + .filter(|group| group.apply_filters(¶ms.group_filter)) > + .collect(); > + > + info!( > + "found {filtered} groups to sync (out of {total} total)", > + filtered = list.len() > + ); > + > + let mut errors = false; > + // Remember synced groups, remove others when the remove vanished flag is set > + let mut synced_groups = HashSet::new(); > + let mut progress = StoreProgress::new(list.len() as u64); > + let mut stats = SyncStats::default(); > + > + let (owned_target_groups, not_owned_target_groups) = > + fetch_target_groups(params, namespace).await?; should use mapped namespace.. > + > + for (done, group) in list.into_iter().enumerate() { > + progress.done_groups = done as u64; > + progress.done_snapshots = 0; > + progress.group_snapshots = 0; > + > + if not_owned_target_groups.contains(&group) { > + warn!("group '{group}' not owned by remote user on target, skip"); should we include the remote user/authid here? you can only end up here if you have Remote.Audit on the remote, so you can already query that anyway.. 
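if so, maybe along these lines (sketch):

```rust
// name the remote auth id the ownership check compared against
warn!(
    "group '{group}' not owned by remote user '{auth_id}' on target, skip",
    auth_id = params.target.remote_user(),
);
```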
> + continue; > + } > + synced_groups.insert(group.clone()); > + > + match push_group(params, namespace, &group, &mut progress).await { this one actually requires the source side namespace, so that's okay (and also why this whole fn can't be switched over to just take the target NS ;)) > + Ok(sync_stats) => stats.add(sync_stats), > + Err(err) => { > + warn!("sync group '{group}' failed - {err}"); > + errors = true; > + } > + } > + } > + > + if params.remove_vanished { > + // only ever allow to prune owned groups on target > + for target_group in owned_target_groups { > + if synced_groups.contains(&target_group) { > + continue; > + } > + if !target_group.apply_filters(¶ms.group_filter) { > + continue; > + } > + > + info!("delete vanished group '{target_group}'"); > + > + match remove_target_group(params, namespace, &target_group).await { this should use the mapped namespace again > + Ok(delete_stats) => { > + if delete_stats.protected_snapshots() > 0 { > + warn!( > + "kept {protected_count} protected snapshots of group '{target_group}'", > + protected_count = delete_stats.protected_snapshots(), > + ); > + } > + stats.add(SyncStats::from(RemovedVanishedStats { > + snapshots: delete_stats.removed_snapshots(), > + groups: delete_stats.removed_groups(), > + namespaces: 0, > + })); > + } > + Err(err) => { > + warn!("failed to delete vanished group - {err}"); > + errors = true; > + continue; > + } > + } > + } > + } > + > + Ok((progress, stats, errors)) > +} > + > +async fn fetch_target_snapshots( > + params: &PushParameters, > + namespace: &BackupNamespace, this should use the mapped/target_namespace > + group: &BackupGroup, > +) -> Result, Error> { > + let api_path = format!( > + "api2/json/admin/datastore/{store}/snapshots", > + store = params.target.repo.store(), > + ); > + let mut args = serde_json::to_value(group)?; > + if !namespace.is_root() { > + let target_ns = params.map_to_target(namespace)?; > + args["ns"] = serde_json::to_value(target_ns)?; > + } > + let mut result = params.target.client.get(&api_path, Some(args)).await?; > + let snapshots: Vec = serde_json::from_value(result["data"].take())?; > + > + Ok(snapshots) > +} > + > +async fn fetch_previous_backup_time( > + params: &PushParameters, > + namespace: &BackupNamespace, target_namespace as well.. > + group: &BackupGroup, > +) -> Result, Error> { > + let mut snapshots = fetch_target_snapshots(params, namespace, group).await?; > + snapshots.sort_unstable_by(|a, b| a.backup.time.cmp(&b.backup.time)); > + Ok(snapshots.last().map(|snapshot| snapshot.backup.time)) > +} > + > +async fn forget_target_snapshot( > + params: &PushParameters, > + namespace: &BackupNamespace, target_namespace as well.. > + snapshot: &BackupDir, > +) -> Result<(), Error> { > + check_ns_remote_datastore_privs(params, namespace, PRIV_REMOTE_DATASTORE_PRUNE) > + .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; > + > + let api_path = format!( > + "api2/json/admin/datastore/{store}/snapshots", > + store = params.target.repo.store(), > + ); > + let mut args = serde_json::to_value(snapshot)?; > + if !namespace.is_root() { > + let target_ns = params.map_to_target(namespace)?; > + args["ns"] = serde_json::to_value(target_ns)?; > + } > + params.target.client.delete(&api_path, Some(args)).await?; > + > + Ok(()) > +} > + > +/// Push group including all snaphshots to target > +/// > +/// Iterate over all snapshots in the group and push them to the target. 
> +/// The group sync operation consists of the following steps: > +/// - Query snapshots of given group from the source > +/// - Sort snapshots by time > +/// - Apply transfer last cutoff and filters to list > +/// - Iterate the snapshot list and push each snapshot individually > +/// - (Optional): Remove vanished groups if `remove_vanished` flag is set > +pub(crate) async fn push_group( > + params: &PushParameters, > + namespace: &BackupNamespace, > + group: &BackupGroup, > + progress: &mut StoreProgress, > +) -> Result { > + let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced); > + let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast); > + > + let mut snapshots: Vec = params.source.list_backup_dirs(namespace, group).await?; > + snapshots.sort_unstable_by(|a, b| a.time.cmp(&b.time)); > + > + let total_snapshots = snapshots.len(); > + let cutoff = params > + .transfer_last > + .map(|count| total_snapshots.saturating_sub(count)) > + .unwrap_or_default(); > + > + let last_snapshot_time = fetch_previous_backup_time(params, namespace, group) > + .await? > + .unwrap_or(i64::MIN); > + > + let mut source_snapshots = HashSet::new(); > + let snapshots: Vec = snapshots > + .into_iter() > + .enumerate() > + .filter(|&(pos, ref snapshot)| { > + source_snapshots.insert(snapshot.time); > + if last_snapshot_time > snapshot.time { > + already_synced_skip_info.update(snapshot.time); > + return false; > + } else if already_synced_skip_info.count > 0 { > + info!("{already_synced_skip_info}"); > + already_synced_skip_info.reset(); > + return true; didn't you just discover that this return here is wrong? ;) > + } > + > + if pos < cutoff && last_snapshot_time != snapshot.time { > + transfer_last_skip_info.update(snapshot.time); > + return false; does the last_snapshot_time vs snapshot.time check make sense for push? we can't overwrite the existing manifest in that case, all we could do would be uploading a log that is missing.. but we don't currently attempt that (continued below..) 
> + } else if transfer_last_skip_info.count > 0 { > + info!("{transfer_last_skip_info}"); > + transfer_last_skip_info.reset(); > + } > + true > + }) > + .map(|(_, dir)| dir) > + .collect(); > + > + progress.group_snapshots = snapshots.len() as u64; > + > + let target_snapshots = fetch_target_snapshots(params, namespace, group).await?; > + let target_snapshots: Vec = target_snapshots > + .into_iter() > + .map(|snapshot| snapshot.backup) > + .collect(); > + > + let mut stats = SyncStats::default(); > + let mut fetch_previous_manifest = !target_snapshots.is_empty(); > + for (pos, source_snapshot) in snapshots.into_iter().enumerate() { > + if target_snapshots.contains(&source_snapshot) { > + progress.done_snapshots = pos as u64 + 1; > + info!("percentage done: {progress}"); > + continue; because any existing snapshots are skipped here ;) > + } > + let result = > + push_snapshot(params, namespace, &source_snapshot, fetch_previous_manifest).await; > + fetch_previous_manifest = true; > + > + progress.done_snapshots = pos as u64 + 1; > + info!("percentage done: {progress}"); > + > + // stop on error > + let sync_stats = result?; > + stats.add(sync_stats); > + } > + > + if params.remove_vanished { > + let target_snapshots = fetch_target_snapshots(params, namespace, group).await?; should use target_namespace > + for snapshot in target_snapshots { > + if source_snapshots.contains(&snapshot.backup.time) { > + continue; > + } > + if snapshot.protected { > + info!( > + "don't delete vanished snapshot {name} (protected)", > + name = snapshot.backup > + ); > + continue; > + } > + if let Err(err) = forget_target_snapshot(params, namespace, &snapshot.backup).await { should use target_namespace > + info!( > + "could not delete vanished snapshot {name} - {err}", > + name = snapshot.backup > + ); > + } > + info!("delete vanished snapshot {name}", name = snapshot.backup); > + stats.add(SyncStats::from(RemovedVanishedStats { > + snapshots: 1, > + groups: 0, > + namespaces: 0, > + })); > + } > + } > + > + Ok(stats) > +} > + > +/// Push snapshot to target > +/// > +/// Creates a new snapshot on the target and pushes the content of the source snapshot to the > +/// target by creating a new manifest file and connecting to the remote as backup writer client. > +/// Chunks are written by recreating the index by uploading the chunk stream as read from the > +/// source. Data blobs are uploaded as such. > +pub(crate) async fn push_snapshot( > + params: &PushParameters, > + namespace: &BackupNamespace, > + snapshot: &BackupDir, > + fetch_previous_manifest: bool, > +) -> Result { > + let mut stats = SyncStats::default(); > + let target_ns = params.map_to_target(namespace)?; > + let backup_dir = params > + .source > + .store > + .backup_dir(namespace.clone(), snapshot.clone())?; > + > + // Reader locks the snapshot > + let reader = params.source.reader(namespace, snapshot).await?; > + > + // Does not lock the manifest, but the reader already assures a locked snapshot > + let source_manifest = match backup_dir.load_manifest() { > + Ok((manifest, _raw_size)) => manifest, > + Err(err) => { > + // No manifest in snapshot or failed to read, warn and skip > + log::warn!("failed to load manifest - {err}"); > + return Ok(stats); > + } > + }; > + > + // Manifest to be created on target, referencing all the source archives after upload. 
> +    let mut manifest = BackupManifest::new(snapshot.clone());
> +
> +    // Writer instance locks the snapshot on the remote side
> +    let backup_writer = BackupWriter::start(
> +        &params.target.client,
> +        None,
> +        params.target.repo.store(),
> +        &target_ns,
> +        snapshot,
> +        false,
> +        false,
> +    )
> +    .await?;
> +
> +    let mut previous_manifest = None;
> +    // Use manifest of previous snapshots in group on target for chunk upload deduplication
> +    if fetch_previous_manifest {
> +        match backup_writer.download_previous_manifest().await {
> +            Ok(manifest) => previous_manifest = Some(Arc::new(manifest)),
> +            Err(err) => log::info!("Could not download previous manifest - {err}"),
> +        }
> +    };

as discussed off-list, this is not enough - we also need to download each index for the previous snapshot using the backup writer to properly initialize their contents as known chunks - both on the client side, and on the server side.

> +
> +    // Dummy upload options: the actual compression and/or encryption already happened while
> +    // the chunks were generated during creation of the backup snapshot, therefore pre-existing
> +    // chunks (already compressed and/or encrypted) can be pushed to the target.
> +    // Further, these steps are skipped in the backup writer upload stream.
> +    //
> +    // Therefore, these values do not need to fit the values given in the manifest.
> +    // The original manifest is uploaded in the end anyways.
> +    //
> +    // Compression is set to true so that the uploaded manifest will be compressed.
> +    // Encrypt is set to assure that above files are not encrypted.
> +    let upload_options = UploadOptions {
> +        compress: true,
> +        encrypt: false,
> +        previous_manifest,
> +        ..UploadOptions::default()
> +    };
> +
> +    // Avoid double upload penalty by remembering already seen chunks
> +    let known_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 1024)));

see above ;)

> +
> +    for entry in source_manifest.files() {
> +        let mut path = backup_dir.full_path();
> +        path.push(&entry.filename);
> +        if path.try_exists()? {
> +            match ArchiveType::from_path(&entry.filename)? {
> +                ArchiveType::Blob => {
> +                    let file = std::fs::File::open(path.clone())?;
> +                    let backup_stats = backup_writer.upload_blob(file, &entry.filename).await?;
> +                    manifest.add_file(
> +                        entry.filename.to_string(),
> +                        backup_stats.size,
> +                        backup_stats.csum,
> +                        entry.chunk_crypt_mode(),
> +                    )?;

I think this

> +                    stats.add(SyncStats {
> +                        chunk_count: backup_stats.chunk_count as usize,
> +                        bytes: backup_stats.size as usize,
> +                        elapsed: backup_stats.duration,
> +                        removed: None,
> +                    });
> +                }
> +                ArchiveType::DynamicIndex => {
> +                    let index = DynamicIndexReader::open(&path)?;
> +                    let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode());
> +                    let sync_stats = push_index(
> +                        &entry.filename,
> +                        index,
> +                        chunk_reader,
> +                        &backup_writer,
> +                        &mut manifest,

and this parameter

> +                        entry.chunk_crypt_mode(),
> +                        None,
> +                        known_chunks.clone(),
> +                    )
> +                    .await?;
> +                    stats.add(sync_stats);
> +                }
> +                ArchiveType::FixedIndex => {
> +                    let index = FixedIndexReader::open(&path)?;
> +                    let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode());
> +                    let size = index.index_bytes();
> +                    let sync_stats = push_index(
> +                        &entry.filename,
> +                        index,
> +                        chunk_reader,
> +                        &backup_writer,
> +                        &mut manifest,

and this parameter can all be dropped.. because we end up uploading the source manifest anyway below?
> + entry.chunk_crypt_mode(), > + Some(size), > + known_chunks.clone(), > + ) > + .await?; > + stats.add(sync_stats); > + } > + } > + } else { > + info!("{path:?} does not exist, skipped."); this should be a warning, or potentially even an error? if the source manifest references a file that doesn't exist, something is rather wrong? > + } > + } > + > + // Fetch client log from source and push to target > + // this has to be handled individually since the log is never part of the manifest > + let mut client_log_path = backup_dir.full_path(); > + client_log_path.push(CLIENT_LOG_BLOB_NAME); > + if client_log_path.is_file() { > + backup_writer > + .upload_blob_from_file( > + &client_log_path, > + CLIENT_LOG_BLOB_NAME, > + upload_options.clone(), > + ) > + .await?; > + } see comment a bit above w.r.t. handling of resyncing the last already existing snapshot.. > + //TODO: only add log line for conditions as described in feedback leftover? > + > + // Rewrite manifest for pushed snapshot, recreating manifest from source on target > + let manifest_json = serde_json::to_value(source_manifest)?; unsure: should we drop verification state and upload stats? this and not re-syncing notes and verification state comes up from time to time.. > + let manifest_string = serde_json::to_string_pretty(&manifest_json)?; > + let backup_stats = backup_writer > + .upload_blob_from_data( > + manifest_string.into_bytes(), > + MANIFEST_BLOB_NAME, > + upload_options, > + ) > + .await?; > + backup_writer.finish().await?; > + > + stats.add(SyncStats { > + chunk_count: backup_stats.chunk_count as usize, > + bytes: backup_stats.size as usize, > + elapsed: backup_stats.duration, > + removed: None, > + }); > + > + Ok(stats) > +} > + > +// Read fixed or dynamic index and push to target by uploading via the backup writer instance > +// > +// For fixed indexes, the size must be provided as given by the index reader. 
> +#[allow(clippy::too_many_arguments)] > +async fn push_index<'a>( > + filename: &'a str, > + index: impl IndexFile + Send + 'static, > + chunk_reader: Arc, > + backup_writer: &BackupWriter, > + manifest: &mut BackupManifest, only used to add the file below > + crypt_mode: CryptMode, > + size: Option, > + known_chunks: Arc>>, > +) -> Result { > + let (upload_channel_tx, upload_channel_rx) = mpsc::channel(20); > + let mut chunk_infos = > + stream::iter(0..index.index_count()).map(move |pos| index.chunk_info(pos).unwrap()); > + > + tokio::spawn(async move { > + while let Some(chunk_info) = chunk_infos.next().await { > + // Avoid reading known chunks, as they are not uploaded by the backup writer anyways > + let needs_upload = { > + // Need to limit the scope of the lock, otherwise the async block is not `Send` > + let mut known_chunks = known_chunks.lock().unwrap(); > + // Check if present and insert, chunk will be read and uploaded below if not present > + known_chunks.insert(chunk_info.digest) > + }; > + > + let merged_chunk_info = if needs_upload { > + chunk_reader > + .read_raw_chunk(&chunk_info.digest) > + .await > + .map(|chunk| { > + MergedChunkInfo::New(ChunkInfo { > + chunk, > + digest: chunk_info.digest, > + chunk_len: chunk_info.size(), > + offset: chunk_info.range.start, > + }) > + }) > + } else { > + Ok(MergedChunkInfo::Known(vec![( > + // Pass size instead of offset, will be replaced with offset by the backup > + // writer > + chunk_info.size(), > + chunk_info.digest, > + )])) > + }; > + let _ = upload_channel_tx.send(merged_chunk_info).await; > + } > + }); > + > + let merged_chunk_info_stream = ReceiverStream::new(upload_channel_rx).map_err(Error::from); > + > + let upload_options = UploadOptions { > + compress: true, > + encrypt: false, > + fixed_size: size, > + ..UploadOptions::default() > + }; > + > + let upload_stats = backup_writer > + .upload_index_chunk_info(filename, merged_chunk_info_stream, upload_options) > + .await?; > + > + manifest.add_file( > + filename.to_string(), > + upload_stats.size, > + upload_stats.csum, > + crypt_mode, > + )?; but this is then not used anywhere because we use the original manifest at the call site -> can be dropped.. > + > + Ok(SyncStats { > + chunk_count: upload_stats.chunk_count as usize, > + bytes: upload_stats.size as usize, > + elapsed: upload_stats.duration, > + removed: None, > + }) > +} > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Wed Nov 6 12:57:59 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Wed, 06 Nov 2024 12:57:59 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types In-Reply-To: <20241031121519.434337-13-c.ebner@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> Message-ID: <173089427968.79072.3773251895934605531@yuna.proxmox.com> @Thomas: since there's a few questions below that have long-term implications, I'd appreciate feedback.. Quoting Christian Ebner (2024-10-31 13:15:02) > Add a dedicated api type for the `version` api endpoint and helper > methods for supported feature comparison. > This will be used to detect api incompatibility of older hosts, not > supporting some features. > > Use the new api type to refactor the version endpoint and set it as > return type. 
> > Signed-off-by: Christian Ebner > --- > changes since version 5: > - add `features` vector to store supported feature strings > - drop `min_version_check`, introduce `supported_feature` check > > pbs-api-types/src/lib.rs | 3 + > pbs-api-types/src/version.rs | 109 +++++++++++++++++++++++++++++++++++ > src/api2/version.rs | 42 ++++++++------ > 3 files changed, 137 insertions(+), 17 deletions(-) > create mode 100644 pbs-api-types/src/version.rs > > diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs > index 460c7da7c..6bae4a52b 100644 > --- a/pbs-api-types/src/lib.rs > +++ b/pbs-api-types/src/lib.rs > @@ -155,6 +155,9 @@ pub use zfs::*; > mod metrics; > pub use metrics::*; > > +mod version; > +pub use version::*; > + > const_regex! { > // just a rough check - dummy acceptor is used before persisting > pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$"; > diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs > new file mode 100644 > index 000000000..c7c91a53a > --- /dev/null > +++ b/pbs-api-types/src/version.rs > @@ -0,0 +1,109 @@ > +//! Defines the types for the api version info endpoint > +use std::convert::TryFrom; > + > +use anyhow::Context; > + > +use proxmox_schema::api; > + > +#[api( > + description: "Api version information", > + properties: { > + "version": { > + description: "Version 'major.minor'", > + type: String, > + }, > + "release": { > + description: "Version release", > + type: String, > + }, > + "repoid": { > + description: "Version repository id", > + type: String, > + }, > + "features": { > + description: "List of supported features", > + type: Array, > + items: { > + type: String, > + description: "Feature id", > + }, > + }, > + } > +)] > +#[derive(serde::Deserialize, serde::Serialize)] > +pub struct ApiVersionInfo { > + pub version: String, > + pub release: String, > + pub repoid: String, > + #[serde(default, skip_serializing_if = "Vec::is_empty")] > + pub features: Vec, > +} > + > +pub type ApiVersionMajor = u64; > +pub type ApiVersionMinor = u64; > +pub type ApiVersionRelease = u64; > + > +#[allow(dead_code)] > +pub struct ApiVersion { > + major: ApiVersionMajor, > + minor: ApiVersionMinor, > + release: ApiVersionRelease, > + features: Vec, > +} nit: if the fields were pub, this wouldn't be dead code and the new below could be dropped.. but, I am not sure if we even need this now, we could also just implement helpers on ApiVersionInfo that give us the major, minor, release versions as u64? especially if we do "does the server support XX" via explicit named features, and don't even have a use case (yet) for accessing the version parts? the big question here is - do we want to expose this kind of thing? so far, we've used the approach of making things opt-in or backwards compatible, or failing hard if a newer client tries to use a feature that is not supported by an older server (e.g., if a client tries to use namespaces with a server that doesn't support them, it will just error out on whichever request it makes). there are two ways to handle explicit versioning between client and server: 1.) client retrieves the version, and has a list of "feature A is supported since version X.Y.Z" 2.) client retrieves a list of supported features from the server (this patch (series)) variant 1 has the advantage that we don't have to keep an ever-growing list of features around (or worry about "naming" and organizing them). 
variant 2 has the advantage that the server can explicitly tell what it supports without needing the client to adapt its version <-> feature mapping (i.e., if we or somebody else backports a feature). it also has the advantage that there is no risk of the version mapping being wrong (e.g., because there was unexpected delay in applying a patch series, or somebody made a mistake in the contained version number).

variant 1 was what I actually had in mind when I originally proposed this, but I do like variant 2 as well!

> +impl TryFrom<ApiVersionInfo> for ApiVersion {
> +    type Error = anyhow::Error;
> +
> +    fn try_from(value: ApiVersionInfo) -> Result<Self, Self::Error> {
> +        let mut parts = value.version.split('.');
> +        let major: ApiVersionMajor = if let Some(val) = parts.next() {
> +            val.parse()
> +                .with_context(|| "failed to parse major version")?
> +        } else {
> +            ApiVersionMajor::default()
> +        };
> +        let minor: ApiVersionMinor = if let Some(val) = parts.next() {
> +            val.parse()
> +                .with_context(|| "failed to parse minor version")?
> +        } else {
> +            ApiVersionMinor::default()
> +        };
> +
> +        let release: ApiVersionMinor = value
> +            .release
> +            .parse()
> +            .with_context(|| "failed to parse release version")?;
> +
> +        Ok(Self {
> +            major,
> +            minor,
> +            release,
> +            features: value.features.to_vec(),
> +        })
> +    }
> +}
> +
> +impl ApiVersion {
> +    pub fn new(
> +        major: ApiVersionMajor,
> +        minor: ApiVersionMinor,
> +        release: ApiVersionRelease,
> +        features: Vec<String>,
> +    ) -> Self {
> +        Self {
> +            major,
> +            minor,
> +            release,
> +            features,
> +        }
> +    }
> +
> +    pub fn supports_feature(&self, feature: &str) -> bool {

this is just

ver.features.iter().any(|f| f == feature)

which isn't really that much longer than

ver.supports_feature(feature)

that being said, if we expect to do more complicated things here in the future, an explicit helper might be nice anyway.. but then the body can just be that single line for now ;)

> +        for supported_feature in &self.features {
> +            if *supported_feature == feature {
> +                return true;
> +            }
> +        }
> +        false
> +    }
> +}
> diff --git a/src/api2/version.rs b/src/api2/version.rs
> index 0e91688b5..a6cec5216 100644
> --- a/src/api2/version.rs
> +++ b/src/api2/version.rs
> @@ -1,27 +1,35 @@
>  //! Version information
>
>  use anyhow::Error;
> -use serde_json::{json, Value};
> +use serde_json::Value;
>
> -use proxmox_router::{ApiHandler, ApiMethod, Permission, Router, RpcEnvironment};
> -use proxmox_schema::ObjectSchema;
> +use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
> +use proxmox_schema::api;
>
> -fn get_version(
> +use pbs_api_types::ApiVersionInfo;
> +
> +const FEATURES: &'static [&'static str] = &[];
> +
> +#[api(
> +    returns: {
> +        type: ApiVersionInfo,
> +    },
> +    access: {
> +        permission: &Permission::Anybody,
> +    }
> +)]
> +///Proxmox Backup Server API version.
> +fn version( > _param: Value, > _info: &ApiMethod, > _rpcenv: &mut dyn RpcEnvironment, > -) -> Result { > - Ok(json!({ > - "version": pbs_buildcfg::PROXMOX_PKG_VERSION, > - "release": pbs_buildcfg::PROXMOX_PKG_RELEASE, > - "repoid": pbs_buildcfg::PROXMOX_PKG_REPOID > - })) > +) -> Result { > + Ok(ApiVersionInfo { > + version: pbs_buildcfg::PROXMOX_PKG_VERSION.to_string(), > + release: pbs_buildcfg::PROXMOX_PKG_RELEASE.to_string(), > + repoid: pbs_buildcfg::PROXMOX_PKG_REPOID.to_string(), > + features: FEATURES.iter().map(|feature| feature.to_string()).collect(), > + }) > } > > -pub const ROUTER: Router = Router::new().get( > - &ApiMethod::new( > - &ApiHandler::Sync(&get_version), > - &ObjectSchema::new("Proxmox Backup Server API version.", &[]), > - ) > - .access(None, &Permission::Anybody), > -); > +pub const ROUTER: Router = Router::new().get(&API_METHOD_VERSION); > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Wed Nov 6 12:58:12 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Wed, 06 Nov 2024 12:58:12 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 08/29] api types: define remote permissions and roles for push sync In-Reply-To: <20241031121519.434337-9-c.ebner@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-9-c.ebner@proxmox.com> Message-ID: <173089429298.79072.5443597893467760141@yuna.proxmox.com> Quoting Christian Ebner (2024-10-31 13:14:58) > Adding the privileges to allow backup, namespace creation and prune > on remote targets, to be used for sync jobs in push direction. > > Also adds dedicated roles setting the required privileges. > > Signed-off-by: Christian Ebner > --- > changes since version 5: > - dedicated remote datastore modify and remote datastore prune roles > - remove local datastore read access for sync job push operator > > pbs-api-types/src/acl.rs | 31 +++++++++++++++++++++++++++++++ > 1 file changed, 31 insertions(+) > > diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs > index a8ae57a9d..68ed59105 100644 > --- a/pbs-api-types/src/acl.rs > +++ b/pbs-api-types/src/acl.rs > @@ -58,6 +58,12 @@ constnamedbitmap! { > PRIV_REMOTE_MODIFY("Remote.Modify"); > /// Remote.Read allows reading data from a configured `Remote` > PRIV_REMOTE_READ("Remote.Read"); > + /// Remote.DatastoreBackup allows creating new snapshots on remote datastores > + PRIV_REMOTE_DATASTORE_BACKUP("Remote.DatastoreBackup"); > + /// Remote.DatastoreModify allows to modify remote datastores > + PRIV_REMOTE_DATASTORE_MODIFY("Remote.DatastoreModify"); > + /// Remote.DatastorePrune allows deleting snapshots on remote datastores > + PRIV_REMOTE_DATASTORE_PRUNE("Remote.DatastorePrune"); > > /// Sys.Console allows access to the system's console > PRIV_SYS_CONSOLE("Sys.Console"); > @@ -160,6 +166,25 @@ pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0 > | PRIV_REMOTE_AUDIT > | PRIV_REMOTE_READ; > > +#[rustfmt::skip] > +#[allow(clippy::identity_op)] > +/// Remote.SyncPushOperator can do read and push snapshots to the remote. > +pub const ROLE_REMOTE_SYNC_PUSH_OPERATOR: u64 = 0 > + | PRIV_REMOTE_AUDIT > + | PRIV_REMOTE_DATASTORE_BACKUP; > + > +#[rustfmt::skip] > +#[allow(clippy::identity_op)] > +/// Remote.DatastorePrune can prune owned snapshots and groups. 
> +pub const ROLE_REMOTE_DATASTORE_PRUNE: u64 = 0 > + | PRIV_REMOTE_DATASTORE_PRUNE; > + > +#[rustfmt::skip] > +#[allow(clippy::identity_op)] > +/// Remote.DatastoreModify can create and remove namespaces on the remote. > +pub const ROLE_REMOTE_DATASTORE_MODIFY: u64 = 0 > + | PRIV_REMOTE_DATASTORE_MODIFY; > + > #[rustfmt::skip] > #[allow(clippy::identity_op)] > /// Tape.Audit can audit the tape backup configuration and media content > @@ -225,6 +250,12 @@ pub enum Role { > RemoteAdmin = ROLE_REMOTE_ADMIN, > /// Synchronization Operator > RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR, > + /// Synchronisation Operator (push direction) > + RemoteSyncPushOperator = ROLE_REMOTE_SYNC_PUSH_OPERATOR, > + /// Remote Datastore Prune > + RemoteDatastorePrune = ROLE_REMOTE_DATASTORE_PRUNE, should we name this role "RemoteDatastorePowerUser", to match the local role? > + /// Remote Datastore Modify > + RemoteDatastoreModify = ROLE_REMOTE_DATASTORE_MODIFY, and this would then become RemoteDatastoreAdmin ? although that is not a 1:1 match, since DatastoreAdmin also includes verify, read, .. naming the roles just after the privs might bite us down the line, if we ever extend/split the privs.. > /// Tape Auditor > TapeAudit = ROLE_TAPE_AUDIT, > /// Tape Administrator > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Wed Nov 6 16:10:39 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Wed, 06 Nov 2024 16:10:39 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 15/29] api: push: implement endpoint for sync in push direction In-Reply-To: <20241031121519.434337-16-c.ebner@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-16-c.ebner@proxmox.com> Message-ID: <173090583969.79072.15737271044931374423@yuna.proxmox.com> Quoting Christian Ebner (2024-10-31 13:15:05) > Expose the sync job in push direction via a dedicated API endpoint, > analogous to the pull direction. 
> > Signed-off-by: Christian Ebner > --- > changes since version 5: > - Avoid double deserialization for backup namespaces > - Drop TryFrom<&SyncJobConfig> for PushParameters impl, as constructing > them requires an api call to fetch the remote api version now > > src/api2/mod.rs | 2 + > src/api2/push.rs | 183 +++++++++++++++++++++++++++++++++++++++++++++++ > 2 files changed, 185 insertions(+) > create mode 100644 src/api2/push.rs > > diff --git a/src/api2/mod.rs b/src/api2/mod.rs > index a83e4c205..03596326b 100644 > --- a/src/api2/mod.rs > +++ b/src/api2/mod.rs > @@ -12,6 +12,7 @@ pub mod helpers; > pub mod node; > pub mod ping; > pub mod pull; > +pub mod push; > pub mod reader; > pub mod status; > pub mod tape; > @@ -29,6 +30,7 @@ const SUBDIRS: SubdirMap = &sorted!([ > ("nodes", &node::ROUTER), > ("ping", &ping::ROUTER), > ("pull", &pull::ROUTER), > + ("push", &push::ROUTER), > ("reader", &reader::ROUTER), > ("status", &status::ROUTER), > ("tape", &tape::ROUTER), > diff --git a/src/api2/push.rs b/src/api2/push.rs > new file mode 100644 > index 000000000..28f4417d1 > --- /dev/null > +++ b/src/api2/push.rs > @@ -0,0 +1,183 @@ > +use anyhow::{format_err, Error}; > +use futures::{future::FutureExt, select}; > +use tracing::info; > + > +use pbs_api_types::{ > + Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA, > + GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_READ, > + PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, REMOTE_ID_SCHEMA, > + REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, > +}; > +use proxmox_rest_server::WorkerTask; > +use proxmox_router::{Permission, Router, RpcEnvironment}; > +use proxmox_schema::api; > + > +use pbs_config::CachedUserInfo; > + > +use crate::server::push::{push_store, PushParameters}; > + > +/// Check if the provided user is allowed to read from the local source and act on the remote > +/// target for pushing content > +pub fn check_push_privs( not used anywhere except here, could be private? > + auth_id: &Authid, > + store: &str, > + namespace: &BackupNamespace, > + remote: &str, > + remote_store: &str, > + remote_ns: Option<&BackupNamespace>, since we don't actually need to support not setting the root namespace, the Option here can go away.. > + delete: bool, > +) -> Result<(), Error> { > + let user_info = CachedUserInfo::new()?; > + > + let target_acl_path = match remote_ns { > + Some(ns) => ns.remote_acl_path(remote, remote_store), > + None => vec!["remote", remote, remote_store], > + }; which makes this simpler > + > + // Check user is allowed to backup to remote/// > + user_info.check_privs( > + auth_id, > + &target_acl_path, > + PRIV_REMOTE_DATASTORE_BACKUP, > + false, > + )?; > + > + if delete { > + // Check user is allowed to prune remote datastore > + user_info.check_privs( > + auth_id, > + &target_acl_path, > + PRIV_REMOTE_DATASTORE_PRUNE, > + false, > + )?; > + } > + > + // Check user is allowed to read source datastore > + user_info.check_privs( > + auth_id, > + &namespace.acl_path(store), > + PRIV_DATASTORE_READ, isn't this too restrictive? should be PRIV_DATASTORE_BACKUP *or* READ? the push task will then filter the local namespaces/backup groups/.. by what the user is allowed to see.. 
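i.e. roughly (sketch - assuming the trailing bool of check_privs enables partial matching here, like elsewhere in the codebase):

```rust
// require either Datastore.Read or Datastore.Backup on the local source;
// with Backup only, ownership filtering limits what can actually be read
user_info.check_privs(
    auth_id,
    &namespace.acl_path(store),
    PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    true, // partial match: any one of the two privileges is enough
)?;
```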
> + false, > + )?; > + > + Ok(()) > +} > + > +#[api( > + input: { > + properties: { > + store: { > + schema: DATASTORE_SCHEMA, > + }, > + ns: { > + type: BackupNamespace, > + optional: true, > + }, > + remote: { > + schema: REMOTE_ID_SCHEMA, > + }, > + "remote-store": { > + schema: DATASTORE_SCHEMA, > + }, > + "remote-ns": { > + type: BackupNamespace, > + optional: true, > + }, > + "remove-vanished": { > + schema: REMOVE_VANISHED_BACKUPS_SCHEMA, > + optional: true, > + }, > + "max-depth": { > + schema: NS_MAX_DEPTH_REDUCED_SCHEMA, > + optional: true, > + }, > + "group-filter": { > + schema: GROUP_FILTER_LIST_SCHEMA, > + optional: true, > + }, > + limit: { > + type: RateLimitConfig, > + flatten: true, > + }, > + "transfer-last": { > + schema: TRANSFER_LAST_SCHEMA, > + optional: true, > + }, > + }, > + }, > + access: { > + description: r###"The user needs Remote.Backup privilege on '/remote/{remote}/{remote-store}' > +and needs to own the backup group. Datastore.Read is required on '/datastore/{store}'. > +The delete flag additionally requires the Remote.Prune privilege on '/remote/{remote}/{remote-store}'. this is partly wrong and/or weirdly phrased ;) maybe something like The user needs (at least) Remote.DatastoreBackup on '/remote/{remote}/{remote-store}[/{remote-ns}]', and either Datastore.Backup or Datastore.Read on '/datastore/{store}[/{ns}]'. The 'remove-vanished' parameter might require additional privileges. > +"###, > + permission: &Permission::Anybody, > + }, > +)] > +/// Push store to other repository > +#[allow(clippy::too_many_arguments)] > +async fn push( > + store: String, > + ns: Option, > + remote: String, > + remote_store: String, > + remote_ns: Option, > + remove_vanished: Option, > + max_depth: Option, > + group_filter: Option>, > + limit: RateLimitConfig, > + transfer_last: Option, > + rpcenv: &mut dyn RpcEnvironment, > +) -> Result { > + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > + let delete = remove_vanished.unwrap_or(false); > + let ns = ns.unwrap_or_default(); this could also be done for remote_ns > + > + check_push_privs( > + &auth_id, > + &store, > + &ns, > + &remote, > + &remote_store, > + remote_ns.as_ref(), > + delete, > + )?; > + > + let push_params = PushParameters::new( > + &store, > + ns, > + &remote, > + &remote_store, > + remote_ns.unwrap_or_default(), since we unwrap it here anyway ;) > + auth_id.clone(), > + remove_vanished, > + max_depth, > + group_filter, > + limit, > + transfer_last, > + ) > + .await?; > + > + let upid_str = WorkerTask::spawn( > + "sync", > + Some(store.clone()), > + auth_id.to_string(), > + true, > + move |worker| async move { > + info!("push datastore '{store}' to '{remote}/{remote_store}'"); this is a bit redundant (and incomplete), the push output will contain this correctly extended with namespace information.. > + > + let push_future = push_store(push_params); > + (select! 
{ > + success = push_future.fuse() => success, > + abort = worker.abort_future().map(|_| Err(format_err!("push aborted"))) => abort, > + })?; > + > + info!("push datastore '{store}' end"); same here > + > + Ok(()) > + }, > + )?; > + > + Ok(upid_str) > +} > + > +pub const ROUTER: Router = Router::new().post(&API_METHOD_PUSH); > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Wed Nov 6 16:20:35 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Wed, 06 Nov 2024 16:20:35 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 19/29] api: sync jobs: expose optional `sync-direction` parameter In-Reply-To: <20241031121519.434337-20-c.ebner@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-20-c.ebner@proxmox.com> Message-ID: <173090643519.79072.2923413753129715762@yuna.proxmox.com> Quoting Christian Ebner (2024-10-31 13:15:09) > Exposes and switch the config type for sync job operations based > on the `sync-direction` parameter, exposed on required api endpoints. > > If not set, the default config type is `sync` and the default sync > direction is `pull` for full backwards compatibility. Whenever > possible, deterimne the sync direction and config type from the sync typo "determine" > job config directly rather than requiring it as optional api > parameter. > > Further, extend read and modify access checks by sync direction to > conditionally check for the required permissions in pull and push > direction. > > Signed-off-by: Christian Ebner > --- > changes since version 5: > - Squashed permission check patches into this one, as they make not much > sense without this > - Only expose optional sync-direction parameter for api endpoints which > require them, use the job config to determine sync-direction and/or > config-type otherwise. > > src/api2/admin/sync.rs | 34 ++-- > src/api2/config/datastore.rs | 11 +- > src/api2/config/notifications/mod.rs | 19 +- > src/api2/config/sync.rs | 280 ++++++++++++++++++++------- > src/bin/proxmox-backup-proxy.rs | 11 +- > 5 files changed, 261 insertions(+), 94 deletions(-) > > diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs > index be324564c..8a242b1c3 100644 > --- a/src/api2/admin/sync.rs > +++ b/src/api2/admin/sync.rs > @@ -1,6 +1,7 @@ > //! Datastore Synchronization Job Management > > use anyhow::{bail, format_err, Error}; > +use serde::Deserialize; > use serde_json::Value; > > use proxmox_router::{ > @@ -29,6 +30,10 @@ use crate::{ > schema: DATASTORE_SCHEMA, > optional: true, > }, > + "sync-direction": { > + type: SyncDirection, > + optional: true, > + }, > }, > }, > returns: { > @@ -44,6 +49,7 @@ use crate::{ > /// List all sync jobs > pub fn list_sync_jobs( > store: Option, > + sync_direction: Option, > _param: Value, > rpcenv: &mut dyn RpcEnvironment, > ) -> Result, Error> { > @@ -52,8 +58,9 @@ pub fn list_sync_jobs( > > let (config, digest) = sync::config()?; > > + let sync_direction = sync_direction.unwrap_or_default(); > let job_config_iter = config > - .convert_to_typed_array("sync")? > + .convert_to_typed_array(sync_direction.as_config_type_str())? 
> .into_iter() > .filter(|job: &SyncJobConfig| { > if let Some(store) = &store { > @@ -62,7 +69,9 @@ pub fn list_sync_jobs( > true > } > }) > - .filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job)); > + .filter(|job: &SyncJobConfig| { > + check_sync_job_read_access(&user_info, &auth_id, job, sync_direction) > + }); > > let mut list = Vec::new(); > > @@ -106,24 +115,23 @@ pub fn run_sync_job( > let user_info = CachedUserInfo::new()?; > > let (config, _digest) = sync::config()?; > - let sync_job: SyncJobConfig = config.lookup("sync", &id)?; > + let (config_type, config_section) = config > + .sections > + .get(&id) > + .ok_or_else(|| format_err!("No sync job with id '{id}' found in config"))?; > + > + let sync_direction = SyncDirection::from_config_type_str(config_type)?; > + let sync_job = SyncJobConfig::deserialize(config_section)?; > > - if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) { > - bail!("permission check failed"); > + if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job, sync_direction) { > + bail!("permission check failed, '{auth_id}' is missing access"); > } > > let job = Job::new("syncjob", &id)?; > > let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > > - let upid_str = do_sync_job( > - job, > - sync_job, > - &auth_id, > - None, > - SyncDirection::Pull, > - to_stdout, > - )?; > + let upid_str = do_sync_job(job, sync_job, &auth_id, None, sync_direction, to_stdout)?; > > Ok(upid_str) > } > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index ca6edf05a..c151eda10 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -13,8 +13,9 @@ use proxmox_uuid::Uuid; > > use pbs_api_types::{ > Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions, > - MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, > - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, > + MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA, > + PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, > + PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, > }; > use pbs_config::BackupLockGuard; > use pbs_datastore::chunk_store::ChunkStore; > @@ -498,8 +499,10 @@ pub async fn delete_datastore( > for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { > delete_verification_job(job.config.id, None, rpcenv)? > } > - for job in list_sync_jobs(Some(name.clone()), Value::Null, rpcenv)? { > - delete_sync_job(job.config.id, None, rpcenv)? > + for direction in [SyncDirection::Pull, SyncDirection::Push] { > + for job in list_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? { > + delete_sync_job(job.config.id, None, rpcenv)? > + } > } > for job in list_prune_jobs(Some(name.clone()), Value::Null, rpcenv)? { > delete_prune_job(job.config.id, None, rpcenv)? 
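for context, the section type mapping these hunks rely on presumably boils down to something like this (sketch, not part of the quoted patch - the "sync-push" string matches the matchable value added in the notifications hunk below):

```rust
use anyhow::{bail, Error};

impl SyncDirection {
    // map each sync direction to the section-config type string used in
    // the shared sync job config file
    pub fn as_config_type_str(&self) -> &'static str {
        match self {
            SyncDirection::Pull => "sync",
            SyncDirection::Push => "sync-push",
        }
    }

    pub fn from_config_type_str(config_type: &str) -> Result<Self, Error> {
        match config_type {
            "sync" => Ok(SyncDirection::Pull),
            "sync-push" => Ok(SyncDirection::Push),
            _ => bail!("invalid config type '{config_type}' for sync job"),
        }
    }
}
```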
> diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs > index dfe82ed03..31c4851c1 100644 > --- a/src/api2/config/notifications/mod.rs > +++ b/src/api2/config/notifications/mod.rs > @@ -9,7 +9,7 @@ use proxmox_schema::api; > use proxmox_sortable_macro::sortable; > > use crate::api2::admin::datastore::get_datastore_list; > -use pbs_api_types::PRIV_SYS_AUDIT; > +use pbs_api_types::{SyncDirection, PRIV_SYS_AUDIT}; > > use crate::api2::admin::prune::list_prune_jobs; > use crate::api2::admin::sync::list_sync_jobs; > @@ -154,13 +154,15 @@ pub fn get_values( > }); > } > > - let sync_jobs = list_sync_jobs(None, param.clone(), rpcenv)?; > - for job in sync_jobs { > - values.push(MatchableValue { > - field: "job-id".into(), > - value: job.config.id, > - comment: job.config.comment, > - }); > + for direction in [SyncDirection::Pull, SyncDirection::Push] { > + let sync_jobs = list_sync_jobs(None, Some(direction), param.clone(), rpcenv)?; > + for job in sync_jobs { > + values.push(MatchableValue { > + field: "job-id".into(), > + value: job.config.id, > + comment: job.config.comment, > + }); > + } > } > > let verify_jobs = list_verification_jobs(None, param.clone(), rpcenv)?; > @@ -184,6 +186,7 @@ pub fn get_values( > "package-updates", > "prune", > "sync", > + "sync-push", > "system-mail", > "tape-backup", > "tape-load", > diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs > index 3963049e9..2f32aaccb 100644 > --- a/src/api2/config/sync.rs > +++ b/src/api2/config/sync.rs > @@ -1,6 +1,7 @@ > use ::serde::{Deserialize, Serialize}; > use anyhow::{bail, Error}; > use hex::FromHex; > +use pbs_api_types::SyncDirection; > use serde_json::Value; > > use proxmox_router::{http_bail, Permission, Router, RpcEnvironment}; > @@ -8,8 +9,9 @@ use proxmox_schema::{api, param_bail}; > > use pbs_api_types::{ > Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT, > - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT, > - PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA, > + PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, > + PRIV_REMOTE_AUDIT, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, > + PRIV_REMOTE_DATASTORE_PRUNE, PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA, > }; > use pbs_config::sync; > > @@ -20,18 +22,35 @@ pub fn check_sync_job_read_access( > user_info: &CachedUserInfo, > auth_id: &Authid, > job: &SyncJobConfig, > + sync_direction: SyncDirection, > ) -> bool { > + // check for audit access on datastore/namespace, applies for pull and push direction > let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); > if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { > return false; > } > > - if let Some(remote) = &job.remote { > - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); > - remote_privs & PRIV_REMOTE_AUDIT != 0 > - } else { > - let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); > - source_ds_privs & PRIV_DATASTORE_AUDIT != 0 > + match sync_direction { > + SyncDirection::Pull => { > + if let Some(remote) = &job.remote { > + let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); > + remote_privs & PRIV_REMOTE_AUDIT != 0 > + } else { > + let source_ds_privs = > + user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); > + source_ds_privs & PRIV_DATASTORE_AUDIT != 0 > + } > + } > + SyncDirection::Push => { > + // check for audit access on 
remote/datastore/namespace
> +            if let Some(target_acl_path) = job.remote_acl_path() {
> +                let remote_privs = user_info.lookup_privs(auth_id, &target_acl_path);
> +                remote_privs & PRIV_REMOTE_AUDIT != 0

the other two checks above check the source side, this checks the target side.. should we check both here?

> +            } else {
> +                // Remote must always be present for sync in push direction, fail otherwise
> +                false
> +            }
> +        }
>      }
>  }
>
> @@ -43,41 +62,93 @@ fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool {
>      }
>  }
>
> -/// checks whether user can run the corresponding pull job
> +/// checks whether user can run the corresponding sync job, depending on sync direction
>  ///
> -/// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly.
> +/// namespace creation/deletion ACL and backup group ownership checks happen in the pull/push code
> +/// directly.
>  /// remote side checks/filters remote datastore/namespace/group access.
>  pub fn check_sync_job_modify_access(
>      user_info: &CachedUserInfo,
>      auth_id: &Authid,
>      job: &SyncJobConfig,
> +    sync_direction: SyncDirection,
>  ) -> bool {
> -    let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
> -    if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
> -        return false;
> -    }
> +    match sync_direction {
> +        SyncDirection::Pull => {
> +            let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
> +            if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0
> +                || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0
> +            {
> +                return false;
> +            }
> +
> +            if let Some(true) = job.remove_vanished {
> +                if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 {
> +                    return false;
> +                }
> +            }
>
> -    if let Some(true) = job.remove_vanished {
> -        if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 {
> -            return false;
> +            // same permission as changing ownership after syncing
> +            if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 {
> +                return false;
> +            }
> +
> +            if let Some(remote) = &job.remote {
> +                let remote_privs =
> +                    user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]);
> +                return remote_privs & PRIV_REMOTE_READ != 0;
> +            }
> +            true
>          }
> -    }
> +        SyncDirection::Push => {
> +            // Remote must always be present for sync in push direction, fail otherwise
> +            let target_privs = if let Some(target_acl_path) = job.remote_acl_path() {
> +                user_info.lookup_privs(auth_id, &target_acl_path)
> +            } else {
> +                return false;
> +            };
> +
> +            // check user is allowed to create backups on remote datastore
> +            if target_privs & PRIV_REMOTE_DATASTORE_BACKUP == 0 {
> +                return false;
> +            }
>
> -    // same permission as changing ownership after syncing
> -    if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 {
> -        return false;
> -    }
> +            if let Some(true) = job.remove_vanished {
> +                // check user is allowed to prune backup snapshots on remote datastore
> +                if target_privs & PRIV_REMOTE_DATASTORE_PRUNE == 0 {
> +                    return false;
> +                }
> +            }
> +
> +            // check user is not the owner of the sync job, but has remote datastore modify permissions
> +            if !is_correct_owner(auth_id, job) && target_privs & PRIV_REMOTE_DATASTORE_MODIFY == 0 {
> +                return false;
> +            }

isn't this wrong? if I am modifying/running a sync job "owned" by somebody else, then I need to have Datastore.Read or Datastore.Modify on the *local* source datastore+namespace..
else I could use such a sync job to exfiltrate backups I wouldn't otherwise have access to.. > + > + // check user is allowed to read from (local) source datastore/namespace > + let source_privs = user_info.lookup_privs(auth_id, &job.acl_path()); > + if source_privs & PRIV_DATASTORE_AUDIT == 0 { > + return false; > + } > > - if let Some(remote) = &job.remote { > - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); > - return remote_privs & PRIV_REMOTE_READ != 0; > + // check for either datastore read or datastore backup access > + // (the later implying read access for owned snapshot groups) > + if source_privs & PRIV_DATASTORE_READ != 0 { > + return true; > + } > + source_privs & PRIV_DATASTORE_BACKUP != 0 > + } > } > - true > } > > #[api( > input: { > - properties: {}, > + properties: { > + "sync-direction": { > + type: SyncDirection, > + optional: true, > + }, > + }, > }, > returns: { > description: "List configured jobs.", > @@ -92,6 +163,7 @@ pub fn check_sync_job_modify_access( > /// List all sync jobs > pub fn list_sync_jobs( > _param: Value, > + sync_direction: Option, > rpcenv: &mut dyn RpcEnvironment, > ) -> Result, Error> { > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > @@ -99,13 +171,16 @@ pub fn list_sync_jobs( > > let (config, digest) = sync::config()?; > > - let list = config.convert_to_typed_array("sync")?; > + let sync_direction = sync_direction.unwrap_or_default(); > + let list = config.convert_to_typed_array(sync_direction.as_config_type_str())?; > > rpcenv["digest"] = hex::encode(digest).into(); > > let list = list > .into_iter() > - .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job)) > + .filter(|sync_job| { > + check_sync_job_read_access(&user_info, &auth_id, sync_job, sync_direction) > + }) > .collect(); > Ok(list) > } > @@ -118,6 +193,10 @@ pub fn list_sync_jobs( > type: SyncJobConfig, > flatten: true, > }, > + "sync-direction": { > + type: SyncDirection, > + optional: true, > + }, > }, > }, > access: { > @@ -128,14 +207,16 @@ pub fn list_sync_jobs( > /// Create a new sync job. 
> pub fn create_sync_job( > config: SyncJobConfig, > + sync_direction: Option, > rpcenv: &mut dyn RpcEnvironment, > ) -> Result<(), Error> { > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > let user_info = CachedUserInfo::new()?; > + let sync_direction = sync_direction.unwrap_or_default(); > > let _lock = sync::lock_config()?; > > - if !check_sync_job_modify_access(&user_info, &auth_id, &config) { > + if !check_sync_job_modify_access(&user_info, &auth_id, &config, sync_direction) { > bail!("permission check failed"); > } > > @@ -158,7 +239,7 @@ pub fn create_sync_job( > param_bail!("id", "job '{}' already exists.", config.id); > } > > - section_config.set_data(&config.id, "sync", &config)?; > + section_config.set_data(&config.id, sync_direction.as_config_type_str(), &config)?; > > sync::save_config(§ion_config)?; > > @@ -188,8 +269,17 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result > let (config, digest) = sync::config()?; > > - let sync_job = config.lookup("sync", &id)?; > - if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) { > + let (sync_job, sync_direction) = > + if let Some((config_type, config_section)) = config.sections.get(&id) { > + ( > + SyncJobConfig::deserialize(config_section)?, > + SyncDirection::from_config_type_str(config_type)?, > + ) > + } else { > + http_bail!(NOT_FOUND, "job '{id}' does not exist.") > + }; > + > + if !check_sync_job_read_access(&user_info, &auth_id, &sync_job, sync_direction) { > bail!("permission check failed"); > } > > @@ -284,7 +374,15 @@ pub fn update_sync_job( > crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; > } > > - let mut data: SyncJobConfig = config.lookup("sync", &id)?; > + let (mut data, sync_direction) = > + if let Some((config_type, config_section)) = config.sections.get(&id) { > + ( > + SyncJobConfig::deserialize(config_section)?, > + SyncDirection::from_config_type_str(config_type)?, > + ) > + } else { > + http_bail!(NOT_FOUND, "job '{id}' does not exist.") > + }; > > if let Some(delete) = delete { > for delete_prop in delete { > @@ -405,11 +503,11 @@ pub fn update_sync_job( > } > } > > - if !check_sync_job_modify_access(&user_info, &auth_id, &data) { > + if !check_sync_job_modify_access(&user_info, &auth_id, &data, sync_direction) { > bail!("permission check failed"); > } > > - config.set_data(&id, "sync", &data)?; > + config.set_data(&id, sync_direction.as_config_type_str(), &data)?; > > sync::save_config(&config)?; > > @@ -456,17 +554,16 @@ pub fn delete_sync_job( > crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; > } > > - match config.lookup("sync", &id) { > - Ok(job) => { > - if !check_sync_job_modify_access(&user_info, &auth_id, &job) { > - bail!("permission check failed"); > - } > - config.sections.remove(&id); > - } > - Err(_) => { > - http_bail!(NOT_FOUND, "job '{}' does not exist.", id) > + if let Some((config_type, config_section)) = config.sections.get(&id) { > + let sync_direction = SyncDirection::from_config_type_str(config_type)?; > + let job = SyncJobConfig::deserialize(config_section)?; > + if !check_sync_job_modify_access(&user_info, &auth_id, &job, sync_direction) { > + bail!("permission check failed"); > } > - }; > + config.sections.remove(&id); > + } else { > + http_bail!(NOT_FOUND, "job '{}' does not exist.", id) > + } > > sync::save_config(&config)?; > > @@ -536,39 +633,67 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > }; > > // should work without ACLs > - 
assert!(check_sync_job_read_access(&user_info, root_auth_id, &job)); > - assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job)); > + assert!(check_sync_job_read_access( > + &user_info, > + root_auth_id, > + &job, > + SyncDirection::Pull, > + )); > + assert!(check_sync_job_modify_access( > + &user_info, > + root_auth_id, > + &job, > + SyncDirection::Pull, > + )); > > // user without permissions must fail > assert!(!check_sync_job_read_access( > &user_info, > &no_perm_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > assert!(!check_sync_job_modify_access( > &user_info, > &no_perm_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // reading without proper read permissions on either remote or local must fail > - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); > + assert!(!check_sync_job_read_access( > + &user_info, > + &read_auth_id, > + &job, > + SyncDirection::Pull, > + )); > > // reading without proper read permissions on local end must fail > job.remote = Some("remote1".to_string()); > - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); > + assert!(!check_sync_job_read_access( > + &user_info, > + &read_auth_id, > + &job, > + SyncDirection::Pull, > + )); > > // reading without proper read permissions on remote end must fail > job.remote = Some("remote0".to_string()); > job.store = "localstore1".to_string(); > - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); > + assert!(!check_sync_job_read_access( > + &user_info, > + &read_auth_id, > + &job, > + SyncDirection::Pull, > + )); > > // writing without proper write permissions on either end must fail > job.store = "localstore0".to_string(); > assert!(!check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // writing without proper write permissions on local end must fail > @@ -580,39 +705,54 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > assert!(!check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // reset remote to one where users have access > job.remote = Some("remote1".to_string()); > > // user with read permission can only read, but not modify/run > - assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job)); > + assert!(check_sync_job_read_access( > + &user_info, > + &read_auth_id, > + &job, > + SyncDirection::Pull, > + )); > job.owner = Some(read_auth_id.clone()); > assert!(!check_sync_job_modify_access( > &user_info, > &read_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > job.owner = None; > assert!(!check_sync_job_modify_access( > &user_info, > &read_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > job.owner = Some(write_auth_id.clone()); > assert!(!check_sync_job_modify_access( > &user_info, > &read_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // user with simple write permission can modify/run > - assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job)); > + assert!(check_sync_job_read_access( > + &user_info, > + &write_auth_id, > + &job, > + SyncDirection::Pull, > + )); > assert!(check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // but can't modify/run with deletion > @@ -620,7 +760,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > assert!(!check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + 
SyncDirection::Pull, > )); > > // but can't modify/run with deletion > @@ -620,7 +760,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > assert!(!check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // unless they have Datastore.Prune as well > @@ -628,7 +769,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > assert!(check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // changing owner is not possible > @@ -636,7 +778,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > assert!(!check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // also not to the default 'root at pam' > @@ -644,7 +787,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > assert!(!check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > // unless they have Datastore.Modify as well > @@ -653,13 +797,15 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > assert!(check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > job.owner = None; > assert!(check_sync_job_modify_access( > &user_info, > &write_auth_id, > - &job > + &job, > + SyncDirection::Pull, > )); > > Ok(()) > diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs > index 6f19a3fbd..70283510d 100644 > --- a/src/bin/proxmox-backup-proxy.rs > +++ b/src/bin/proxmox-backup-proxy.rs > @@ -589,7 +589,14 @@ async fn schedule_datastore_sync_jobs() { > Ok((config, _digest)) => config, > }; > > - for (job_id, (_, job_config)) in config.sections { > + for (job_id, (job_type, job_config)) in config.sections { > + let sync_direction = match SyncDirection::from_config_type_str(&job_type) { > + Ok(direction) => direction, > + Err(err) => { > + eprintln!("unexpected config type in sync job config - {err}"); > + continue; > + } > + }; > let job_config: SyncJobConfig = match serde_json::from_value(job_config) { > Ok(c) => c, > Err(err) => { > @@ -616,7 +623,7 @@ async fn schedule_datastore_sync_jobs() { > job_config, > &auth_id, > Some(event_str), > - SyncDirection::Pull, > + sync_direction, > false, > ) { > eprintln!("unable to start datastore sync job {job_id} - {err}"); > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From c.ebner at proxmox.com Thu Nov 7 10:10:13 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 7 Nov 2024 10:10:13 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 19/29] api: sync jobs: expose optional `sync-direction` parameter In-Reply-To: <173090643519.79072.2923413753129715762@yuna.proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-20-c.ebner@proxmox.com> <173090643519.79072.2923413753129715762@yuna.proxmox.com> Message-ID: On 11/6/24 16:20, Fabian Grünbichler wrote: > Quoting Christian Ebner (2024-10-31 13:15:09) >> Exposes and switch the config type for sync job operations based >> on the `sync-direction` parameter, exposed on required api endpoints. >> >> If not set, the default config type is `sync` and the default sync >> direction is `pull` for full backwards compatibility. Whenever >> possible, deterimne the sync direction and config type from the sync > > typo "determine" Acked! > >> job config directly rather than requiring it as optional api >> parameter.
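To spell out the mapping between section-config type and direction here: the two type strings follow this series, while the enum body below is only an illustrative sketch, not the exact implementation.

```
// Illustrative sketch: "sync" stays the section type for pull jobs (keeping
// existing configs valid), "sync-push" is the new section type for push jobs.
enum SyncDirection {
    Pull,
    Push,
}

impl SyncDirection {
    fn as_config_type_str(&self) -> &'static str {
        match self {
            SyncDirection::Pull => "sync",
            SyncDirection::Push => "sync-push",
        }
    }

    fn from_config_type_str(config_type: &str) -> Result<Self, anyhow::Error> {
        match config_type {
            "sync" => Ok(SyncDirection::Pull),
            "sync-push" => Ok(SyncDirection::Push),
            _ => anyhow::bail!("invalid sync job config type '{config_type}'"),
        }
    }
}
```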
>> >> Further, extend read and modify access checks by sync direction to >> conditionally check for the required permissions in pull and push >> direction. >> >> Signed-off-by: Christian Ebner >> --- >> changes since version 5: >> - Squashed permission check patches into this one, as they make not much >> sense without this >> - Only expose optional sync-direction parameter for api endpoints which >> require them, use the job config to determine sync-direction and/or >> config-type otherwise. >> >> src/api2/admin/sync.rs | 34 ++-- >> src/api2/config/datastore.rs | 11 +- >> src/api2/config/notifications/mod.rs | 19 +- >> src/api2/config/sync.rs | 280 ++++++++++++++++++++------- >> src/bin/proxmox-backup-proxy.rs | 11 +- >> 5 files changed, 261 insertions(+), 94 deletions(-) >> >> diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs >> index be324564c..8a242b1c3 100644 >> --- a/src/api2/admin/sync.rs >> +++ b/src/api2/admin/sync.rs >> @@ -1,6 +1,7 @@ >> //! Datastore Synchronization Job Management >> >> use anyhow::{bail, format_err, Error}; >> +use serde::Deserialize; >> use serde_json::Value; >> >> use proxmox_router::{ >> @@ -29,6 +30,10 @@ use crate::{ >> schema: DATASTORE_SCHEMA, >> optional: true, >> }, >> + "sync-direction": { >> + type: SyncDirection, >> + optional: true, >> + }, >> }, >> }, >> returns: { >> @@ -44,6 +49,7 @@ use crate::{ >> /// List all sync jobs >> pub fn list_sync_jobs( >> store: Option, >> + sync_direction: Option, >> _param: Value, >> rpcenv: &mut dyn RpcEnvironment, >> ) -> Result, Error> { >> @@ -52,8 +58,9 @@ pub fn list_sync_jobs( >> >> let (config, digest) = sync::config()?; >> >> + let sync_direction = sync_direction.unwrap_or_default(); >> let job_config_iter = config >> - .convert_to_typed_array("sync")? >> + .convert_to_typed_array(sync_direction.as_config_type_str())? 
>> .into_iter() >> .filter(|job: &SyncJobConfig| { >> if let Some(store) = &store { >> @@ -62,7 +69,9 @@ pub fn list_sync_jobs( >> true >> } >> }) >> - .filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job)); >> + .filter(|job: &SyncJobConfig| { >> + check_sync_job_read_access(&user_info, &auth_id, job, sync_direction) >> + }); >> >> let mut list = Vec::new(); >> >> @@ -106,24 +115,23 @@ pub fn run_sync_job( >> let user_info = CachedUserInfo::new()?; >> >> let (config, _digest) = sync::config()?; >> - let sync_job: SyncJobConfig = config.lookup("sync", &id)?; >> + let (config_type, config_section) = config >> + .sections >> + .get(&id) >> + .ok_or_else(|| format_err!("No sync job with id '{id}' found in config"))?; >> + >> + let sync_direction = SyncDirection::from_config_type_str(config_type)?; >> + let sync_job = SyncJobConfig::deserialize(config_section)?; >> >> - if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) { >> - bail!("permission check failed"); >> + if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job, sync_direction) { >> + bail!("permission check failed, '{auth_id}' is missing access"); >> } >> >> let job = Job::new("syncjob", &id)?; >> >> let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; >> >> - let upid_str = do_sync_job( >> - job, >> - sync_job, >> - &auth_id, >> - None, >> - SyncDirection::Pull, >> - to_stdout, >> - )?; >> + let upid_str = do_sync_job(job, sync_job, &auth_id, None, sync_direction, to_stdout)?; >> >> Ok(upid_str) >> } >> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs >> index ca6edf05a..c151eda10 100644 >> --- a/src/api2/config/datastore.rs >> +++ b/src/api2/config/datastore.rs >> @@ -13,8 +13,9 @@ use proxmox_uuid::Uuid; >> >> use pbs_api_types::{ >> Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions, >> - MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, >> - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, >> + MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA, >> + PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, >> + PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, >> }; >> use pbs_config::BackupLockGuard; >> use pbs_datastore::chunk_store::ChunkStore; >> @@ -498,8 +499,10 @@ pub async fn delete_datastore( >> for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { >> delete_verification_job(job.config.id, None, rpcenv)? >> } >> - for job in list_sync_jobs(Some(name.clone()), Value::Null, rpcenv)? { >> - delete_sync_job(job.config.id, None, rpcenv)? >> + for direction in [SyncDirection::Pull, SyncDirection::Push] { >> + for job in list_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? { >> + delete_sync_job(job.config.id, None, rpcenv)? >> + } >> } >> for job in list_prune_jobs(Some(name.clone()), Value::Null, rpcenv)? { >> delete_prune_job(job.config.id, None, rpcenv)? 
>> diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs >> index dfe82ed03..31c4851c1 100644 >> --- a/src/api2/config/notifications/mod.rs >> +++ b/src/api2/config/notifications/mod.rs >> @@ -9,7 +9,7 @@ use proxmox_schema::api; >> use proxmox_sortable_macro::sortable; >> >> use crate::api2::admin::datastore::get_datastore_list; >> -use pbs_api_types::PRIV_SYS_AUDIT; >> +use pbs_api_types::{SyncDirection, PRIV_SYS_AUDIT}; >> >> use crate::api2::admin::prune::list_prune_jobs; >> use crate::api2::admin::sync::list_sync_jobs; >> @@ -154,13 +154,15 @@ pub fn get_values( >> }); >> } >> >> - let sync_jobs = list_sync_jobs(None, param.clone(), rpcenv)?; >> - for job in sync_jobs { >> - values.push(MatchableValue { >> - field: "job-id".into(), >> - value: job.config.id, >> - comment: job.config.comment, >> - }); >> + for direction in [SyncDirection::Pull, SyncDirection::Push] { >> + let sync_jobs = list_sync_jobs(None, Some(direction), param.clone(), rpcenv)?; >> + for job in sync_jobs { >> + values.push(MatchableValue { >> + field: "job-id".into(), >> + value: job.config.id, >> + comment: job.config.comment, >> + }); >> + } >> } >> >> let verify_jobs = list_verification_jobs(None, param.clone(), rpcenv)?; >> @@ -184,6 +186,7 @@ pub fn get_values( >> "package-updates", >> "prune", >> "sync", >> + "sync-push", >> "system-mail", >> "tape-backup", >> "tape-load", >> diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs >> index 3963049e9..2f32aaccb 100644 >> --- a/src/api2/config/sync.rs >> +++ b/src/api2/config/sync.rs >> @@ -1,6 +1,7 @@ >> use ::serde::{Deserialize, Serialize}; >> use anyhow::{bail, Error}; >> use hex::FromHex; >> +use pbs_api_types::SyncDirection; >> use serde_json::Value; >> >> use proxmox_router::{http_bail, Permission, Router, RpcEnvironment}; >> @@ -8,8 +9,9 @@ use proxmox_schema::{api, param_bail}; >> >> use pbs_api_types::{ >> Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT, >> - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT, >> - PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA, >> + PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, >> + PRIV_REMOTE_AUDIT, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, >> + PRIV_REMOTE_DATASTORE_PRUNE, PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA, >> }; >> use pbs_config::sync; >> >> @@ -20,18 +22,35 @@ pub fn check_sync_job_read_access( >> user_info: &CachedUserInfo, >> auth_id: &Authid, >> job: &SyncJobConfig, >> + sync_direction: SyncDirection, >> ) -> bool { >> + // check for audit access on datastore/namespace, applies for pull and push direction >> let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >> if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { >> return false; >> } >> >> - if let Some(remote) = &job.remote { >> - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); >> - remote_privs & PRIV_REMOTE_AUDIT != 0 >> - } else { >> - let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); >> - source_ds_privs & PRIV_DATASTORE_AUDIT != 0 >> + match sync_direction { >> + SyncDirection::Pull => { >> + if let Some(remote) = &job.remote { >> + let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); >> + remote_privs & PRIV_REMOTE_AUDIT != 0 >> + } else { >> + let source_ds_privs = >> + user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); >> + source_ds_privs & 
PRIV_DATASTORE_AUDIT != 0 >> + match sync_direction { >> + SyncDirection::Pull => { >> + if let Some(remote) = &job.remote { >> + let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); >> + remote_privs & PRIV_REMOTE_AUDIT != 0 >> + } else { >> + let source_ds_privs = >> + user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); >> + source_ds_privs & PRIV_DATASTORE_AUDIT != 0 >> + } >> + } >> + SyncDirection::Push => { >> + // check for audit access on remote/datastore/namespace >> + if let Some(target_acl_path) = job.remote_acl_path() { >> + let remote_privs = user_info.lookup_privs(auth_id, &target_acl_path); >> + remote_privs & PRIV_REMOTE_AUDIT != 0 > > the other two checks above check the source side, this checks the target > side.. should we check both here? Well, AUDIT access on the source for push is already checked by the common check outside the match statement, so I am not sure what further to check here? The common part checks the job's local datastore/namespace (so the source in case of push, the target in case of pull), and the match arms check the target in case of push and the source in case of pull. > >> + } else { >> + // Remote must always be present for sync in push direction, fail otherwise >> + false >> + } >> + } >> } >> } >> >> @@ -43,41 +62,93 @@ fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool { >> } >> } >> >> -/// checks whether user can run the corresponding pull job >> +/// checks whether user can run the corresponding sync job, depending on sync direction >> /// >> -/// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly. >> +/// namespace creation/deletion ACL and backup group ownership checks happen in the pull/push code >> +/// directly. >> /// remote side checks/filters remote datastore/namespace/group access. >> pub fn check_sync_job_modify_access( >> user_info: &CachedUserInfo, >> auth_id: &Authid, >> job: &SyncJobConfig, >> + sync_direction: SyncDirection, >> ) -> bool { >> - let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >> - if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { >> - return false; >> - } >> + match sync_direction { >> + SyncDirection::Pull => { >> + let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >> + if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 >> + || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 >> + { >> + return false; >> + } >> + >> + if let Some(true) = job.remove_vanished { >> + if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 { >> + return false; >> + } >> + } >> >> - if let Some(true) = job.remove_vanished { >> - if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 { >> - return false; >> + // same permission as changing ownership after syncing >> + if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { >> + return false; >> + } >> + >> + if let Some(remote) = &job.remote { >> + let remote_privs = >> + user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); >> + return remote_privs & PRIV_REMOTE_READ != 0; >> + } >> + true >> } >> - } >> + SyncDirection::Push => { >> + // Remote must always be present for sync in push direction, fail otherwise >> + let target_privs = if let Some(target_acl_path) = job.remote_acl_path() { >> + user_info.lookup_privs(auth_id, &target_acl_path) >> + } else { >> + return false; >> + }; >> + >> + // check user is allowed to create backups on remote datastore >> + if target_privs & PRIV_REMOTE_DATASTORE_BACKUP == 0 { >> + return false; >> + } >> >> - // same permission as changing ownership after syncing >> - if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { >> - return false; >> - } >> + if let Some(true) = job.remove_vanished { >> + // check user is allowed to prune backup snapshots on remote datastore >> + if target_privs & PRIV_REMOTE_DATASTORE_PRUNE == 0 { >> +
return false; >> + } >> + } >> + >> + // check user is not the owner of the sync job, but has remote datastore modify permissions >> + if !is_correct_owner(auth_id, job) && target_privs & PRIV_REMOTE_DATASTORE_MODIFY == 0 { >> + return false; >> + } > isn't this wrong? if I am modifying/running a sync job "owned" by somebody > else, then I need to have Datastore.Read or Datastore.Modify on the *local* > source datastore+namespace.. else I could use such a sync job to exfiltrate > backups I wouldn't otherwise have access to.. But that is checked right below? The Remote datastore modify check here just allows executing the job if either the user owns the job or they have the privileges to do so. At least that was my intention. > >> + >> + // check user is allowed to read from (local) source datastore/namespace >> + let source_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >> + if source_privs & PRIV_DATASTORE_AUDIT == 0 { >> + return false; >> + } >> >> - if let Some(remote) = &job.remote { >> - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); >> - return remote_privs & PRIV_REMOTE_READ != 0; >> + // check for either datastore read or datastore backup access >> + // (the later implying read access for owned snapshot groups) >> + if source_privs & PRIV_DATASTORE_READ != 0 { >> + return true; >> + } >> + source_privs & PRIV_DATASTORE_BACKUP != 0 >> + } >> } >> - true >> } >> >> #[api( >> input: { >> - properties: {}, >> + properties: { >> + "sync-direction": { >> + type: SyncDirection, >> + optional: true, >> + }, >> + }, >> }, >> returns: { >> description: "List configured jobs.", >> @@ -92,6 +163,7 @@ pub fn check_sync_job_modify_access( >> /// List all sync jobs >> pub fn list_sync_jobs( >> _param: Value, >> + sync_direction: Option, >> rpcenv: &mut dyn RpcEnvironment, >> ) -> Result, Error> { >> let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; >> @@ -99,13 +171,16 @@ pub fn list_sync_jobs( >> >> let (config, digest) = sync::config()?; >> >> - let list = config.convert_to_typed_array("sync")?; >> + let sync_direction = sync_direction.unwrap_or_default(); >> + let list = config.convert_to_typed_array(sync_direction.as_config_type_str())?; >> >> rpcenv["digest"] = hex::encode(digest).into(); >> >> let list = list >> .into_iter() >> - .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job)) >> + .filter(|sync_job| { >> + check_sync_job_read_access(&user_info, &auth_id, sync_job, sync_direction) >> + }) >> .collect(); >> Ok(list) >> } >> @@ -118,6 +193,10 @@ pub fn list_sync_jobs( >> type: SyncJobConfig, >> flatten: true, >> }, >> + "sync-direction": { >> + type: SyncDirection, >> + optional: true, >> + }, >> }, >> }, >> access: { >> @@ -128,14 +207,16 @@ pub fn list_sync_jobs( >> /// Create a new sync job.
>> pub fn create_sync_job( >> config: SyncJobConfig, >> + sync_direction: Option, >> rpcenv: &mut dyn RpcEnvironment, >> ) -> Result<(), Error> { >> let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; >> let user_info = CachedUserInfo::new()?; >> + let sync_direction = sync_direction.unwrap_or_default(); >> >> let _lock = sync::lock_config()?; >> >> - if !check_sync_job_modify_access(&user_info, &auth_id, &config) { >> + if !check_sync_job_modify_access(&user_info, &auth_id, &config, sync_direction) { >> bail!("permission check failed"); >> } >> >> @@ -158,7 +239,7 @@ pub fn create_sync_job( >> param_bail!("id", "job '{}' already exists.", config.id); >> } >> >> - section_config.set_data(&config.id, "sync", &config)?; >> + section_config.set_data(&config.id, sync_direction.as_config_type_str(), &config)?; >> >> sync::save_config(§ion_config)?; >> >> @@ -188,8 +269,17 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result> >> let (config, digest) = sync::config()?; >> >> - let sync_job = config.lookup("sync", &id)?; >> - if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) { >> + let (sync_job, sync_direction) = >> + if let Some((config_type, config_section)) = config.sections.get(&id) { >> + ( >> + SyncJobConfig::deserialize(config_section)?, >> + SyncDirection::from_config_type_str(config_type)?, >> + ) >> + } else { >> + http_bail!(NOT_FOUND, "job '{id}' does not exist.") >> + }; >> + >> + if !check_sync_job_read_access(&user_info, &auth_id, &sync_job, sync_direction) { >> bail!("permission check failed"); >> } >> >> @@ -284,7 +374,15 @@ pub fn update_sync_job( >> crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; >> } >> >> - let mut data: SyncJobConfig = config.lookup("sync", &id)?; >> + let (mut data, sync_direction) = >> + if let Some((config_type, config_section)) = config.sections.get(&id) { >> + ( >> + SyncJobConfig::deserialize(config_section)?, >> + SyncDirection::from_config_type_str(config_type)?, >> + ) >> + } else { >> + http_bail!(NOT_FOUND, "job '{id}' does not exist.") >> + }; >> >> if let Some(delete) = delete { >> for delete_prop in delete { >> @@ -405,11 +503,11 @@ pub fn update_sync_job( >> } >> } >> >> - if !check_sync_job_modify_access(&user_info, &auth_id, &data) { >> + if !check_sync_job_modify_access(&user_info, &auth_id, &data, sync_direction) { >> bail!("permission check failed"); >> } >> >> - config.set_data(&id, "sync", &data)?; >> + config.set_data(&id, sync_direction.as_config_type_str(), &data)?; >> >> sync::save_config(&config)?; >> >> @@ -456,17 +554,16 @@ pub fn delete_sync_job( >> crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; >> } >> >> - match config.lookup("sync", &id) { >> - Ok(job) => { >> - if !check_sync_job_modify_access(&user_info, &auth_id, &job) { >> - bail!("permission check failed"); >> - } >> - config.sections.remove(&id); >> - } >> - Err(_) => { >> - http_bail!(NOT_FOUND, "job '{}' does not exist.", id) >> + if let Some((config_type, config_section)) = config.sections.get(&id) { >> + let sync_direction = SyncDirection::from_config_type_str(config_type)?; >> + let job = SyncJobConfig::deserialize(config_section)?; >> + if !check_sync_job_modify_access(&user_info, &auth_id, &job, sync_direction) { >> + bail!("permission check failed"); >> } >> - }; >> + config.sections.remove(&id); >> + } else { >> + http_bail!(NOT_FOUND, "job '{}' does not exist.", id) >> + } >> >> sync::save_config(&config)?; >> >> @@ -536,39 +633,67 @@ 
acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> }; >> >> // should work without ACLs >> - assert!(check_sync_job_read_access(&user_info, root_auth_id, &job)); >> - assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job)); >> + assert!(check_sync_job_read_access( >> + &user_info, >> + root_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> + assert!(check_sync_job_modify_access( >> + &user_info, >> + root_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // user without permissions must fail >> assert!(!check_sync_job_read_access( >> &user_info, >> &no_perm_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &no_perm_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // reading without proper read permissions on either remote or local must fail >> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(!check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // reading without proper read permissions on local end must fail >> job.remote = Some("remote1".to_string()); >> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(!check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // reading without proper read permissions on remote end must fail >> job.remote = Some("remote0".to_string()); >> job.store = "localstore1".to_string(); >> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(!check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // writing without proper write permissions on either end must fail >> job.store = "localstore0".to_string(); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // writing without proper write permissions on local end must fail >> @@ -580,39 +705,54 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // reset remote to one where users have access >> job.remote = Some("remote1".to_string()); >> >> // user with read permission can only read, but not modify/run >> - assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> job.owner = Some(read_auth_id.clone()); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &read_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> job.owner = None; >> assert!(!check_sync_job_modify_access( >> &user_info, >> &read_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> job.owner = Some(write_auth_id.clone()); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &read_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // user with simple write permission can modify/run >> - assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job)); >> + assert!(check_sync_job_read_access( >> + &user_info, >> + &write_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + 
SyncDirection::Pull, >> )); >> >> // but can't modify/run with deletion >> @@ -620,7 +760,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // unless they have Datastore.Prune as well >> @@ -628,7 +769,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // changing owner is not possible >> @@ -636,7 +778,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // also not to the default 'root at pam' >> @@ -644,7 +787,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // unless they have Datastore.Modify as well >> @@ -653,13 +797,15 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> job.owner = None; >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> Ok(()) >> diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs >> index 6f19a3fbd..70283510d 100644 >> --- a/src/bin/proxmox-backup-proxy.rs >> +++ b/src/bin/proxmox-backup-proxy.rs >> @@ -589,7 +589,14 @@ async fn schedule_datastore_sync_jobs() { >> Ok((config, _digest)) => config, >> }; >> >> - for (job_id, (_, job_config)) in config.sections { >> + for (job_id, (job_type, job_config)) in config.sections { >> + let sync_direction = match SyncDirection::from_config_type_str(&job_type) { >> + Ok(direction) => direction, >> + Err(err) => { >> + eprintln!("unexpected config type in sync job config - {err}"); >> + continue; >> + } >> + }; >> let job_config: SyncJobConfig = match serde_json::from_value(job_config) { >> Ok(c) => c, >> Err(err) => { >> @@ -616,7 +623,7 @@ async fn schedule_datastore_sync_jobs() { >> job_config, >> &auth_id, >> Some(event_str), >> - SyncDirection::Pull, >> + sync_direction, >> false, >> ) { >> eprintln!("unable to start datastore sync job {job_id} - {err}"); >> -- >> 2.39.5 >> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >> >> From c.ebner at proxmox.com Thu Nov 7 10:18:02 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 7 Nov 2024 10:18:02 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 15/29] api: push: implement endpoint for sync in push direction In-Reply-To: <173090583969.79072.15737271044931374423@yuna.proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-16-c.ebner@proxmox.com> <173090583969.79072.15737271044931374423@yuna.proxmox.com> Message-ID: <9bcf24f5-14aa-4170-b663-fd25486bd78a@proxmox.com> On 11/6/24 16:10, Fabian Grünbichler wrote: > Quoting Christian Ebner (2024-10-31 13:15:05) >> Expose the sync job in push direction via a dedicated API endpoint, >> analogous to the pull direction.
>> >> Signed-off-by: Christian Ebner >> --- >> changes since version 5: >> - Avoid double deserialization for backup namespaces >> - Drop TryFrom<&SyncJobConfig> for PushParameters impl, as constructing >> them requires an api call to fetch the remote api version now >> >> src/api2/mod.rs | 2 + >> src/api2/push.rs | 183 +++++++++++++++++++++++++++++++++++++++++++++++ >> 2 files changed, 185 insertions(+) >> create mode 100644 src/api2/push.rs >> >> diff --git a/src/api2/mod.rs b/src/api2/mod.rs >> index a83e4c205..03596326b 100644 >> --- a/src/api2/mod.rs >> +++ b/src/api2/mod.rs >> @@ -12,6 +12,7 @@ pub mod helpers; >> pub mod node; >> pub mod ping; >> pub mod pull; >> +pub mod push; >> pub mod reader; >> pub mod status; >> pub mod tape; >> @@ -29,6 +30,7 @@ const SUBDIRS: SubdirMap = &sorted!([ >> ("nodes", &node::ROUTER), >> ("ping", &ping::ROUTER), >> ("pull", &pull::ROUTER), >> + ("push", &push::ROUTER), >> ("reader", &reader::ROUTER), >> ("status", &status::ROUTER), >> ("tape", &tape::ROUTER), >> diff --git a/src/api2/push.rs b/src/api2/push.rs >> new file mode 100644 >> index 000000000..28f4417d1 >> --- /dev/null >> +++ b/src/api2/push.rs >> @@ -0,0 +1,183 @@ >> +use anyhow::{format_err, Error}; >> +use futures::{future::FutureExt, select}; >> +use tracing::info; >> + >> +use pbs_api_types::{ >> + Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA, >> + GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_READ, >> + PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, REMOTE_ID_SCHEMA, >> + REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, >> +}; >> +use proxmox_rest_server::WorkerTask; >> +use proxmox_router::{Permission, Router, RpcEnvironment}; >> +use proxmox_schema::api; >> + >> +use pbs_config::CachedUserInfo; >> + >> +use crate::server::push::{push_store, PushParameters}; >> + >> +/// Check if the provided user is allowed to read from the local source and act on the remote >> +/// target for pushing content >> +pub fn check_push_privs( > > not used anywhere except here, could be private? Acked! > >> + auth_id: &Authid, >> + store: &str, >> + namespace: &BackupNamespace, >> + remote: &str, >> + remote_store: &str, >> + remote_ns: Option<&BackupNamespace>, > > since we don't actually need to support not setting the root namespace, the > Option here can go away.. Acked! > >> + delete: bool, >> +) -> Result<(), Error> { >> + let user_info = CachedUserInfo::new()?; >> + >> + let target_acl_path = match remote_ns { >> + Some(ns) => ns.remote_acl_path(remote, remote_store), >> + None => vec!["remote", remote, remote_store], >> + }; > > which makes this simpler > >> + >> + // Check user is allowed to backup to remote/// >> + user_info.check_privs( >> + auth_id, >> + &target_acl_path, >> + PRIV_REMOTE_DATASTORE_BACKUP, >> + false, >> + )?; >> + >> + if delete { >> + // Check user is allowed to prune remote datastore >> + user_info.check_privs( >> + auth_id, >> + &target_acl_path, >> + PRIV_REMOTE_DATASTORE_PRUNE, >> + false, >> + )?; >> + } >> + >> + // Check user is allowed to read source datastore >> + user_info.check_privs( >> + auth_id, >> + &namespace.acl_path(store), >> + PRIV_DATASTORE_READ, > > isn't this too restrictive? should be PRIV_DATASTORE_BACKUP *or* READ? > > the push task will then filter the local namespaces/backup groups/.. by what > the user is allowed to see.. 
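A minimal sketch of that combined check — `check_local_source_access` is a made-up helper name, everything else (the privilege constants, `lookup_privs`, `acl_path`) is what the patch already uses:

```
use anyhow::{bail, Error};
use pbs_api_types::{Authid, BackupNamespace, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ};
use pbs_config::CachedUserInfo;

// Accept either Datastore.Read or Datastore.Backup on the local source;
// the push task later filters namespaces and backup groups by what the
// user is actually allowed to see.
fn check_local_source_access(
    user_info: &CachedUserInfo,
    auth_id: &Authid,
    store: &str,
    namespace: &BackupNamespace,
) -> Result<(), Error> {
    let source_privs = user_info.lookup_privs(auth_id, &namespace.acl_path(store));
    if source_privs & (PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP) == 0 {
        bail!("missing read or backup privilege on local source datastore");
    }
    Ok(())
}
```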
Agreed, extended the check to also allow access if the user has PRIV_DATASTORE_BACKUP, along the lines of the sketch above. > >> + false, >> + )?; >> + >> + Ok(()) >> +} >> + >> +#[api( >> + input: { >> + properties: { >> + store: { >> + schema: DATASTORE_SCHEMA, >> + }, >> + ns: { >> + type: BackupNamespace, >> + optional: true, >> + }, >> + remote: { >> + schema: REMOTE_ID_SCHEMA, >> + }, >> + "remote-store": { >> + schema: DATASTORE_SCHEMA, >> + }, >> + "remote-ns": { >> + type: BackupNamespace, >> + optional: true, >> + }, >> + "remove-vanished": { >> + schema: REMOVE_VANISHED_BACKUPS_SCHEMA, >> + optional: true, >> + }, >> + "max-depth": { >> + schema: NS_MAX_DEPTH_REDUCED_SCHEMA, >> + optional: true, >> + }, >> + "group-filter": { >> + schema: GROUP_FILTER_LIST_SCHEMA, >> + optional: true, >> + }, >> + limit: { >> + type: RateLimitConfig, >> + flatten: true, >> + }, >> + "transfer-last": { >> + schema: TRANSFER_LAST_SCHEMA, >> + optional: true, >> + }, >> + }, >> + }, >> + access: { >> + description: r###"The user needs Remote.Backup privilege on '/remote/{remote}/{remote-store}' >> +and needs to own the backup group. Datastore.Read is required on '/datastore/{store}'. >> +The delete flag additionally requires the Remote.Prune privilege on '/remote/{remote}/{remote-store}'. > this is partly wrong and/or weirdly phrased ;) maybe something like > > The user needs (at least) Remote.DatastoreBackup on '/remote/{remote}/{remote-store}[/{remote-ns}]', and either Datastore.Backup or Datastore.Read on '/datastore/{store}[/{ns}]'. The 'remove-vanished' parameter might require additional privileges. Yeah, that was indeed not adapted and is left over from a previous iteration; adapted it to your suggestions. > >> +"###, >> + permission: &Permission::Anybody, >> + }, >> +)] >> +/// Push store to other repository >> +#[allow(clippy::too_many_arguments)] >> +async fn push( >> + store: String, >> + ns: Option, >> + remote: String, >> + remote_store: String, >> + remote_ns: Option, >> + remove_vanished: Option, >> + max_depth: Option, >> + group_filter: Option>, >> + limit: RateLimitConfig, >> + transfer_last: Option, >> + rpcenv: &mut dyn RpcEnvironment, >> +) -> Result { >> + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; >> + let delete = remove_vanished.unwrap_or(false); >> + let ns = ns.unwrap_or_default(); > > this could also be done for remote_ns Acked! > >> + >> + check_push_privs( >> + &auth_id, >> + &store, >> + &ns, >> + &remote, >> + &remote_store, >> + remote_ns.as_ref(), >> + delete, >> + )?; >> + >> + let push_params = PushParameters::new( >> + &store, >> + ns, >> + &remote, >> + &remote_store, >> + remote_ns.unwrap_or_default(), > > since we unwrap it here anyway ;) Acked! > >> + auth_id.clone(), >> + remove_vanished, >> + max_depth, >> + group_filter, >> + limit, >> + transfer_last, >> + ) >> + .await?; >> + >> + let upid_str = WorkerTask::spawn( >> + "sync", >> + Some(store.clone()), >> + auth_id.to_string(), >> + true, >> + move |worker| async move { >> + info!("push datastore '{store}' to '{remote}/{remote_store}'"); > this is a bit redundant (and incomplete), the push output will contain this > correctly extended with namespace information.. Okay, agreed. Removed this and the log output below. > >> + >> + let push_future = push_store(push_params); >> + (select!
{ >> success = push_future.fuse() => success, >> abort = worker.abort_future().map(|_| Err(format_err!("push aborted"))) => abort, >> + })?; >> + >> + info!("push datastore '{store}' end"); > same here > >> + >> + Ok(()) >> + }, >> + )?; >> + >> + Ok(upid_str) >> +} >> + >> +pub const ROUTER: Router = Router::new().post(&API_METHOD_PUSH); >> -- >> 2.39.5 >> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> }; >> >> // should work without ACLs >> - assert!(check_sync_job_read_access(&user_info, root_auth_id, &job)); >> - assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job)); >> + assert!(check_sync_job_read_access( >> + &user_info, >> + root_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> + assert!(check_sync_job_modify_access( >> + &user_info, >> + root_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // user without permissions must fail >> assert!(!check_sync_job_read_access( >> &user_info, >> &no_perm_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &no_perm_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // reading without proper read permissions on either remote or local must fail >> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(!check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // reading without proper read permissions on local end must fail >> job.remote = Some("remote1".to_string()); >> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(!check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // reading without proper read permissions on remote end must fail >> job.remote = Some("remote0".to_string()); >> job.store = "localstore1".to_string(); >> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(!check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> >> // writing without proper write permissions on either end must fail >> job.store = "localstore0".to_string(); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // writing without proper write permissions on local end must fail >> @@ -580,39 +705,54 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // reset remote to one where users have access >> job.remote = Some("remote1".to_string()); >> >> // user with read permission can only read, but not modify/run >> - assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job)); >> + assert!(check_sync_job_read_access( >> + &user_info, >> + &read_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> job.owner = Some(read_auth_id.clone()); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &read_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> job.owner = None; >> assert!(!check_sync_job_modify_access( >> &user_info, >> &read_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> job.owner = Some(write_auth_id.clone()); >> assert!(!check_sync_job_modify_access( >> &user_info, >> &read_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // user with simple write permission can modify/run >> - assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job)); >> + assert!(check_sync_job_read_access( >> + &user_info, >> + &write_auth_id, >> + &job, >> + SyncDirection::Pull, >> + )); >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + 
SyncDirection::Pull, >> )); >> >> // but can't modify/run with deletion >> @@ -620,7 +760,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // unless they have Datastore.Prune as well >> @@ -628,7 +769,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // changing owner is not possible >> @@ -636,7 +778,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // also not to the default 'root at pam' >> @@ -644,7 +787,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(!check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> // unless they have Datastore.Modify as well >> @@ -653,13 +797,15 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> job.owner = None; >> assert!(check_sync_job_modify_access( >> &user_info, >> &write_auth_id, >> - &job >> + &job, >> + SyncDirection::Pull, >> )); >> >> Ok(()) >> diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs >> index 6f19a3fbd..70283510d 100644 >> --- a/src/bin/proxmox-backup-proxy.rs >> +++ b/src/bin/proxmox-backup-proxy.rs >> @@ -589,7 +589,14 @@ async fn schedule_datastore_sync_jobs() { >> Ok((config, _digest)) => config, >> }; >> >> - for (job_id, (_, job_config)) in config.sections { >> + for (job_id, (job_type, job_config)) in config.sections { >> + let sync_direction = match SyncDirection::from_config_type_str(&job_type) { >> + Ok(direction) => direction, >> + Err(err) => { >> + eprintln!("unexpected config type in sync job config - {err}"); >> + continue; >> + } >> + }; >> let job_config: SyncJobConfig = match serde_json::from_value(job_config) { >> Ok(c) => c, >> Err(err) => { >> @@ -616,7 +623,7 @@ async fn schedule_datastore_sync_jobs() { >> job_config, >> &auth_id, >> Some(event_str), >> - SyncDirection::Pull, >> + sync_direction, >> false, >> ) { >> eprintln!("unable to start datastore sync job {job_id} - {err}"); >> -- >> 2.39.5 >> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >> >> From c.ebner at proxmox.com Thu Nov 7 10:27:49 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 7 Nov 2024 10:27:49 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 13/29] fix #3044: server: implement push support for sync operations In-Reply-To: <173089426545.79072.10424056569024402158@yuna.proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-14-c.ebner@proxmox.com> <173089426545.79072.10424056569024402158@yuna.proxmox.com> Message-ID: Acked to the suggestions (not going to address them individually, as there are a lot). Already adapted most of the suggestions, especially regarding the namespace mapping and critical issues such as the missing target namespace filtering in the namespace's remove vanished code path.
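To illustrate the gist of that last fix (just a rough sketch with made-up names, not the actual patch): when handling remove vanished, only namespaces below the sync job's target namespace may be considered as removal candidates, anything else on the target is out of scope for the job:

```rust
/// Minimal sketch: restrict remove-vanished candidates to sub-namespaces
/// of the job's target namespace. Namespaces are modelled as plain path
/// strings here instead of the real BackupNamespace type.
fn removal_candidates(target_ns: &str, target_namespaces: Vec<String>) -> Vec<String> {
    target_namespaces
        .into_iter()
        .filter(|ns| ns.starts_with(&format!("{target_ns}/")))
        .collect()
}

fn main() {
    let namespaces = vec!["a".into(), "a/b".into(), "a/b/c".into(), "x/y".into()];
    // only namespaces below the target anchor "a/b" may be removed as vanished
    assert_eq!(removal_candidates("a/b", namespaces), vec!["a/b/c".to_string()]);
}
```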
Also, as discussed offlist, the list_namespace() method in the SyncSource trait should be extended to allow for filtering, so that a sync job does not leak namespace names to users lacking the required permissions on that namespace. From f.gruenbichler at proxmox.com Thu Nov 7 10:40:10 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 07 Nov 2024 10:40:10 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 19/29] api: sync jobs: expose optional `sync-direction` parameter In-Reply-To: References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-20-c.ebner@proxmox.com> <173090643519.79072.2923413753129715762@yuna.proxmox.com> Message-ID: <1730971860.alw3ph9r8x.astroid@yuna.none> On November 7, 2024 10:10 am, Christian Ebner wrote: > On 11/6/24 16:20, Fabian Grünbichler wrote: >> Quoting Christian Ebner (2024-10-31 13:15:09) >>> @@ -20,18 +22,35 @@ pub fn check_sync_job_read_access( >>> user_info: &CachedUserInfo, >>> auth_id: &Authid, >>> job: &SyncJobConfig, >>> + sync_direction: SyncDirection, >>> ) -> bool { >>> + // check for audit access on datastore/namespace, applies for pull and push direction >>> let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >>> if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { >>> return false; >>> } >>> >>> - if let Some(remote) = &job.remote { >>> - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); >>> - remote_privs & PRIV_REMOTE_AUDIT != 0 >>> - } else { >>> - let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); >>> - source_ds_privs & PRIV_DATASTORE_AUDIT != 0 >>> + match sync_direction { >>> + SyncDirection::Pull => { >>> + if let Some(remote) = &job.remote { >>> + let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); >>> + remote_privs & PRIV_REMOTE_AUDIT != 0 >>> + } else { >>> + let source_ds_privs = >>> + user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); >>> + source_ds_privs & PRIV_DATASTORE_AUDIT != 0 >>> + } >>> + } >>> + SyncDirection::Push => { >>> + // check for audit access on remote/datastore/namespace >>> + if let Some(target_acl_path) = job.remote_acl_path() { >>> + let remote_privs = user_info.lookup_privs(auth_id, &target_acl_path); >>> + remote_privs & PRIV_REMOTE_AUDIT != 0 >> >> the other two checks above check the source side, this checks the target >> side.. should we check both here? > > Well, AUDIT access on the source for push is already checked by the > common check outside the match statement, so not sure what further to > check here? > > The common part checks the local side (so source in case of push, target in case of > pull) and the match arms check the target in case of push and the source in case > of pull. right, sorry for the noise! > >> >>> + } else { >>> + // Remote must always be present for sync in push direction, fail otherwise >>> + false >>> + } >>> + } >>> } >>> } >>> >>> @@ -43,41 +62,93 @@ fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool { >>> } >>> } >>> >>> -/// checks whether user can run the corresponding pull job >>> +/// checks whether user can run the corresponding sync job, depending on sync direction >>> /// >>> -/// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly. >>> +/// namespace creation/deletion ACL and backup group ownership checks happen in the pull/push code >>> +/// directly. >>> /// remote side checks/filters remote datastore/namespace/group access.
>>> pub fn check_sync_job_modify_access( >>> user_info: &CachedUserInfo, >>> auth_id: &Authid, >>> job: &SyncJobConfig, >>> + sync_direction: SyncDirection, >>> ) -> bool { >>> - let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >>> - if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { >>> - return false; >>> - } >>> + match sync_direction { >>> + SyncDirection::Pull => { >>> + let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >>> + if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 >>> + || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 >>> + { >>> + return false; >>> + } >>> + >>> + if let Some(true) = job.remove_vanished { >>> + if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 { >>> + return false; >>> + } >>> + } >>> >>> - if let Some(true) = job.remove_vanished { >>> - if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 { >>> - return false; >>> + // same permission as changing ownership after syncing >>> + if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { >>> + return false; >>> + } >>> + >>> + if let Some(remote) = &job.remote { >>> + let remote_privs = >>> + user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); >>> + return remote_privs & PRIV_REMOTE_READ != 0; >>> + } >>> + true >>> } >>> - } >>> + SyncDirection::Push => { >>> + // Remote must always be present for sync in push direction, fail otherwise >>> + let target_privs = if let Some(target_acl_path) = job.remote_acl_path() { >>> + user_info.lookup_privs(auth_id, &target_acl_path) >>> + } else { >>> + return false; >>> + }; >>> + >>> + // check user is allowed to create backups on remote datastore >>> + if target_privs & PRIV_REMOTE_DATASTORE_BACKUP == 0 { >>> + return false; >>> + } >>> >>> - // same permission as changing ownership after syncing >>> - if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { >>> - return false; >>> - } >>> + if let Some(true) = job.remove_vanished { >>> + // check user is allowed to prune backup snapshots on remote datastore >>> + if target_privs & PRIV_REMOTE_DATASTORE_PRUNE == 0 { >>> + return false; >>> + } >>> + } >>> + >>> + // check user is not the owner of the sync job, but has remote datastore modify permissions >>> + if !is_correct_owner(auth_id, job) && target_privs & PRIV_REMOTE_DATASTORE_MODIFY == 0 { >>> + return false; >>> + } >> >> isn't this wrong? if I am modifying/running a sync job "owned" by somebody >> else, then I need to have Datastore.Read or Datastore.Modify on the *local* >> source datastore+namespace.. else I could use such a sync job to exfiltrate >> backups I wouldn't otherwise have access to.. > > But that is checked right below? The Remote datastore modify check here > just allows to execute the job if either the user owns the job, or it > has the privs to do so. At least that was my intention. but the local user affects the source side, not the target side.. if user A can setup a sync job "owned" by user B, they get effective access to all backups of user B.. 
so they need to have the privs to access those backups: - Datastore.Read on the *source* datastore+namespace (as that allows reading non-owned backups) OR - Datastore.Modify on the *source* datastore+namespace (as that allows changing ownership of backup groups, which would allow transfering ownership to user A and reading the backups that way) whether the user is allowed to create/remove namespaces on the target (what is currently checked) is not related at all to this decision.. that only comes into play at job execution time when those actions are taken. the checks below just ensure that the user can see the datastore+namespace, and can read all or owned backups (but that is irrespective of whether they own the sync job themselves, or not). there is some overlap here: - if the user has Datastore.Read, that covers both foreign and own jobs - if the user just has Datastore.Backup, they also need Datastore.Modify if the job is owned by a different user, but they don't need it for their own job.. I hope this makes it more clear what I meant :) >> >>> + >>> check user is allowed to read from (local) source datastore/namespace >>> + let source_privs = user_info.lookup_privs(auth_id, &job.acl_path()); >>> + if source_privs & PRIV_DATASTORE_AUDIT == 0 { >>> + return false; >>> + } >>> >>> - if let Some(remote) = &job.remote { >>> - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); >>> - return remote_privs & PRIV_REMOTE_READ != 0; >>> + // check for either datastore read or datastore backup access >>> + // (the later implying read access for owned snapshot groups) >>> + if source_privs & PRIV_DATASTORE_READ != 0 { >>> + return true; >>> + } >>> + source_privs & PRIV_DATASTORE_BACKUP != 0 >>> + } >>> } >>> - true >>> } >>> >>> #[api( >>> input: { >>> - properties: {}, >>> + properties: { >>> + "sync-direction": { >>> + type: SyncDirection, >>> + optional: true, >>> + }, >>> + }, >>> }, >>> returns: { >>> description: "List configured jobs.", >>> @@ -92,6 +163,7 @@ pub fn check_sync_job_modify_access( >>> /// List all sync jobs >>> pub fn list_sync_jobs( >>> _param: Value, >>> + sync_direction: Option, >>> rpcenv: &mut dyn RpcEnvironment, >>> ) -> Result, Error> { >>> let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; >>> @@ -99,13 +171,16 @@ pub fn list_sync_jobs( >>> >>> let (config, digest) = sync::config()?; >>> >>> - let list = config.convert_to_typed_array("sync")?; >>> + let sync_direction = sync_direction.unwrap_or_default(); >>> + let list = config.convert_to_typed_array(sync_direction.as_config_type_str())?; >>> >>> rpcenv["digest"] = hex::encode(digest).into(); >>> >>> let list = list >>> .into_iter() >>> - .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job)) >>> + .filter(|sync_job| { >>> + check_sync_job_read_access(&user_info, &auth_id, sync_job, sync_direction) >>> + }) >>> .collect(); >>> Ok(list) >>> } >>> @@ -118,6 +193,10 @@ pub fn list_sync_jobs( >>> type: SyncJobConfig, >>> flatten: true, >>> }, >>> + "sync-direction": { >>> + type: SyncDirection, >>> + optional: true, >>> + }, >>> }, >>> }, >>> access: { >>> @@ -128,14 +207,16 @@ pub fn list_sync_jobs( >>> /// Create a new sync job. 
>>> pub fn create_sync_job( >>> config: SyncJobConfig, >>> + sync_direction: Option, >>> rpcenv: &mut dyn RpcEnvironment, >>> ) -> Result<(), Error> { >>> let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; >>> let user_info = CachedUserInfo::new()?; >>> + let sync_direction = sync_direction.unwrap_or_default(); >>> >>> let _lock = sync::lock_config()?; >>> >>> - if !check_sync_job_modify_access(&user_info, &auth_id, &config) { >>> + if !check_sync_job_modify_access(&user_info, &auth_id, &config, sync_direction) { >>> bail!("permission check failed"); >>> } >>> >>> @@ -158,7 +239,7 @@ pub fn create_sync_job( >>> param_bail!("id", "job '{}' already exists.", config.id); >>> } >>> >>> - section_config.set_data(&config.id, "sync", &config)?; >>> + section_config.set_data(&config.id, sync_direction.as_config_type_str(), &config)?; >>> >>> sync::save_config(§ion_config)?; >>> >>> @@ -188,8 +269,17 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result>> >>> let (config, digest) = sync::config()?; >>> >>> - let sync_job = config.lookup("sync", &id)?; >>> - if !check_sync_job_read_access(&user_info, &auth_id, &sync_job) { >>> + let (sync_job, sync_direction) = >>> + if let Some((config_type, config_section)) = config.sections.get(&id) { >>> + ( >>> + SyncJobConfig::deserialize(config_section)?, >>> + SyncDirection::from_config_type_str(config_type)?, >>> + ) >>> + } else { >>> + http_bail!(NOT_FOUND, "job '{id}' does not exist.") >>> + }; >>> + >>> + if !check_sync_job_read_access(&user_info, &auth_id, &sync_job, sync_direction) { >>> bail!("permission check failed"); >>> } >>> >>> @@ -284,7 +374,15 @@ pub fn update_sync_job( >>> crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; >>> } >>> >>> - let mut data: SyncJobConfig = config.lookup("sync", &id)?; >>> + let (mut data, sync_direction) = >>> + if let Some((config_type, config_section)) = config.sections.get(&id) { >>> + ( >>> + SyncJobConfig::deserialize(config_section)?, >>> + SyncDirection::from_config_type_str(config_type)?, >>> + ) >>> + } else { >>> + http_bail!(NOT_FOUND, "job '{id}' does not exist.") >>> + }; >>> >>> if let Some(delete) = delete { >>> for delete_prop in delete { >>> @@ -405,11 +503,11 @@ pub fn update_sync_job( >>> } >>> } >>> >>> - if !check_sync_job_modify_access(&user_info, &auth_id, &data) { >>> + if !check_sync_job_modify_access(&user_info, &auth_id, &data, sync_direction) { >>> bail!("permission check failed"); >>> } >>> >>> - config.set_data(&id, "sync", &data)?; >>> + config.set_data(&id, sync_direction.as_config_type_str(), &data)?; >>> >>> sync::save_config(&config)?; >>> >>> @@ -456,17 +554,16 @@ pub fn delete_sync_job( >>> crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; >>> } >>> >>> - match config.lookup("sync", &id) { >>> - Ok(job) => { >>> - if !check_sync_job_modify_access(&user_info, &auth_id, &job) { >>> - bail!("permission check failed"); >>> - } >>> - config.sections.remove(&id); >>> - } >>> - Err(_) => { >>> - http_bail!(NOT_FOUND, "job '{}' does not exist.", id) >>> + if let Some((config_type, config_section)) = config.sections.get(&id) { >>> + let sync_direction = SyncDirection::from_config_type_str(config_type)?; >>> + let job = SyncJobConfig::deserialize(config_section)?; >>> + if !check_sync_job_modify_access(&user_info, &auth_id, &job, sync_direction) { >>> + bail!("permission check failed"); >>> } >>> - }; >>> + config.sections.remove(&id); >>> + } else { >>> + http_bail!(NOT_FOUND, "job '{}' does not 
exist.", id) >>> + } >>> >>> sync::save_config(&config)?; >>> >>> @@ -536,39 +633,67 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >>> }; >>> >>> // should work without ACLs >>> - assert!(check_sync_job_read_access(&user_info, root_auth_id, &job)); >>> - assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job)); >>> + assert!(check_sync_job_read_access( >>> + &user_info, >>> + root_auth_id, >>> + &job, >>> + SyncDirection::Pull, >>> + )); >>> + assert!(check_sync_job_modify_access( >>> + &user_info, >>> + root_auth_id, >>> + &job, >>> + SyncDirection::Pull, >>> + )); >>> >>> // user without permissions must fail >>> assert!(!check_sync_job_read_access( >>> &user_info, >>> &no_perm_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &no_perm_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // reading without proper read permissions on either remote or local must fail >>> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >>> + assert!(!check_sync_job_read_access( >>> + &user_info, >>> + &read_auth_id, >>> + &job, >>> + SyncDirection::Pull, >>> + )); >>> >>> // reading without proper read permissions on local end must fail >>> job.remote = Some("remote1".to_string()); >>> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >>> + assert!(!check_sync_job_read_access( >>> + &user_info, >>> + &read_auth_id, >>> + &job, >>> + SyncDirection::Pull, >>> + )); >>> >>> // reading without proper read permissions on remote end must fail >>> job.remote = Some("remote0".to_string()); >>> job.store = "localstore1".to_string(); >>> - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); >>> + assert!(!check_sync_job_read_access( >>> + &user_info, >>> + &read_auth_id, >>> + &job, >>> + SyncDirection::Pull, >>> + )); >>> >>> // writing without proper write permissions on either end must fail >>> job.store = "localstore0".to_string(); >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // writing without proper write permissions on local end must fail >>> @@ -580,39 +705,54 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // reset remote to one where users have access >>> job.remote = Some("remote1".to_string()); >>> >>> // user with read permission can only read, but not modify/run >>> - assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job)); >>> + assert!(check_sync_job_read_access( >>> + &user_info, >>> + &read_auth_id, >>> + &job, >>> + SyncDirection::Pull, >>> + )); >>> job.owner = Some(read_auth_id.clone()); >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &read_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> job.owner = None; >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &read_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> job.owner = Some(write_auth_id.clone()); >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &read_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // user with simple write permission can modify/run >>> - assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job)); >>> + 
assert!(check_sync_job_read_access( >>> + &user_info, >>> + &write_auth_id, >>> + &job, >>> + SyncDirection::Pull, >>> + )); >>> assert!(check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // but can't modify/run with deletion >>> @@ -620,7 +760,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // unless they have Datastore.Prune as well >>> @@ -628,7 +769,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >>> assert!(check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // changing owner is not possible >>> @@ -636,7 +778,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // also not to the default 'root at pam' >>> @@ -644,7 +787,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >>> assert!(!check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> // unless they have Datastore.Modify as well >>> @@ -653,13 +797,15 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator >>> assert!(check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> job.owner = None; >>> assert!(check_sync_job_modify_access( >>> &user_info, >>> &write_auth_id, >>> - &job >>> + &job, >>> + SyncDirection::Pull, >>> )); >>> >>> Ok(()) >>> diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs >>> index 6f19a3fbd..70283510d 100644 >>> --- a/src/bin/proxmox-backup-proxy.rs >>> +++ b/src/bin/proxmox-backup-proxy.rs >>> @@ -589,7 +589,14 @@ async fn schedule_datastore_sync_jobs() { >>> Ok((config, _digest)) => config, >>> }; >>> >>> - for (job_id, (_, job_config)) in config.sections { >>> + for (job_id, (job_type, job_config)) in config.sections { >>> + let sync_direction = match SyncDirection::from_config_type_str(&job_type) { >>> + Ok(direction) => direction, >>> + Err(err) => { >>> + eprintln!("unexpected config type in sync job config - {err}"); >>> + continue; >>> + } >>> + }; >>> let job_config: SyncJobConfig = match serde_json::from_value(job_config) { >>> Ok(c) => c, >>> Err(err) => { >>> @@ -616,7 +623,7 @@ async fn schedule_datastore_sync_jobs() { >>> job_config, >>> &auth_id, >>> Some(event_str), >>> - SyncDirection::Pull, >>> + sync_direction, >>> false, >>> ) { >>> eprintln!("unable to start datastore sync job {job_id} - {err}"); >>> -- >>> 2.39.5 >>> >>> >>> >>> _______________________________________________ >>> pbs-devel mailing list >>> pbs-devel at lists.proxmox.com >>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >>> >>> > > From c.heiss at proxmox.com Fri Nov 8 10:34:15 2024 From: c.heiss at proxmox.com (Christoph Heiss) Date: Fri, 8 Nov 2024 10:34:15 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] d/control: bump proxmox-subscription to 0.5 Message-ID: <20241108093421.123921-1-c.heiss@proxmox.com> Seems this was forgotten while bumping it in Cargo.toml in dcd863e0. 
Fixes: dcd863e0 ("bump proxmox-subscription to 0.5.0") Signed-off-by: Christoph Heiss --- debian/control | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/control b/debian/control index 7b8a048f..484941e4 100644 --- a/debian/control +++ b/debian/control @@ -92,8 +92,8 @@ Build-Depends: bash-completion, librust-proxmox-shared-cache-0.1+default-dev, librust-proxmox-shared-memory-0.3+default-dev, librust-proxmox-sortable-macro-0.1+default-dev (>= 0.1.2-~~), - librust-proxmox-subscription-0.4+api-types-dev (>= 0.4.2-~~), - librust-proxmox-subscription-0.4+default-dev (>= 0.4.2-~~), + librust-proxmox-subscription-0.5+api-types-dev, + librust-proxmox-subscription-0.5+default-dev, librust-proxmox-sys-0.6+acl-dev, librust-proxmox-sys-0.6+crypt-dev, librust-proxmox-sys-0.6+default-dev, -- 2.47.0 From dietmar at proxmox.com Fri Nov 8 11:12:38 2024 From: dietmar at proxmox.com (Dietmar Maurer) Date: Fri, 8 Nov 2024 11:12:38 +0100 (CET) Subject: [pbs-devel] applied: [PATCH proxmox-backup] d/control: bump proxmox-subscription to 0.5 In-Reply-To: <20241108093421.123921-1-c.heiss@proxmox.com> References: <20241108093421.123921-1-c.heiss@proxmox.com> Message-ID: <1457768276.330.1731060758279@webmail.proxmox.com> applied, thanks! From w.bumiller at proxmox.com Fri Nov 8 12:12:03 2024 From: w.bumiller at proxmox.com (Wolfgang Bumiller) Date: Fri, 8 Nov 2024 12:12:03 +0100 Subject: [pbs-devel] applied: [PATCH v3] rest-server: check permissions on proxy.key and proxy.pem files In-Reply-To: <20240829130722.300260-1-g.goller@proxmox.com> References: <20240829130722.300260-1-g.goller@proxmox.com> Message-ID: applied, thanks From l.wagner at proxmox.com Fri Nov 8 15:41:10 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:10 +0100 Subject: [pbs-devel] [PATCH many v3 00/14] notifications: add support for webhook endpoints Message-ID: <20241108144124.273550-1-l.wagner@proxmox.com> This series adds support for webhook notification targets to PVE and PBS. A webhook is an HTTP API route provided by a third-party service that can be used to inform the third party about an event. In our case, we can easily interact with various third-party notification/messaging systems and send PVE/PBS notifications via this service. The changes were tested against ntfy.sh, Discord and Slack. The configuration of webhook targets allows one to configure: - The URL - The HTTP method (GET/POST/PUT) - HTTP Headers - Body One can use Handlebars templating to inject notification text and metadata in the url, headers and body. One challenge is the handling of sensitive tokens and other secrets. Since the endpoint is completely generic, we cannot know in advance whether the body/header/url contains sensitive values. Thus we add 'secrets', which are stored in the protected config only accessible by root (e.g. /etc/pve/priv/notifications.cfg). These secrets are accessible in URLs/headers/body via templating: Url: https://example.com/{{ secrets.token }} Secrets can only be set and updated, but never retrieved via the API. In the UI, secrets are handled like other secret tokens/passwords.
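To make the templating part more concrete, a request body for such a target could look roughly like this (illustrative sketch only - `token` is just an example secret name, and `escape` is one of the template helpers documented in the docs patches of this series):

```
{
  "text": "{{ escape message }}",
  "auth": "Bearer {{ secrets.token }}"
}
```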
Bumps for PVE: - libpve-rs-perl needs proxmox-notify bumped - pve-manager needs proxmox-widget-toolkit and libpve-rs-perl bumped - proxmox-mail-forward needs proxmox-notify bumped Bumps for PBS: - proxmox-backup needs proxmox-notify bumped - proxmox-mail-forward needs proxmox-notify bumped Changes v1 -> v2: - Rebase proxmox-notify changes Changes v2 -> v3: - Fix utf8 -> base64 encoding bug (thx @ Stefan) - Fix bug that allowed one to save a target with an empty header value when updating the target - Additional UI-side input validation (e.g. target name, URL) - Code documentation improvements - Mask secrets in errors returned from the proxmox-notify crate, hopefully preventing them from being shown in logs or error messages - Rebased on the latest master branches proxmox: Lukas Wagner (3): notify: renderer: adapt to changes in proxmox-time notify: implement webhook targets notify: add api for webhook targets proxmox-notify/Cargo.toml | 9 +- proxmox-notify/src/api/mod.rs | 20 + proxmox-notify/src/api/webhook.rs | 432 +++++++++++++++++++ proxmox-notify/src/config.rs | 23 + proxmox-notify/src/endpoints/mod.rs | 2 + proxmox-notify/src/endpoints/webhook.rs | 550 ++++++++++++++++++++++++ proxmox-notify/src/lib.rs | 17 + proxmox-notify/src/renderer/mod.rs | 4 +- 8 files changed, 1052 insertions(+), 5 deletions(-) create mode 100644 proxmox-notify/src/api/webhook.rs create mode 100644 proxmox-notify/src/endpoints/webhook.rs proxmox-perl-rs: Lukas Wagner (2): common: notify: add bindings for webhook API routes common: notify: add bindings for get_targets common/src/notify.rs | 72 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) proxmox-widget-toolkit: Gabriel Goller (1): utils: add base64 conversion helper Lukas Wagner (1): notification: add UI for adding/updating webhook targets src/Makefile | 1 + src/Schema.js | 5 + src/Utils.js | 38 +++ src/panel/WebhookEditPanel.js | 424 ++++++++++++++++++++++++++++++++++ 4 files changed, 468 insertions(+) create mode 100644 src/panel/WebhookEditPanel.js pve-manager: Lukas Wagner (2): api: notifications: use get_targets impl from proxmox-notify api: add routes for webhook notification endpoints PVE/API2/Cluster/Notifications.pm | 297 ++++++++++++++++++++++++++---- 1 file changed, 263 insertions(+), 34 deletions(-) pve-docs: Lukas Wagner (1): notification: add documentation for webhook target endpoints.
notifications.adoc | 93 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) proxmox-backup: Lukas Wagner (4): api: notification: add API routes for webhook targets management cli: add CLI for webhook targets ui: utils: enable webhook edit window docs: notification: add webhook endpoint documentation docs/notifications.rst | 100 ++++++++++ src/api2/config/notifications/mod.rs | 2 + src/api2/config/notifications/webhook.rs | 175 ++++++++++++++++++ .../notifications/mod.rs | 4 +- .../notifications/webhook.rs | 94 ++++++++++ www/Utils.js | 5 + 6 files changed, 379 insertions(+), 1 deletion(-) create mode 100644 src/api2/config/notifications/webhook.rs create mode 100644 src/bin/proxmox_backup_manager/notifications/webhook.rs Summary over all repositories: 21 files changed, 2327 insertions(+), 40 deletions(-) -- Generated by git-murpp 0.7.3 From l.wagner at proxmox.com Fri Nov 8 15:41:23 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:23 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 13/14] ui: utils: enable webhook edit window In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-14-l.wagner@proxmox.com> This allows users to add/edit new webhook targets. Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich --- www/Utils.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/www/Utils.js b/www/Utils.js index 4853be36..b715972f 100644 --- a/www/Utils.js +++ b/www/Utils.js @@ -482,6 +482,11 @@ Ext.define('PBS.Utils', { ipanel: 'pmxGotifyEditPanel', iconCls: 'fa-bell-o', }, + webhook: { + name: 'Webhook', + ipanel: 'pmxWebhookEditPanel', + iconCls: 'fa-bell-o', + }, }; }, -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:20 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:20 +0100 Subject: [pbs-devel] [PATCH docs v3 10/14] notification: add documentation for webhook target endpoints. In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-11-l.wagner@proxmox.com> Signed-off-by: Lukas Wagner --- notifications.adoc | 93 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/notifications.adoc b/notifications.adoc index 2459095..b7470fe 100644 --- a/notifications.adoc +++ b/notifications.adoc @@ -178,6 +178,99 @@ gotify: example token somesecrettoken ---- +[[notification_targets_webhook]] +Webhook +~~~~~~~ + +Webhook notification targets perform HTTP requests to a configurable URL. + +The following configuration options are available: + +* `url`: The URL to which to perform the HTTP requests. +Supports templating to inject message contents, metadata and secrets. +* `method`: HTTP Method to use (POST/PUT/GET) +* `header`: Array of HTTP headers that should be set for the request. +Supports templating to inject message contents, metadata and secrets. +* `body`: HTTP body that should be sent. +Supports templating to inject message contents, metadata and secrets. +* `secret`: Array of secret key-value pairs. These will be stored in +a protected configuration file only readable by root. Secrets can be +accessed in body/header/URL templates via the `secrets` namespace. +* `comment`: Comment for this target. 
+ +For configuration options that support templating, the +https://handlebarsjs.com/[Handlebars] syntax can be used to +access the following properties: + +* `{{ title }}`: The rendered notification title +* `{{ message }}`: The rendered notification body +* `{{ severity }}`: The severity of the notification (`info`, `notice`, +`warning`, `error`, `unknown`) +* `{{ timestamp }}`: The notification's timestamp as a UNIX epoch (in seconds). +* `{{ fields. }}`: Sub-namespace for any metadata fields of the notification. +For instance, `fields.type` contains the notification type - for all available fields refer +to xref:notification_events[Notification Events]. +* `{{ secrets. }}`: Sub-namespace for secrets. For instance, a secret named `token` +is accessible via `secrets.token`. + +For convenience, the following helpers are available: + +* `{{ url-encode }}`: URL-encode a property/literal. +* `{{ escape }}`: Escape any control characters that cannot be +safely represented as a JSON string. +* `{{ json }}`: Render a value as JSON. This can be useful to +pass a whole sub-namespace (e.g. `fields`) as a part of a JSON payload +(e.g. `{{ json fields }}`). + +==== Examples + +===== `ntfy.sh` + +* Method: `POST` +* URL: `https://ntfy.sh/{{ secrets.channel }}` +* Headers: +** `Markdown`: `Yes` +* Body: +---- +``` +{{ message }} +``` +---- +* Secrets: +** `channel`: `` + +===== Discord + +* Method: `POST` +* URL: `https://discord.com/api/webhooks/{{ secrets.token }}` +* Headers: +** `Content-Type`: `application/json` +* Body: +---- +{ + "content": "``` {{ escape message }}```" +} +---- +* Secrets: +** `token`: `` + +===== Slack + +* Method: `POST` +* URL: `https://hooks.slack.com/services/{{ secrets.token }}` +* Headers: +** `Content-Type`: `application/json` +* Body: +---- +{ + "text": "``` {{escape message}}```", + "type": "mrkdwn" +} +---- +* Secrets: +** `token`: `` + + [[notification_matchers]] Notification Matchers --------------------- -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:22 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:22 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 12/14] management cli: add CLI for webhook targets In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-13-l.wagner@proxmox.com> The code was copied and adapted from the gotify target CLI. 
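For reference, the new subcommands should then be reachable roughly like this (assuming the usual `proxmox-backup-manager notification` entry point; the exact paths follow from the command map below):

```
proxmox-backup-manager notification endpoint webhook list
proxmox-backup-manager notification endpoint webhook show <name>
```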
Signed-off-by: Lukas Wagner --- .../notifications/mod.rs | 4 +- .../notifications/webhook.rs | 94 +++++++++++++++++++ 2 files changed, 97 insertions(+), 1 deletion(-) create mode 100644 src/bin/proxmox_backup_manager/notifications/webhook.rs diff --git a/src/bin/proxmox_backup_manager/notifications/mod.rs b/src/bin/proxmox_backup_manager/notifications/mod.rs index 678f9c54..9180a273 100644 --- a/src/bin/proxmox_backup_manager/notifications/mod.rs +++ b/src/bin/proxmox_backup_manager/notifications/mod.rs @@ -5,12 +5,14 @@ mod matchers; mod sendmail; mod smtp; mod targets; +mod webhook; pub fn notification_commands() -> CommandLineInterface { let endpoint_def = CliCommandMap::new() .insert("gotify", gotify::commands()) .insert("sendmail", sendmail::commands()) - .insert("smtp", smtp::commands()); + .insert("smtp", smtp::commands()) + .insert("webhook", webhook::commands()); let cmd_def = CliCommandMap::new() .insert("endpoint", endpoint_def) diff --git a/src/bin/proxmox_backup_manager/notifications/webhook.rs b/src/bin/proxmox_backup_manager/notifications/webhook.rs new file mode 100644 index 00000000..bd0ac41b --- /dev/null +++ b/src/bin/proxmox_backup_manager/notifications/webhook.rs @@ -0,0 +1,94 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; +use proxmox_schema::api; + +use proxmox_backup::api2; + +#[api( + input: { + properties: { + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// List all endpoints. +fn list_endpoints(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::webhook::API_METHOD_LIST_ENDPOINTS; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options() + .column(ColumnConfig::new("disable")) + .column(ColumnConfig::new("name")) + .column(ColumnConfig::new("method")) + .column(ColumnConfig::new("url")) + .column(ColumnConfig::new("comment")); + + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +#[api( + input: { + properties: { + name: { + schema: ENTITY_NAME_SCHEMA, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Show a single endpoint. 
+fn show_endpoint(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let output_format = get_output_format(¶m); + + let info = &api2::config::notifications::webhook::API_METHOD_GET_ENDPOINT; + let mut data = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + let options = default_table_format_options(); + format_and_print_result_full(&mut data, &info.returns, &output_format, &options); + + Ok(Value::Null) +} + +pub fn commands() -> CommandLineInterface { + let cmd_def = CliCommandMap::new() + .insert("list", CliCommand::new(&API_METHOD_LIST_ENDPOINTS)) + .insert( + "show", + CliCommand::new(&API_METHOD_SHOW_ENDPOINT).arg_param(&["name"]), + ) + .insert( + "create", + CliCommand::new(&api2::config::notifications::webhook::API_METHOD_ADD_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "update", + CliCommand::new(&api2::config::notifications::webhook::API_METHOD_UPDATE_ENDPOINT) + .arg_param(&["name"]), + ) + .insert( + "delete", + CliCommand::new(&api2::config::notifications::webhook::API_METHOD_DELETE_ENDPOINT) + .arg_param(&["name"]), + ); + cmd_def.into() +} -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:24 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:24 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 14/14] docs: notification: add webhook endpoint documentation In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-15-l.wagner@proxmox.com> Same information as in pve-docs but translated to restructured text. Signed-off-by: Lukas Wagner --- docs/notifications.rst | 100 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) diff --git a/docs/notifications.rst b/docs/notifications.rst index 4ba8db86..d059fa76 100644 --- a/docs/notifications.rst +++ b/docs/notifications.rst @@ -85,6 +85,106 @@ integrate with different platforms and services. See :ref:`notifications.cfg` for all configuration options. +.. _notification_targets_webhook: +Webhook +^^^^^^^ +Webhook notification targets perform HTTP requests to a configurable URL. + +The following configuration options are available: + +* ``url``: The URL to which to perform the HTTP requests. + Supports templating to inject message contents, metadata and secrets. +* ``method``: HTTP Method to use (POST/PUT/GET) +* ``header``: Array of HTTP headers that should be set for the request. + Supports templating to inject message contents, metadata and secrets. +* ``body``: HTTP body that should be sent. + Supports templating to inject message contents, metadata and secrets. +* ``secret``: Array of secret key-value pairs. These will be stored in + a protected configuration file only readable by root. Secrets can be + accessed in body/header/URL templates via the ``secrets`` namespace. +* ``comment``: Comment for this target. + +For configuration options that support templating, the +`Handlebars `_ syntax can be used to +access the following properties: + +* ``{{ title }}``: The rendered notification title +* ``{{ message }}``: The rendered notification body +* ``{{ severity }}``: The severity of the notification (``info``, ``notice``, + ``warning``, ``error``, ``unknown``) +* ``{{ timestamp }}``: The notification's timestamp as a UNIX epoch (in seconds). +* ``{{ fields. }}``: Sub-namespace for any metadata fields of the + notification. 
For instance, ``fields.type`` contains the notification + type - for all available fields refer to :ref:`notification_events`. +* ``{{ secrets. }}``: Sub-namespace for secrets. For instance, a secret + named ``token`` is accessible via ``secrets.token``. + +For convenience, the following helpers are available: + +* ``{{ url-encode }}``: URL-encode a property/literal. +* ``{{ escape }}``: Escape any control characters that cannot + be safely represented as a JSON string. +* ``{{ json }}``: Render a value as JSON. This can be useful + to pass a whole sub-namespace (e.g. ``fields``) as a part of a JSON payload + (e.g. ``{{ json fields }}``). + +Example - ntfy.sh +""""""""""""""""" + +* Method: ``POST`` +* URL: ``https://ntfy.sh/{{ secrets.channel }}`` +* Headers: + + * ``Markdown``: ``Yes`` +* Body:: + + ``` + {{ message }} + ``` + +* Secrets: + + * ``channel``: ```` + +Example - Discord +""""""""""""""""" + +* Method: ``POST`` +* URL: ``https://discord.com/api/webhooks/{{ secrets.token }}`` +* Headers: + + * ``Content-Type``: ``application/json`` + +* Body:: + + { + "content": "``` {{ escape message }}```" + } + +* Secrets: + + * ``token``: ```` + +Example - Slack +""""""""""""""" + +* Method: ``POST`` +* URL: ``https://hooks.slack.com/services/{{ secrets.token }}`` +* Headers: + + * ``Content-Type``: ``application/json`` + +* Body:: + + { + "text": "``` {{escape message}}```", + "type": "mrkdwn" + } + +* Secrets: + + * ``token``: ```` + .. _notification_matchers: Notification Matchers -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:11 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:11 +0100 Subject: [pbs-devel] [PATCH proxmox v3 01/14] notify: renderer: adapt to changes in proxmox-time In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-2-l.wagner@proxmox.com> A recent commit [1] changed the `Display` implementation of `TimeSpan` such that minutes are now displayed as `20m` instead of `20min`. This commit adapts the tests for the notification template renderer accordingly. [1] 19129960 ("time: display minute/month such that it can be parsed again") Signed-off-by: Lukas Wagner --- proxmox-notify/src/renderer/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/proxmox-notify/src/renderer/mod.rs b/proxmox-notify/src/renderer/mod.rs index 8574a3fb..82473d03 100644 --- a/proxmox-notify/src/renderer/mod.rs +++ b/proxmox-notify/src/renderer/mod.rs @@ -329,8 +329,8 @@ mod tests { Some("1 KiB".to_string()) ); - assert_eq!(value_to_duration(&json!(60)), Some("1min ".to_string())); - assert_eq!(value_to_duration(&json!("60")), Some("1min ".to_string())); + assert_eq!(value_to_duration(&json!(60)), Some("1m".to_string())); + assert_eq!(value_to_duration(&json!("60")), Some("1m".to_string())); // The rendered value is in localtime, so we only check if the result is `Some`... // ... otherwise the test will break in another timezone :S -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:15 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:15 +0100 Subject: [pbs-devel] [PATCH proxmox-perl-rs v3 05/14] common: notify: add bindings for get_targets In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-6-l.wagner@proxmox.com> This allows us to drop the impl of that function on the perl side. 
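On the Perl side, callers can then simply use the binding, e.g. `my $targets = $config->get_targets();` - as done by the corresponding pve-manager patch later in this series.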
Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich --- common/src/notify.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/common/src/notify.rs b/common/src/notify.rs index fe192d5..0f8a35d 100644 --- a/common/src/notify.rs +++ b/common/src/notify.rs @@ -27,6 +27,7 @@ mod export { MatcherConfigUpdater, SeverityMatcher, }; use proxmox_notify::{api, Config, Notification, Severity}; + use proxmox_notify::api::Target; pub struct NotificationConfig { config: Mutex, @@ -112,6 +113,14 @@ mod export { api::common::send(&config, ¬ification) } + #[export(serialize_error)] + fn get_targets( + #[try_from_ref] this: &NotificationConfig, + ) -> Result, HttpError> { + let config = this.config.lock().unwrap(); + api::get_targets(&config) + } + #[export(serialize_error)] fn test_target( #[try_from_ref] this: &NotificationConfig, -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:14 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:14 +0100 Subject: [pbs-devel] [PATCH proxmox-perl-rs v3 04/14] common: notify: add bindings for webhook API routes In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-5-l.wagner@proxmox.com> Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich --- common/src/notify.rs | 63 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/common/src/notify.rs b/common/src/notify.rs index e1b006b..fe192d5 100644 --- a/common/src/notify.rs +++ b/common/src/notify.rs @@ -19,6 +19,9 @@ mod export { DeleteableSmtpProperty, SmtpConfig, SmtpConfigUpdater, SmtpMode, SmtpPrivateConfig, SmtpPrivateConfigUpdater, }; + use proxmox_notify::endpoints::webhook::{ + DeleteableWebhookProperty, WebhookConfig, WebhookConfigUpdater, + }; use proxmox_notify::matcher::{ CalendarMatcher, DeleteableMatcherProperty, FieldMatcher, MatchModeOperator, MatcherConfig, MatcherConfigUpdater, SeverityMatcher, @@ -393,6 +396,66 @@ mod export { api::smtp::delete_endpoint(&mut config, name) } + #[export(serialize_error)] + fn get_webhook_endpoints( + #[try_from_ref] this: &NotificationConfig, + ) -> Result, HttpError> { + let config = this.config.lock().unwrap(); + api::webhook::get_endpoints(&config) + } + + #[export(serialize_error)] + fn get_webhook_endpoint( + #[try_from_ref] this: &NotificationConfig, + id: &str, + ) -> Result { + let config = this.config.lock().unwrap(); + api::webhook::get_endpoint(&config, id) + } + + #[export(serialize_error)] + #[allow(clippy::too_many_arguments)] + fn add_webhook_endpoint( + #[try_from_ref] this: &NotificationConfig, + endpoint_config: WebhookConfig, + ) -> Result<(), HttpError> { + let mut config = this.config.lock().unwrap(); + api::webhook::add_endpoint( + &mut config, + endpoint_config, + ) + } + + #[export(serialize_error)] + #[allow(clippy::too_many_arguments)] + fn update_webhook_endpoint( + #[try_from_ref] this: &NotificationConfig, + name: &str, + config_updater: WebhookConfigUpdater, + delete: Option>, + digest: Option<&str>, + ) -> Result<(), HttpError> { + let mut config = this.config.lock().unwrap(); + let digest = decode_digest(digest)?; + + api::webhook::update_endpoint( + &mut config, + name, + config_updater, + delete.as_deref(), + digest.as_deref(), + ) + } + + #[export(serialize_error)] + fn delete_webhook_endpoint( + #[try_from_ref] this: &NotificationConfig, + name: &str, + ) -> Result<(), HttpError> { + let mut config = this.config.lock().unwrap(); + 
api::webhook::delete_endpoint(&mut config, name) + } + #[export(serialize_error)] fn get_matchers( #[try_from_ref] this: &NotificationConfig, -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:16 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:16 +0100 Subject: [pbs-devel] [PATCH widget-toolkit v3 06/14] utils: add base64 conversion helper In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-7-l.wagner@proxmox.com> From: Gabriel Goller Add helper functions to convert from a utf8 string to a base64 string and vice-versa. Using the TextEncoder/TextDecoder we can support unicode such as emojis as well [0]. [0]: https://developer.mozilla.org/en-US/docs/Glossary/Base64#the_unicode_problem Signed-off-by: Gabriel Goller Reviewed-by: Thomas Lamprecht --- src/Utils.js | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/src/Utils.js b/src/Utils.js index b68c0f4..4ff95af 100644 --- a/src/Utils.js +++ b/src/Utils.js @@ -1356,6 +1356,24 @@ utilities: { ); }, + // Convert utf-8 string to base64. + // This also escapes unicode characters such as emojis. + utf8ToBase64: function(string) { + let bytes = new TextEncoder().encode(string); + const escapedString = Array.from(bytes, (byte) => + String.fromCodePoint(byte), + ).join(""); + return btoa(escapedString); + }, + + // Converts a base64 string into a utf8 string. + // Decodes escaped unicode characters correctly. + base64ToUtf8: function(b64_string) { + let string = atob(b64_string); + let bytes = Uint8Array.from(string, (m) => m.codePointAt(0)); + return new TextDecoder().decode(bytes); + }, + stringToRGB: function(string) { let hash = 0; if (!string) { -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:18 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:18 +0100 Subject: [pbs-devel] [PATCH manager v3 08/14] api: notifications: use get_targets impl from proxmox-notify In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-9-l.wagner@proxmox.com> The get_targets API endpoint is now implemented in Rust. 
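For reference, a single element of the returned target list looks roughly like this on the Rust side - a sketch only, based on the `Target` struct as used by `api::get_targets` in the proxmox-notify patch later in this series; the concrete values are made up:

```
// Hypothetical example of one entry produced by api::get_targets():
Target {
    name: "example-webhook".into(),
    origin: Origin::UserCreated,
    endpoint_type: EndpointType::Webhook,
    disable: None,
    comment: Some("example target".into()),
}
```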
Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich --- PVE/API2/Cluster/Notifications.pm | 34 +------------------------------ 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/PVE/API2/Cluster/Notifications.pm b/PVE/API2/Cluster/Notifications.pm index 2b202c28..8c9be1ed 100644 --- a/PVE/API2/Cluster/Notifications.pm +++ b/PVE/API2/Cluster/Notifications.pm @@ -309,39 +309,7 @@ __PACKAGE__->register_method ({ my $config = PVE::Notify::read_config(); my $targets = eval { - my $result = []; - - for my $target (@{$config->get_sendmail_endpoints()}) { - push @$result, { - name => $target->{name}, - comment => $target->{comment}, - type => 'sendmail', - disable => $target->{disable}, - origin => $target->{origin}, - }; - } - - for my $target (@{$config->get_gotify_endpoints()}) { - push @$result, { - name => $target->{name}, - comment => $target->{comment}, - type => 'gotify', - disable => $target->{disable}, - origin => $target->{origin}, - }; - } - - for my $target (@{$config->get_smtp_endpoints()}) { - push @$result, { - name => $target->{name}, - comment => $target->{comment}, - type => 'smtp', - disable => $target->{disable}, - origin => $target->{origin}, - }; - } - - $result + $config->get_targets(); }; raise_api_error($@) if $@; -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:21 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:21 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 11/14] api: notification: add API routes for webhook targets In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-12-l.wagner@proxmox.com> Copied and adapted from the Gotify ones. Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich --- src/api2/config/notifications/mod.rs | 2 + src/api2/config/notifications/webhook.rs | 175 +++++++++++++++++++++++ 2 files changed, 177 insertions(+) create mode 100644 src/api2/config/notifications/webhook.rs diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs index dfe82ed0..81ca9800 100644 --- a/src/api2/config/notifications/mod.rs +++ b/src/api2/config/notifications/mod.rs @@ -22,6 +22,7 @@ pub mod matchers; pub mod sendmail; pub mod smtp; pub mod targets; +pub mod webhook; #[sortable] const SUBDIRS: SubdirMap = &sorted!([ @@ -41,6 +42,7 @@ const ENDPOINT_SUBDIRS: SubdirMap = &sorted!([ ("gotify", &gotify::ROUTER), ("sendmail", &sendmail::ROUTER), ("smtp", &smtp::ROUTER), + ("webhook", &webhook::ROUTER), ]); const ENDPOINT_ROUTER: Router = Router::new() diff --git a/src/api2/config/notifications/webhook.rs b/src/api2/config/notifications/webhook.rs new file mode 100644 index 00000000..4a040024 --- /dev/null +++ b/src/api2/config/notifications/webhook.rs @@ -0,0 +1,175 @@ +use anyhow::Error; +use serde_json::Value; + +use proxmox_notify::endpoints::webhook::{ + DeleteableWebhookProperty, WebhookConfig, WebhookConfigUpdater, +}; +use proxmox_notify::schema::ENTITY_NAME_SCHEMA; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_api_types::{PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA}; + +#[api( + protected: true, + input: { + properties: {}, + }, + returns: { + description: "List of webhook endpoints.", + type: Array, + items: { type: WebhookConfig }, + }, + access: { + permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false), + }, +)] +/// List all webhook endpoints. 
+pub fn list_endpoints(
+    _param: Value,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<WebhookConfig>, Error> {
+    let config = pbs_config::notifications::config()?;
+
+    let endpoints = proxmox_notify::api::webhook::get_endpoints(&config)?;
+
+    Ok(endpoints)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            name: {
+                schema: ENTITY_NAME_SCHEMA,
+            }
+        },
+    },
+    returns: { type: WebhookConfig },
+    access: {
+        permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_AUDIT, false),
+    },
+)]
+/// Get a webhook endpoint.
+pub fn get_endpoint(name: String, rpcenv: &mut dyn RpcEnvironment) -> Result<WebhookConfig, Error> {
+    let config = pbs_config::notifications::config()?;
+    let endpoint = proxmox_notify::api::webhook::get_endpoint(&config, &name)?;
+
+    rpcenv["digest"] = hex::encode(config.digest()).into();
+
+    Ok(endpoint)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            endpoint: {
+                type: WebhookConfig,
+                flatten: true,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Add a new webhook endpoint.
+pub fn add_endpoint(
+    endpoint: WebhookConfig,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let _lock = pbs_config::notifications::lock_config()?;
+    let mut config = pbs_config::notifications::config()?;
+
+    proxmox_notify::api::webhook::add_endpoint(&mut config, endpoint)?;
+
+    pbs_config::notifications::save_config(config)?;
+    Ok(())
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            name: {
+                schema: ENTITY_NAME_SCHEMA,
+            },
+            updater: {
+                type: WebhookConfigUpdater,
+                flatten: true,
+            },
+            delete: {
+                description: "List of properties to delete.",
+                type: Array,
+                optional: true,
+                items: {
+                    type: DeleteableWebhookProperty,
+                }
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Update webhook endpoint.
+pub fn update_endpoint(
+    name: String,
+    updater: WebhookConfigUpdater,
+    delete: Option<Vec<DeleteableWebhookProperty>>,
+    digest: Option<String>,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let _lock = pbs_config::notifications::lock_config()?;
+    let mut config = pbs_config::notifications::config()?;
+    let digest = digest.map(hex::decode).transpose()?;
+
+    proxmox_notify::api::webhook::update_endpoint(
+        &mut config,
+        &name,
+        updater,
+        delete.as_deref(),
+        digest.as_deref(),
+    )?;
+
+    pbs_config::notifications::save_config(config)?;
+    Ok(())
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            name: {
+                schema: ENTITY_NAME_SCHEMA,
+            }
+        },
+    },
+    access: {
+        permission: &Permission::Privilege(&["system", "notifications"], PRIV_SYS_MODIFY, false),
+    },
+)]
+/// Delete webhook endpoint.
+pub fn delete_endpoint(name: String, _rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> {
+    let _lock = pbs_config::notifications::lock_config()?;
+    let mut config = pbs_config::notifications::config()?;
+    proxmox_notify::api::webhook::delete_endpoint(&mut config, &name)?;
+
+    pbs_config::notifications::save_config(config)?;
+    Ok(())
+}
+
+const ITEM_ROUTER: Router = Router::new()
+    .get(&API_METHOD_GET_ENDPOINT)
+    .put(&API_METHOD_UPDATE_ENDPOINT)
+    .delete(&API_METHOD_DELETE_ENDPOINT);
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_ENDPOINTS)
+    .post(&API_METHOD_ADD_ENDPOINT)
+    .match_all("name", &ITEM_ROUTER);
-- 2.39.5

From l.wagner at proxmox.com Fri Nov 8 15:41:19 2024
From: l.wagner at proxmox.com (Lukas Wagner)
Date: Fri, 8 Nov 2024 15:41:19 +0100
Subject: [pbs-devel] [PATCH manager v3 09/14] api: add routes for webhook notification endpoints
In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com>
References: <20241108144124.273550-1-l.wagner@proxmox.com>
Message-ID: <20241108144124.273550-10-l.wagner@proxmox.com>

These just call the API implementation via the perl-rs bindings.

Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich
---
 PVE/API2/Cluster/Notifications.pm | 263 +++++++++++++++++++++++++++++-
 1 file changed, 262 insertions(+), 1 deletion(-)

diff --git a/PVE/API2/Cluster/Notifications.pm b/PVE/API2/Cluster/Notifications.pm
index 8c9be1ed..7a89f4e9 100644
--- a/PVE/API2/Cluster/Notifications.pm
+++ b/PVE/API2/Cluster/Notifications.pm
@@ -247,6 +247,7 @@ __PACKAGE__->register_method ({
 	    { name => 'gotify' },
 	    { name => 'sendmail' },
 	    { name => 'smtp' },
+	    { name => 'webhook' },
 	];

 	return $result;
@@ -283,7 +284,7 @@ __PACKAGE__->register_method ({
 	    'type' => {
 		description => 'Type of the target.',
 		type => 'string',
-		enum => [qw(sendmail gotify smtp)],
+		enum => [qw(sendmail gotify smtp webhook)],
 	    },
 	    'comment' => {
 		description => 'Comment',
@@ -1233,6 +1234,266 @@ __PACKAGE__->register_method ({
     }
 });

+my $webhook_properties = {
+    name => {
+	description => 'The name of the endpoint.',
+	type => 'string',
+	format => 'pve-configid',
+    },
+    url => {
+	description => 'Server URL',
+	type => 'string',
+    },
+    method => {
+	description => 'HTTP method',
+	type => 'string',
+	enum => [qw(post put get)],
+    },
+    header => {
+	description => 'HTTP headers to set. These have to be formatted as'
+	    . ' a property string in the format name=<name>,value=<value>',
+	type => 'array',
+	items => {
+	    type => 'string',
+	},
+	optional => 1,
+    },
+    body => {
+	description => 'HTTP body, base64 encoded',
+	type => 'string',
+	optional => 1,
+    },
+    secret => {
+	description => 'Secrets to set. These have to be formatted as'
+	    .
' a property string in the format name=<name>,value=<value>',
+	type => 'array',
+	items => {
+	    type => 'string',
+	},
+	optional => 1,
+    },
+    comment => {
+	description => 'Comment',
+	type => 'string',
+	optional => 1,
+    },
+    disable => {
+	description => 'Disable this target',
+	type => 'boolean',
+	optional => 1,
+	default => 0,
+    },
+};
+
+__PACKAGE__->register_method ({
+    name => 'get_webhook_endpoints',
+    path => 'endpoints/webhook',
+    method => 'GET',
+    description => 'Returns a list of all webhook endpoints',
+    protected => 1,
+    permissions => {
+	check => ['or', ['perm', '/mapping/notifications', ['Mapping.Modify']],
+	    ['perm', '/mapping/notifications', ['Mapping.Audit']]],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {},
+    },
+    returns => {
+	type => 'array',
+	items => {
+	    type => 'object',
+	    properties => {
+		%$webhook_properties,
+		'origin' => {
+		    description => 'Show if this entry was created by a user or was built-in',
+		    type => 'string',
+		    enum => [qw(user-created builtin modified-builtin)],
+		},
+	    },
+	},
+	links => [ { rel => 'child', href => '{name}' } ],
+    },
+    code => sub {
+	my $config = PVE::Notify::read_config();
+	my $rpcenv = PVE::RPCEnvironment::get();
+
+	my $entities = eval {
+	    $config->get_webhook_endpoints();
+	};
+	raise_api_error($@) if $@;
+
+	return $entities;
+    }
+});
+
+__PACKAGE__->register_method ({
+    name => 'get_webhook_endpoint',
+    path => 'endpoints/webhook/{name}',
+    method => 'GET',
+    description => 'Return a specific webhook endpoint',
+    protected => 1,
+    permissions => {
+	check => ['or',
+	    ['perm', '/mapping/notifications', ['Mapping.Modify']],
+	    ['perm', '/mapping/notifications', ['Mapping.Audit']],
+	],
+    },
+    parameters => {
+	additionalProperties => 0,
+	properties => {
+	    name => {
+		type => 'string',
+		format => 'pve-configid',
+		description => 'Name of the endpoint.'
+ }, + } + }, + returns => { + type => 'object', + properties => { + %$webhook_properties, + digest => get_standard_option('pve-config-digest'), + } + }, + code => sub { + my ($param) = @_; + my $name = extract_param($param, 'name'); + + my $config = PVE::Notify::read_config(); + my $endpoint = eval { + $config->get_webhook_endpoint($name) + }; + + raise_api_error($@) if $@; + $endpoint->{digest} = $config->digest(); + + return $endpoint; + } +}); + +__PACKAGE__->register_method ({ + name => 'create_webhook_endpoint', + path => 'endpoints/webhook', + protected => 1, + method => 'POST', + description => 'Create a new webhook endpoint', + permissions => { + check => ['perm', '/mapping/notifications', ['Mapping.Modify']], + }, + parameters => { + additionalProperties => 0, + properties => $webhook_properties, + }, + returns => { type => 'null' }, + code => sub { + my ($param) = @_; + eval { + PVE::Notify::lock_config(sub { + my $config = PVE::Notify::read_config(); + + $config->add_webhook_endpoint( + $param, + ); + + PVE::Notify::write_config($config); + }); + }; + + raise_api_error($@) if $@; + return; + } +}); + +__PACKAGE__->register_method ({ + name => 'update_webhook_endpoint', + path => 'endpoints/webhook/{name}', + protected => 1, + method => 'PUT', + description => 'Update existing webhook endpoint', + permissions => { + check => ['perm', '/mapping/notifications', ['Mapping.Modify']], + }, + parameters => { + additionalProperties => 0, + properties => { + %{ make_properties_optional($webhook_properties) }, + delete => { + type => 'array', + items => { + type => 'string', + format => 'pve-configid', + }, + optional => 1, + description => 'A list of settings you want to delete.', + }, + digest => get_standard_option('pve-config-digest'), + } + }, + returns => { type => 'null' }, + code => sub { + my ($param) = @_; + + my $name = extract_param($param, 'name'); + my $delete = extract_param($param, 'delete'); + my $digest = extract_param($param, 'digest'); + + eval { + PVE::Notify::lock_config(sub { + my $config = PVE::Notify::read_config(); + + $config->update_webhook_endpoint( + $name, + $param, # Config updater + $delete, + $digest, + ); + + PVE::Notify::write_config($config); + }); + }; + + raise_api_error($@) if $@; + return; + } +}); + +__PACKAGE__->register_method ({ + name => 'delete_webhook_endpoint', + protected => 1, + path => 'endpoints/webhook/{name}', + method => 'DELETE', + description => 'Remove webhook endpoint', + permissions => { + check => ['perm', '/mapping/notifications', ['Mapping.Modify']], + }, + parameters => { + additionalProperties => 0, + properties => { + name => { + type => 'string', + format => 'pve-configid', + }, + } + }, + returns => { type => 'null' }, + code => sub { + my ($param) = @_; + my $name = extract_param($param, 'name'); + + eval { + PVE::Notify::lock_config(sub { + my $config = PVE::Notify::read_config(); + $config->delete_webhook_endpoint($name); + PVE::Notify::write_config($config); + }); + }; + + raise_api_error($@) if $@; + return; + } +}); + my $matcher_properties = { name => { description => 'Name of the matcher.', -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:13 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:13 +0100 Subject: [pbs-devel] [PATCH proxmox v3 03/14] notify: add api for webhook targets In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-4-l.wagner@proxmox.com> All in all pretty 
similar to other endpoint APIs. One thing worth noting
is how secrets are handled. We never ever return the
values of previously stored secrets in get_endpoint(s)
calls, but only a list of the names of all secrets.
This is needed to build the UI, where we display all
secrets that were set before in a table.

For update calls, one is supposed to send all secrets
that should be kept and updated. If the value should be
updated, the name and value are expected, and if the
current value should be preserved, only the name is sent.
If a secret's name is not present in the updater, it
will be dropped. If 'secret' is present in the 'delete'
array, all secrets will be dropped, apart from those
which are also set/preserved in the same update call.

Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich
---
 proxmox-notify/src/api/mod.rs | 20 ++
 proxmox-notify/src/api/webhook.rs | 432 ++++++++++++++++++++++++++++++
 2 files changed, 452 insertions(+)
 create mode 100644 proxmox-notify/src/api/webhook.rs

diff --git a/proxmox-notify/src/api/mod.rs b/proxmox-notify/src/api/mod.rs
index a7f6261c..7f823bc7 100644
--- a/proxmox-notify/src/api/mod.rs
+++ b/proxmox-notify/src/api/mod.rs
@@ -15,6 +15,8 @@ pub mod matcher;
 pub mod sendmail;
 #[cfg(feature = "smtp")]
 pub mod smtp;
+#[cfg(feature = "webhook")]
+pub mod webhook;

 // We have our own, local versions of http_err and http_bail, because
 // we don't want to wrap the error in anyhow::Error. If we were to do that,
@@ -54,6 +56,9 @@ pub enum EndpointType {
     /// Gotify endpoint
     #[cfg(feature = "gotify")]
     Gotify,
+    /// Webhook endpoint
+    #[cfg(feature = "webhook")]
+    Webhook,
 }

 #[api]
@@ -113,6 +118,17 @@ pub fn get_targets(config: &Config) -> Result<Vec<Target>, HttpError> {
         })
     }

+    #[cfg(feature = "webhook")]
+    for endpoint in webhook::get_endpoints(config)? {
+        targets.push(Target {
+            name: endpoint.name,
+            origin: endpoint.origin.unwrap_or(Origin::UserCreated),
+            endpoint_type: EndpointType::Webhook,
+            disable: endpoint.disable,
+            comment: endpoint.comment,
+        })
+    }
+
     Ok(targets)
 }

@@ -145,6 +161,10 @@ fn ensure_endpoint_exists(#[allow(unused)] config: &Config, name: &str) -> Resul
     {
         exists = exists || smtp::get_endpoint(config, name).is_ok();
     }
+    #[cfg(feature = "webhook")]
+    {
+        exists = exists || webhook::get_endpoint(config, name).is_ok();
+    }

     if !exists {
         http_bail!(NOT_FOUND, "endpoint '{name}' does not exist")
diff --git a/proxmox-notify/src/api/webhook.rs b/proxmox-notify/src/api/webhook.rs
new file mode 100644
index 00000000..f786c36b
--- /dev/null
+++ b/proxmox-notify/src/api/webhook.rs
@@ -0,0 +1,432 @@
+//! CRUD API for webhook targets.
+//!
+//! All methods assume that the caller has already done any required permission checks.
+
+use proxmox_http_error::HttpError;
+use proxmox_schema::property_string::PropertyString;
+
+use crate::api::http_err;
+use crate::endpoints::webhook::{
+    DeleteableWebhookProperty, KeyAndBase64Val, WebhookConfig, WebhookConfigUpdater,
+    WebhookPrivateConfig, WEBHOOK_TYPENAME,
+};
+use crate::{http_bail, Config};
+
+use super::remove_private_config_entry;
+use super::set_private_config_entry;
+
+/// Get a list of all webhook endpoints.
+///
+/// The caller is responsible for any needed permission checks.
+/// Returns a list of all webhook endpoints or a [`HttpError`] if the config is
+/// erroneous (`500 Internal server error`).
+pub fn get_endpoints(config: &Config) -> Result<Vec<WebhookConfig>, HttpError> {
+    let mut endpoints: Vec<WebhookConfig> = config
+        .config
+        .convert_to_typed_array(WEBHOOK_TYPENAME)
+        .map_err(|e| http_err!(NOT_FOUND, "Could not fetch endpoints: {e}"))?;
+
+    for endpoint in &mut endpoints {
+        let priv_config: WebhookPrivateConfig = config
+            .private_config
+            .lookup(WEBHOOK_TYPENAME, &endpoint.name)
+            .unwrap_or_default();
+
+        let mut secret_names = Vec::new();
+        // We only return *which* secrets we have stored, but not their values.
+        for secret in priv_config.secret {
+            secret_names.push(
+                KeyAndBase64Val {
+                    name: secret.name.clone(),
+                    value: None,
+                }
+                .into(),
+            )
+        }
+
+        endpoint.secret = secret_names;
+    }
+
+    Ok(endpoints)
+}
+
+/// Get webhook endpoint with given `name`
+///
+/// The caller is responsible for any needed permission checks.
+/// Returns the endpoint or a [`HttpError`] if the endpoint was not found (`404 Not found`).
+pub fn get_endpoint(config: &Config, name: &str) -> Result<WebhookConfig, HttpError> {
+    let mut endpoint: WebhookConfig = config
+        .config
+        .lookup(WEBHOOK_TYPENAME, name)
+        .map_err(|_| http_err!(NOT_FOUND, "endpoint '{name}' not found"))?;
+
+    let priv_config: Option<WebhookPrivateConfig> = config
+        .private_config
+        .lookup(WEBHOOK_TYPENAME, &endpoint.name)
+        .ok();
+
+    let mut secret_names = Vec::new();
+    if let Some(priv_config) = priv_config {
+        for secret in &priv_config.secret {
+            secret_names.push(
+                KeyAndBase64Val {
+                    name: secret.name.clone(),
+                    value: None,
+                }
+                .into(),
+            );
+        }
+    }
+
+    endpoint.secret = secret_names;
+
+    Ok(endpoint)
+}
+
+/// Add a new webhook endpoint.
+///
+/// The caller is responsible for any needed permission checks.
+/// The caller is also responsible for locking the configuration files.
+/// Returns a [`HttpError`] if:
+/// - the target name is already used (`400 Bad request`)
+/// - an entity with the same name already exists (`400 Bad request`)
+/// - the configuration could not be saved (`500 Internal server error`)
+pub fn add_endpoint(
+    config: &mut Config,
+    mut endpoint_config: WebhookConfig,
+) -> Result<(), HttpError> {
+    super::ensure_unique(config, &endpoint_config.name)?;
+
+    let secrets = std::mem::take(&mut endpoint_config.secret);
+
+    set_private_config_entry(
+        config,
+        &WebhookPrivateConfig {
+            name: endpoint_config.name.clone(),
+            secret: secrets,
+        },
+        WEBHOOK_TYPENAME,
+        &endpoint_config.name,
+    )?;
+
+    config
+        .config
+        .set_data(&endpoint_config.name, WEBHOOK_TYPENAME, &endpoint_config)
+        .map_err(|e| {
+            http_err!(
+                INTERNAL_SERVER_ERROR,
+                "could not save endpoint '{}': {e}",
+                endpoint_config.name
+            )
+        })
+}
+
+/// Update existing webhook endpoint.
+///
+/// The caller is responsible for any needed permission checks.
+/// The caller is also responsible for locking the configuration files.
+/// Returns a `HttpError` if:
+/// - the passed `digest` does not match (`400 Bad request`)
+/// - parameters are ill-formed (empty header value, invalid base64, unknown header/secret)
+///   (`400 Bad request`)
+/// - an entity with the same name already exists (`400 Bad request`)
+/// - the configuration could not be saved (`500 Internal server error`)
+pub fn update_endpoint(
+    config: &mut Config,
+    name: &str,
+    config_updater: WebhookConfigUpdater,
+    delete: Option<&[DeleteableWebhookProperty]>,
+    digest: Option<&[u8]>,
+) -> Result<(), HttpError> {
+    super::verify_digest(config, digest)?;
+
+    let mut endpoint = get_endpoint(config, name)?;
+    endpoint.secret.clear();
+
+    let old_secrets = config
+        .private_config
+        .lookup::<WebhookPrivateConfig>(WEBHOOK_TYPENAME, name)
+        .map_err(|err| http_err!(INTERNAL_SERVER_ERROR, "could not read secret config: {err}"))?
+        .secret;
+
+    if let Some(delete) = delete {
+        for deleteable_property in delete {
+            match deleteable_property {
+                DeleteableWebhookProperty::Comment => endpoint.comment = None,
+                DeleteableWebhookProperty::Disable => endpoint.disable = None,
+                DeleteableWebhookProperty::Header => endpoint.header = Vec::new(),
+                DeleteableWebhookProperty::Body => endpoint.body = None,
+                DeleteableWebhookProperty::Secret => {
+                    set_private_config_entry(
+                        config,
+                        &WebhookPrivateConfig {
+                            name: name.into(),
+                            secret: Vec::new(),
+                        },
+                        WEBHOOK_TYPENAME,
+                        name,
+                    )?;
+                }
+            }
+        }
+    }
+
+    // Destructuring makes sure we don't forget any members
+    let WebhookConfigUpdater {
+        url,
+        body,
+        header,
+        method,
+        disable,
+        comment,
+        secret,
+    } = config_updater;
+
+    if let Some(url) = url {
+        endpoint.url = url;
+    }
+
+    if let Some(body) = body {
+        endpoint.body = Some(body);
+    }
+
+    if let Some(header) = header {
+        for h in &header {
+            if h.value.is_none() {
+                http_bail!(BAD_REQUEST, "header '{}' has empty value", h.name);
+            }
+            if h.decode_value().is_err() {
+                http_bail!(
+                    BAD_REQUEST,
+                    "header '{}' does not have valid base64 encoded data",
+                    h.name
+                )
+            }
+        }
+        endpoint.header = header;
+    }
+
+    if let Some(method) = method {
+        endpoint.method = method;
+    }
+
+    if let Some(disable) = disable {
+        endpoint.disable = Some(disable);
+    }
+
+    if let Some(comment) = comment {
+        endpoint.comment = Some(comment);
+    }
+
+    if let Some(secret) = secret {
+        let mut new_secrets: Vec<PropertyString<KeyAndBase64Val>> = Vec::new();
+
+        for new_secret in &secret {
+            let sec = if new_secret.value.is_some() {
+                // Updating or creating a secret
+
+                // Make sure it is valid base64 encoded data
+                if new_secret.decode_value().is_err() {
+                    http_bail!(
+                        BAD_REQUEST,
+                        "secret '{}' does not have valid base64 encoded data",
+                        new_secret.name
+                    )
+                }
+                new_secret.clone()
+            } else if let Some(old_secret) = old_secrets.iter().find(|v| v.name == new_secret.name)
+            {
+                // Keeping an already existing secret
+                old_secret.clone()
+            } else {
+                http_bail!(BAD_REQUEST, "secret '{}' not known", new_secret.name);
+            };
+
+            if new_secrets.iter().any(|s| sec.name == s.name) {
+                http_bail!(BAD_REQUEST, "secret '{}' defined multiple times", sec.name)
+            }
+
+            new_secrets.push(sec);
+        }
+
+        set_private_config_entry(
+            config,
+            &WebhookPrivateConfig {
+                name: name.into(),
+                secret: new_secrets,
+            },
+            WEBHOOK_TYPENAME,
+            name,
+        )?;
+    }
+
+    config
+        .config
+        .set_data(name, WEBHOOK_TYPENAME, &endpoint)
+        .map_err(|e| {
+            http_err!(
+                INTERNAL_SERVER_ERROR,
+                "could not save endpoint '{name}': {e}"
+            )
+        })
+}
+
+/// Delete existing webhook endpoint.
+///
+/// The caller is responsible for any needed permission checks.
+/// The caller is also responsible for locking the configuration files.
+/// Returns a `HttpError` if:
+/// - the entity does not exist (`404 Not found`)
+/// - the endpoint is still referenced by another entity (`400 Bad request`)
+pub fn delete_endpoint(config: &mut Config, name: &str) -> Result<(), HttpError> {
+    // Check if the endpoint exists
+    let _ = get_endpoint(config, name)?;
+    super::ensure_safe_to_delete(config, name)?;
+
+    remove_private_config_entry(config, name)?;
+    config.config.sections.remove(name);
+
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{api::test_helpers::empty_config, endpoints::webhook::HttpMethod};
+
+    use base64::encode;
+
+    pub fn add_default_webhook_endpoint(config: &mut Config) -> Result<(), HttpError> {
+        add_endpoint(
+            config,
+            WebhookConfig {
+                name: "webhook-endpoint".into(),
+                method: HttpMethod::Post,
+                url: "http://example.com/webhook".into(),
+                header: vec![KeyAndBase64Val::new_with_plain_value(
+                    "Content-Type",
+                    "application/json",
+                )
+                .into()],
+                body: Some(encode("this is the body")),
+                comment: Some("comment".into()),
+                disable: Some(false),
+                secret: vec![KeyAndBase64Val::new_with_plain_value("token", "secret").into()],
+                ..Default::default()
+            },
+        )?;
+
+        assert!(get_endpoint(config, "webhook-endpoint").is_ok());
+        Ok(())
+    }
+
+    #[test]
+    fn test_update_not_existing_returns_error() -> Result<(), HttpError> {
+        let mut config = empty_config();
+
+        assert!(update_endpoint(&mut config, "test", Default::default(), None, None).is_err());
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_update_invalid_digest_returns_error() -> Result<(), HttpError> {
+        let mut config = empty_config();
+        add_default_webhook_endpoint(&mut config)?;
+
+        assert!(update_endpoint(
+            &mut config,
+            "webhook-endpoint",
+            Default::default(),
+            None,
+            Some(&[0; 32])
+        )
+        .is_err());
+
+        Ok(())
+    }
+
+    #[test]
+    fn test_update() -> Result<(), HttpError> {
+        let mut config = empty_config();
+        add_default_webhook_endpoint(&mut config)?;
+
+        let digest = config.digest;
+
+        update_endpoint(
+            &mut config,
+            "webhook-endpoint",
+            WebhookConfigUpdater {
+                url: Some("http://new.example.com/webhook".into()),
+                comment: Some("newcomment".into()),
+                method: Some(HttpMethod::Put),
+                // Keep the old token and set a new one
+                secret: Some(vec![
+                    KeyAndBase64Val::new_with_plain_value("token2", "newsecret").into(),
+                    KeyAndBase64Val {
+                        name: "token".into(),
+                        value: None,
+                    }
+                    .into(),
+                ]),
+                ..Default::default()
+            },
+            None,
+            Some(&digest),
+        )?;
+
+        let endpoint = get_endpoint(&config, "webhook-endpoint")?;
+
+        assert_eq!(endpoint.url, "http://new.example.com/webhook".to_string());
+        assert_eq!(endpoint.comment, Some("newcomment".to_string()));
+        assert!(matches!(endpoint.method, HttpMethod::Put));
+
+        let secrets = config
+            .private_config
+            .lookup::<WebhookPrivateConfig>(WEBHOOK_TYPENAME, "webhook-endpoint")
+            .unwrap()
+            .secret;
+
+        assert_eq!(secrets[1].name, "token".to_string());
+        assert_eq!(secrets[1].value, Some(encode("secret")));
+        assert_eq!(secrets[0].name, "token2".to_string());
+        assert_eq!(secrets[0].value, Some(encode("newsecret")));
+
+        // Test property deletion
+        update_endpoint(
+            &mut config,
+            "webhook-endpoint",
+            Default::default(),
+            Some(&[
+                DeleteableWebhookProperty::Comment,
+                DeleteableWebhookProperty::Secret,
+            ]),
+            None,
+        )?;
+
+        let endpoint = get_endpoint(&config, "webhook-endpoint")?;
+        assert_eq!(endpoint.comment, None);
+
+        let secrets = config
+            .private_config
+            .lookup::<WebhookPrivateConfig>(WEBHOOK_TYPENAME, "webhook-endpoint")
+            .unwrap()
+            .secret;
+
assert!(secrets.is_empty()); + + Ok(()) + } + + #[test] + fn test_delete() -> Result<(), HttpError> { + let mut config = empty_config(); + add_default_webhook_endpoint(&mut config)?; + + delete_endpoint(&mut config, "webhook-endpoint")?; + assert!(delete_endpoint(&mut config, "webhook-endpoint").is_err()); + assert_eq!(get_endpoints(&config)?.len(), 0); + + Ok(()) + } +} -- 2.39.5 From l.wagner at proxmox.com Fri Nov 8 15:41:17 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Fri, 8 Nov 2024 15:41:17 +0100 Subject: [pbs-devel] [PATCH widget-toolkit v3 07/14] notification: add UI for adding/updating webhook targets In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: <20241108144124.273550-8-l.wagner@proxmox.com> The widgets for editing the headers/secrets were adapted from the 'Tag Edit' dialog from PVE's datacenter options. Apart from that, the new dialog is rather standard. I've decided to put the http method and url in a single row, mostly to save space and also to make it analogous to how an actual http request is structured (VERB URL, followed by headers, followed by the body). The secrets are a mechanism to store tokens/passwords in the protected notification config. Secrets are accessible via templating in the URL, headers and body via {{ secrets.NAME }}. Secrets can only be set/updated, but not retrieved/displayed. Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich --- src/Makefile | 1 + src/Schema.js | 5 + src/Utils.js | 20 ++ src/panel/WebhookEditPanel.js | 424 ++++++++++++++++++++++++++++++++++ 4 files changed, 450 insertions(+) create mode 100644 src/panel/WebhookEditPanel.js diff --git a/src/Makefile b/src/Makefile index 0478251..cfaffd7 100644 --- a/src/Makefile +++ b/src/Makefile @@ -78,6 +78,7 @@ JSSRC= \ panel/StatusView.js \ panel/TfaView.js \ panel/NotesView.js \ + panel/WebhookEditPanel.js \ window/Edit.js \ window/PasswordEdit.js \ window/SafeDestroy.js \ diff --git a/src/Schema.js b/src/Schema.js index 42541e0..cd1c306 100644 --- a/src/Schema.js +++ b/src/Schema.js @@ -65,6 +65,11 @@ Ext.define('Proxmox.Schema', { // a singleton ipanel: 'pmxGotifyEditPanel', iconCls: 'fa-bell-o', }, + webhook: { + name: 'Webhook', + ipanel: 'pmxWebhookEditPanel', + iconCls: 'fa-bell-o', + }, }, // to add or change existing for product specific ones diff --git a/src/Utils.js b/src/Utils.js index 4ff95af..52375d2 100644 --- a/src/Utils.js +++ b/src/Utils.js @@ -1524,6 +1524,26 @@ utilities: { me.IP6_dotnotation_match = new RegExp("^(" + IPV6_REGEXP + ")(?:\\.(\\d+))?$"); me.Vlan_match = /^vlan(\d+)/; me.VlanInterface_match = /(\w+)\.(\d+)/; + + + // Taken from proxmox-schema and ported to JS + let PORT_REGEX_STR = "(?:[0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])"; + let IPRE_BRACKET_STR = "(?:" + IPV4_REGEXP + "|\\[(?:" + IPV6_REGEXP + ")\\])"; + let DNS_NAME_STR = "(?:(?:" + DnsName_REGEXP + "\\.)*" + DnsName_REGEXP + ")"; + let HTTP_URL_REGEX = "^https?://(?:(?:(?:" + + DNS_NAME_STR + + "|" + + IPRE_BRACKET_STR + + ")(?::" + + PORT_REGEX_STR + + ")?)|" + + IPV6_REGEXP + + ")(?:/[^\x00-\x1F\x7F]*)?$"; + + me.httpUrlRegex = new RegExp(HTTP_URL_REGEX); + + // Same as SAFE_ID_REGEX in proxmox-schema + me.safeIdRegex = /^(?:[A-Za-z0-9_][A-Za-z0-9._\\-]*)$/; }, }); diff --git a/src/panel/WebhookEditPanel.js b/src/panel/WebhookEditPanel.js new file mode 100644 index 0000000..0a39f3c --- /dev/null +++ b/src/panel/WebhookEditPanel.js @@ -0,0 +1,424 @@ 
+Ext.define('Proxmox.panel.WebhookEditPanel', { + extend: 'Proxmox.panel.InputPanel', + xtype: 'pmxWebhookEditPanel', + mixins: ['Proxmox.Mixin.CBind'], + onlineHelp: 'notification_targets_webhook', + + type: 'webhook', + + columnT: [ + + ], + + column1: [ + { + xtype: 'pmxDisplayEditField', + name: 'name', + cbind: { + value: '{name}', + editable: '{isCreate}', + }, + fieldLabel: gettext('Endpoint Name'), + regex: Proxmox.Utils.safeIdRegex, + allowBlank: false, + }, + ], + + column2: [ + { + xtype: 'proxmoxcheckbox', + name: 'enable', + fieldLabel: gettext('Enable'), + allowBlank: false, + checked: true, + }, + ], + + columnB: [ + { + layout: 'hbox', + border: false, + margin: '0 0 5 0', + items: [ + { + xtype: 'displayfield', + value: gettext('Method/URL:'), + width: 125, + }, + { + xtype: 'proxmoxKVComboBox', + name: 'method', + editable: false, + value: 'post', + comboItems: [ + ['post', 'POST'], + ['put', 'PUT'], + ['get', 'GET'], + ], + width: 80, + margin: '0 5 0 0', + }, + { + xtype: 'proxmoxtextfield', + name: 'url', + allowBlank: false, + emptyText: "https://example.com/hook", + regex: Proxmox.Utils.httpUrlRegex, + regexText: gettext('Must be a valid URL'), + flex: 4, + }, + ], + }, + { + xtype: 'pmxWebhookKeyValueList', + name: 'header', + fieldLabel: gettext('Headers'), + maskValues: false, + cbind: { + isCreate: '{isCreate}', + }, + }, + { + xtype: 'textarea', + fieldLabel: gettext('Body'), + name: 'body', + allowBlank: true, + minHeight: '150', + fieldStyle: { + 'font-family': 'monospace', + }, + margin: '15 0 0 0', + }, + { + xtype: 'pmxWebhookKeyValueList', + name: 'secret', + fieldLabel: gettext('Secrets'), + maskValues: true, + cbind: { + isCreate: '{isCreate}', + }, + }, + { + xtype: 'proxmoxtextfield', + name: 'comment', + fieldLabel: gettext('Comment'), + cbind: { + deleteEmpty: '{!isCreate}', + }, + }, + ], + + onSetValues: (values) => { + values.enable = !values.disable; + + if (values.body) { + values.body = Proxmox.Utils.base64ToUtf8(values.body); + } + + delete values.disable; + return values; + }, + + onGetValues: function(values) { + let me = this; + + if (values.enable) { + if (!me.isCreate) { + Proxmox.Utils.assemble_field_data(values, { 'delete': 'disable' }); + } + } else { + values.disable = 1; + } + + if (values.body) { + values.body = Proxmox.Utils.utf8ToBase64(values.body); + } else { + delete values.body; + if (!me.isCreate) { + Proxmox.Utils.assemble_field_data(values, { 'delete': 'body' }); + } + } + + if (Ext.isArray(values.header) && !values.header.length) { + delete values.header; + if (!me.isCreate) { + Proxmox.Utils.assemble_field_data(values, { 'delete': 'header' }); + } + } + + if (Ext.isArray(values.secret) && !values.secret.length) { + delete values.secret; + if (!me.isCreate) { + Proxmox.Utils.assemble_field_data(values, { 'delete': 'secret' }); + } + } + delete values.enable; + + return values; + }, +}); + +Ext.define('Proxmox.form.WebhookKeyValueList', { + extend: 'Ext.container.Container', + alias: 'widget.pmxWebhookKeyValueList', + + mixins: [ + 'Ext.form.field.Field', + ], + + // override for column header + fieldTitle: gettext('Item'), + + // will be applied to the textfields + maskRe: undefined, + + allowBlank: true, + selectAll: false, + isFormField: true, + deleteEmpty: false, + config: { + deleteEmpty: false, + maskValues: false, + }, + + setValue: function(list) { + let me = this; + + list = Ext.isArray(list) ? list : (list ?? 
'').split(';').filter(t => t !== ''); + + let store = me.lookup('grid').getStore(); + if (list.length > 0) { + store.setData(list.map(item => { + let properties = Proxmox.Utils.parsePropertyString(item); + + // decode base64 + let value = me.maskValues ? '' : Proxmox.Utils.base64ToUtf8(properties.value); + + let obj = { + headerName: properties.name, + headerValue: value, + }; + + if (!me.isCreate && me.maskValues) { + obj.emptyText = gettext('Unchanged'); + } + + return obj; + })); + } else { + store.removeAll(); + } + me.checkChange(); + return me; + }, + + getValue: function() { + let me = this; + let values = []; + me.lookup('grid').getStore().each((rec) => { + if (rec.data.headerName) { + let obj = { + name: rec.data.headerName, + value: Proxmox.Utils.utf8ToBase64(rec.data.headerValue), + }; + + values.push(Proxmox.Utils.printPropertyString(obj)); + } + }); + + return values; + }, + + getErrors: function(value) { + let me = this; + let empty = false; + + me.lookup('grid').getStore().each((rec) => { + if (!rec.data.headerName) { + empty = true; + } + + if (!rec.data.headerValue && rec.data.newValue) { + empty = true; + } + + if (!rec.data.headerValue && !me.maskValues) { + empty = true; + } + }); + if (empty) { + return [gettext('Name/value must not be empty.')]; + } + return []; + }, + + // override framework function to implement deleteEmpty behaviour + getSubmitData: function() { + let me = this, + data = null, + val; + if (!me.disabled && me.submitValue) { + val = me.getValue(); + if (val !== null && val !== '') { + data = {}; + data[me.getName()] = val; + } else if (me.getDeleteEmpty()) { + data = {}; + data.delete = me.getName(); + } + } + return data; + }, + + controller: { + xclass: 'Ext.app.ViewController', + + addLine: function() { + let me = this; + me.lookup('grid').getStore().add({ + headerName: '', + headerValue: '', + emptyText: '', + newValue: true, + }); + }, + + removeSelection: function(field) { + let me = this; + let view = me.getView(); + let grid = me.lookup('grid'); + + let record = field.getWidgetRecord(); + if (record === undefined) { + // this is sometimes called before a record/column is initialized + return; + } + + grid.getStore().remove(record); + view.checkChange(); + view.validate(); + }, + + itemChange: function(field, newValue) { + let rec = field.getWidgetRecord(); + if (!rec) { + return; + } + + let column = field.getWidgetColumn(); + rec.set(column.dataIndex, newValue); + let list = field.up('pmxWebhookKeyValueList'); + list.checkChange(); + list.validate(); + }, + + control: { + 'grid button': { + click: 'removeSelection', + }, + }, + }, + + margin: '10 0 5 0', + + items: [ + { + layout: 'hbox', + border: false, + items: [ + { + xtype: 'displayfield', + width: 125, + }, + { + xtype: 'button', + text: gettext('Add'), + iconCls: 'fa fa-plus-circle', + handler: 'addLine', + margin: '0 5 5 0', + }, + ], + }, + { + xtype: 'grid', + reference: 'grid', + minHeight: 100, + maxHeight: 100, + scrollable: 'vertical', + margin: '0 0 0 125', + + viewConfig: { + deferEmptyText: false, + }, + + store: { + listeners: { + update: function() { + this.commitChanges(); + }, + }, + }, + }, + ], + + initComponent: function() { + let me = this; + + for (const [key, value] of Object.entries(me.gridConfig ?? 
{})) {
+            me.items[1][key] = value;
+        }
+
+        me.items[0].items[0].value = me.fieldLabel + ':';
+
+        me.items[1].columns = [
+            {
+                header: me.fieldTitle,
+                dataIndex: 'headerName',
+                xtype: 'widgetcolumn',
+                widget: {
+                    xtype: 'textfield',
+                    isFormField: false,
+                    maskRe: me.maskRe,
+                    allowBlank: false,
+                    queryMode: 'local',
+                    listeners: {
+                        change: 'itemChange',
+                    },
+                },
+                flex: 1,
+            },
+            {
+                header: me.fieldTitle,
+                dataIndex: 'headerValue',
+                xtype: 'widgetcolumn',
+                widget: {
+                    xtype: 'proxmoxtextfield',
+                    inputType: me.maskValues ? 'password' : 'text',
+                    isFormField: false,
+                    maskRe: me.maskRe,
+                    queryMode: 'local',
+                    listeners: {
+                        change: 'itemChange',
+                    },
+                    allowBlank: !me.isCreate && me.maskValues,
+
+                    bind: {
+                        emptyText: '{record.emptyText}',
+                    },
+                },
+                flex: 1,
+            },
+            {
+                xtype: 'widgetcolumn',
+                width: 40,
+                widget: {
+                    xtype: 'button',
+                    iconCls: 'fa fa-trash-o',
+                },
+            },
+        ];
+
+        me.callParent();
+        me.initField();
+    },
+});
-- 2.39.5

From l.wagner at proxmox.com Fri Nov 8 15:41:12 2024
From: l.wagner at proxmox.com (Lukas Wagner)
Date: Fri, 8 Nov 2024 15:41:12 +0100
Subject: [pbs-devel] [PATCH proxmox v3 02/14] notify: implement webhook targets
In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com>
References: <20241108144124.273550-1-l.wagner@proxmox.com>
Message-ID: <20241108144124.273550-3-l.wagner@proxmox.com>

This target type allows users to perform HTTP requests to arbitrary
third party (notification) services, for instance ntfy.sh/Discord/Slack.

The configuration for these endpoints allows one to freely configure the
URL, HTTP method, headers and body. The URL, header values and body
support handlebars templating to inject notification text, metadata and
secrets. Secrets are stored in the protected configuration file
(e.g. /etc/pve/priv/notification.cfg) as key value pairs, allowing
users to protect sensitive tokens/passwords. Secrets are accessible in
handlebars templating via the secrets.* namespace, e.g. if there is a
secret named 'token', a body could contain '{{ secrets.token }}' to
inject the token into the payload.

A couple of handlebars helpers are also provided:
- url-encoding (useful for templating in URLs)
- escape (escape any control characters in strings)
- json (print a property as json)

In the configuration, the body, header values and secret values
are stored in base64 encoding so that we can store any string we want.
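To illustrate, a configured target could look roughly like the following
(a sketch using the types added by this patch; the endpoint name, URL and
template body are made-up examples, error handling is elided):

```
use proxmox_notify::endpoints::webhook::{HttpMethod, KeyAndBase64Val, WebhookConfig};

// Hypothetical ntfy.sh-style target: `{{ secrets.channel }}` and
// `{{ message }}` are expanded by the handlebars renderer at send time.
let config = WebhookConfig {
    name: "example-ntfy".into(),
    method: HttpMethod::Post,
    url: "https://ntfy.sh/{{ secrets.channel }}".into(),
    header: vec![KeyAndBase64Val {
        name: "Markdown".into(),
        // header values are stored base64 encoded
        value: Some(base64::encode("Yes")),
    }
    .into()],
    // ... and so is the body
    body: Some(base64::encode("{{ message }}")),
    ..Default::default()
};
```

The matching secret (`channel`) would be stored separately as a
`WebhookPrivateConfig` entry in the protected configuration file, so it
is never returned via the regular config API.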
Signed-off-by: Lukas Wagner Tested-By: Stefan Hanreich --- proxmox-notify/Cargo.toml | 9 +- proxmox-notify/src/config.rs | 23 + proxmox-notify/src/endpoints/mod.rs | 2 + proxmox-notify/src/endpoints/webhook.rs | 550 ++++++++++++++++++++++++ proxmox-notify/src/lib.rs | 17 + 5 files changed, 598 insertions(+), 3 deletions(-) create mode 100644 proxmox-notify/src/endpoints/webhook.rs diff --git a/proxmox-notify/Cargo.toml b/proxmox-notify/Cargo.toml index d57a36cd..5a631bfc 100644 --- a/proxmox-notify/Cargo.toml +++ b/proxmox-notify/Cargo.toml @@ -13,13 +13,15 @@ rust-version.workspace = true [dependencies] anyhow.workspace = true -base64.workspace = true +base64 = { workspace = true, optional = true } const_format.workspace = true handlebars = { workspace = true } +http = { workspace = true, optional = true } lettre = { workspace = true, optional = true } log.workspace = true mail-parser = { workspace = true, optional = true } openssl.workspace = true +percent-encoding = { workspace = true, optional = true } regex.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true @@ -35,10 +37,11 @@ proxmox-time.workspace = true proxmox-uuid = { workspace = true, features = ["serde"] } [features] -default = ["sendmail", "gotify", "smtp"] +default = ["sendmail", "gotify", "smtp", "webhook"] mail-forwarder = ["dep:mail-parser", "dep:proxmox-sys"] -sendmail = ["dep:proxmox-sys"] +sendmail = ["dep:proxmox-sys", "dep:base64"] gotify = ["dep:proxmox-http"] pve-context = ["dep:proxmox-sys"] pbs-context = ["dep:proxmox-sys"] smtp = ["dep:lettre"] +webhook = ["dep:base64", "dep:http", "dep:percent-encoding", "dep:proxmox-http"] diff --git a/proxmox-notify/src/config.rs b/proxmox-notify/src/config.rs index 789c4a7d..4d0b53f7 100644 --- a/proxmox-notify/src/config.rs +++ b/proxmox-notify/src/config.rs @@ -57,6 +57,17 @@ fn config_init() -> SectionConfig { GOTIFY_SCHEMA, )); } + #[cfg(feature = "webhook")] + { + use crate::endpoints::webhook::{WebhookConfig, WEBHOOK_TYPENAME}; + + const WEBHOOK_SCHEMA: &ObjectSchema = WebhookConfig::API_SCHEMA.unwrap_object_schema(); + config.register_plugin(SectionConfigPlugin::new( + WEBHOOK_TYPENAME.to_string(), + Some(String::from("name")), + WEBHOOK_SCHEMA, + )); + } const MATCHER_SCHEMA: &ObjectSchema = MatcherConfig::API_SCHEMA.unwrap_object_schema(); config.register_plugin(SectionConfigPlugin::new( @@ -110,6 +121,18 @@ fn private_config_init() -> SectionConfig { )); } + #[cfg(feature = "webhook")] + { + use crate::endpoints::webhook::{WebhookPrivateConfig, WEBHOOK_TYPENAME}; + + const WEBHOOK_SCHEMA: &ObjectSchema = + WebhookPrivateConfig::API_SCHEMA.unwrap_object_schema(); + config.register_plugin(SectionConfigPlugin::new( + WEBHOOK_TYPENAME.to_string(), + Some(String::from("name")), + WEBHOOK_SCHEMA, + )); + } config } diff --git a/proxmox-notify/src/endpoints/mod.rs b/proxmox-notify/src/endpoints/mod.rs index 97f79fcc..f20bee21 100644 --- a/proxmox-notify/src/endpoints/mod.rs +++ b/proxmox-notify/src/endpoints/mod.rs @@ -4,5 +4,7 @@ pub mod gotify; pub mod sendmail; #[cfg(feature = "smtp")] pub mod smtp; +#[cfg(feature = "webhook")] +pub mod webhook; mod common; diff --git a/proxmox-notify/src/endpoints/webhook.rs b/proxmox-notify/src/endpoints/webhook.rs new file mode 100644 index 00000000..4ad9cb2f --- /dev/null +++ b/proxmox-notify/src/endpoints/webhook.rs @@ -0,0 +1,550 @@ +//! This endpoint implements a generic webhook target, allowing users to send notifications through +//! a highly customizable HTTP request. +//! +//! 
The configuration options include specifying the HTTP method, URL, headers, and body.
+//! URLs, headers, and the body support template expansion using the [`handlebars`] templating engine.
+//! For secure handling of passwords or tokens, these values can be stored as secrets.
+//! Secrets are kept in a private configuration file, accessible only by root, and are not retrievable via the API.
+//! Within templates, secrets can be referenced using `{{ secrets.<name> }}`.
+//! Additionally, we take measures to prevent secrets from appearing in logs or error messages.
+use handlebars::{
+    Context as HandlebarsContext, Handlebars, Helper, HelperResult, Output, RenderContext,
+    RenderError as HandlebarsRenderError,
+};
+use http::Request;
+use percent_encoding::AsciiSet;
+use proxmox_schema::property_string::PropertyString;
+use serde::{Deserialize, Serialize};
+use serde_json::{json, Map, Value};
+
+use proxmox_http::client::sync::Client;
+use proxmox_http::{HttpClient, HttpOptions, ProxyConfig};
+use proxmox_schema::api_types::{COMMENT_SCHEMA, HTTP_URL_SCHEMA};
+use proxmox_schema::{api, ApiStringFormat, ApiType, Schema, StringSchema, Updater};
+
+use crate::context::context;
+use crate::renderer::TemplateType;
+use crate::schema::ENTITY_NAME_SCHEMA;
+use crate::{renderer, Content, Endpoint, Error, Notification, Origin};
+
+/// This will be used as a section type in the public/private configuration file.
+pub(crate) const WEBHOOK_TYPENAME: &str = "webhook";
+
+#[api]
+#[derive(Serialize, Deserialize, Clone, Copy, Default)]
+#[serde(rename_all = "kebab-case")]
+/// HTTP Method to use.
+pub enum HttpMethod {
+    /// HTTP POST
+    #[default]
+    Post,
+    /// HTTP PUT
+    Put,
+    /// HTTP GET
+    Get,
+}
+
+// We only ever need a &str, so we rather implement this
+// instead of Display.
+impl From<HttpMethod> for &str {
+    fn from(value: HttpMethod) -> Self {
+        match value {
+            HttpMethod::Post => "POST",
+            HttpMethod::Put => "PUT",
+            HttpMethod::Get => "GET",
+        }
+    }
+}
+
+#[api(
+    properties: {
+        name: {
+            schema: ENTITY_NAME_SCHEMA,
+        },
+        url: {
+            schema: HTTP_URL_SCHEMA,
+        },
+        comment: {
+            optional: true,
+            schema: COMMENT_SCHEMA,
+        },
+        header: {
+            type: Array,
+            items: {
+                schema: KEY_AND_BASE64_VALUE_SCHEMA,
+            },
+            optional: true,
+        },
+        secret: {
+            type: Array,
+            items: {
+                schema: KEY_AND_BASE64_VALUE_SCHEMA,
+            },
+            optional: true,
+        },
+    }
+)]
+#[derive(Serialize, Deserialize, Updater, Default, Clone)]
+#[serde(rename_all = "kebab-case")]
+/// Config for Webhook notification endpoints
+pub struct WebhookConfig {
+    /// Name of the endpoint.
+    #[updater(skip)]
+    pub name: String,
+
+    pub method: HttpMethod,
+
+    /// Webhook URL. Supports templating.
+    pub url: String,
+    /// Array of HTTP headers. Each entry is a property string with a name and a value.
+    /// The value property contains the header in base64 encoding. Supports templating.
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    #[updater(serde(skip_serializing_if = "Option::is_none"))]
+    pub header: Vec<PropertyString<KeyAndBase64Val>>,
+    /// The HTTP body to send. Supports templating.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub body: Option<String>,
+
+    /// Comment.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub comment: Option<String>,
+    /// Disable this target.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub disable: Option<bool>,
+    /// Origin of this config entry.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    #[updater(skip)]
+    pub origin: Option<Origin>,
+    /// Array of secrets. Each entry is a property string with a name and an optional value.
+    /// The value property contains the secret in base64 encoding.
+    /// For any API endpoints returning the endpoint config,
+    /// only the secret name but not the value will be returned.
+    /// When updating the config, also send all secrets that you want
+    /// to keep, setting only the name but not the value. Can be accessed from templates.
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    #[updater(serde(skip_serializing_if = "Option::is_none"))]
+    pub secret: Vec<PropertyString<KeyAndBase64Val>>,
+}
+
+#[api(
+    properties: {
+        name: {
+            schema: ENTITY_NAME_SCHEMA,
+        },
+        secret: {
+            type: Array,
+            items: {
+                schema: KEY_AND_BASE64_VALUE_SCHEMA,
+            },
+            optional: true,
+        },
+    }
+)]
+#[derive(Serialize, Deserialize, Clone, Updater, Default)]
+#[serde(rename_all = "kebab-case")]
+/// Private configuration for Webhook notification endpoints.
+/// This config will be saved to a separate configuration file with stricter
+/// permissions (root:root 0600).
+pub struct WebhookPrivateConfig {
+    /// Name of the endpoint
+    #[updater(skip)]
+    pub name: String,
+
+    #[serde(default, skip_serializing_if = "Vec::is_empty")]
+    #[updater(serde(skip_serializing_if = "Option::is_none"))]
+    /// Array of secrets. Each entry is a property string with a name,
+    /// and a value property. The value property contains the secret
+    /// in base64 encoding. Can be accessed from templates.
+    pub secret: Vec<PropertyString<KeyAndBase64Val>>,
+}
+
+/// A Webhook notification endpoint.
+pub struct WebhookEndpoint {
+    pub config: WebhookConfig,
+    pub private_config: WebhookPrivateConfig,
+}
+
+#[api]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Webhook configuration properties that can be deleted.
+pub enum DeleteableWebhookProperty {
+    /// Delete `comment`.
+    Comment,
+    /// Delete `disable`.
+    Disable,
+    /// Delete `header`.
+    Header,
+    /// Delete `body`.
+    Body,
+    /// Delete `secret`.
+    Secret,
+}
+
+#[api]
+#[derive(Serialize, Deserialize, Debug, Default, Clone)]
+/// Datatype used to represent key-value pairs, the value
+/// being encoded in base64.
+pub struct KeyAndBase64Val {
+    /// Name
+    pub name: String,
+    /// Base64 encoded value
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub value: Option<String>,
+}
+
+impl KeyAndBase64Val {
+    #[cfg(test)]
+    pub fn new_with_plain_value(name: &str, value: &str) -> Self {
+        let value = base64::encode(value);
+
+        Self {
+            name: name.into(),
+            value: Some(value),
+        }
+    }
+
+    /// Decode the contained value, returning the plaintext value
+    ///
+    /// Returns an error if the contained value is not valid base64-encoded
+    /// text.
+    pub fn decode_value(&self) -> Result<String, Error> {
+        let value = self.value.as_deref().unwrap_or_default();
+        let bytes = base64::decode(value).map_err(|_| {
+            Error::Generic(format!(
+                "could not decode base64 value with key '{}'",
+                self.name
+            ))
+        })?;
+        let value = String::from_utf8(bytes).map_err(|_| {
+            Error::Generic(format!(
+                "could not decode UTF8 string from base64, key '{}'",
+                self.name
+            ))
+        })?;
+
+        Ok(value)
+    }
+}
+
+pub const KEY_AND_BASE64_VALUE_SCHEMA: Schema =
+    StringSchema::new("String schema for pairs of keys and base64 encoded values")
+        .format(&ApiStringFormat::PropertyString(
+            &KeyAndBase64Val::API_SCHEMA,
+        ))
+        .schema();
+
+impl Endpoint for WebhookEndpoint {
+    /// Send a notification to a webhook endpoint.
+    fn send(&self, notification: &Notification) -> Result<(), Error> {
+        let request = self.build_request(notification)?;
+
+        self.create_client()?
+            .request(request)
+            .map_err(|err| self.mask_secret_in_error(err))?;
+
+        Ok(())
+    }
+
+    /// Return the name of the endpoint.
+    fn name(&self) -> &str {
+        &self.config.name
+    }
+
+    /// Check if the endpoint is disabled
+    fn disabled(&self) -> bool {
+        self.config.disable.unwrap_or_default()
+    }
+}
+
+impl WebhookEndpoint {
+    fn create_client(&self) -> Result<Client, Error> {
+        let proxy_config = context()
+            .http_proxy_config()
+            .map(|url| ProxyConfig::parse_proxy_url(&url))
+            .transpose()
+            .map_err(|err| Error::NotifyFailed(self.name().to_string(), err.into()))?;
+
+        let options = HttpOptions {
+            proxy_config,
+            ..Default::default()
+        };
+
+        Ok(Client::new(options))
+    }
+
+    fn build_request(&self, notification: &Notification) -> Result<Request<String>, Error> {
+        let (title, message) = match &notification.content {
+            Content::Template {
+                template_name,
+                data,
+            } => {
+                let rendered_title =
+                    renderer::render_template(TemplateType::Subject, template_name, data)?;
+                let rendered_message =
+                    renderer::render_template(TemplateType::PlaintextBody, template_name, data)?;
+
+                (rendered_title, rendered_message)
+            }
+            #[cfg(feature = "mail-forwarder")]
+            Content::ForwardedMail { title, body, .. } => (title.clone(), body.clone()),
+        };
+
+        let mut fields = Map::new();
+
+        for (field_name, field_value) in &notification.metadata.additional_fields {
+            fields.insert(field_name.clone(), Value::String(field_value.to_string()));
+        }
+
+        let mut secrets = Map::new();
+
+        for secret in &self.private_config.secret {
+            let value = secret.decode_value()?;
+            secrets.insert(secret.name.clone(), Value::String(value));
+        }
+
+        let data = json!({
+            "title": &title,
+            "message": &message,
+            "severity": notification.metadata.severity,
+            "timestamp": notification.metadata.timestamp,
+            "fields": fields,
+            "secrets": secrets,
+        });
+
+        let handlebars = setup_handlebars();
+        let body_template = self.base64_decode(self.config.body.as_deref().unwrap_or_default())?;
+
+        let body = handlebars
+            .render_template(&body_template, &data)
+            .map_err(|err| self.mask_secret_in_error(err))
+            .map_err(|err| Error::Generic(format!("failed to render webhook body: {err}")))?;
+
+        let url = handlebars
+            .render_template(&self.config.url, &data)
+            .map_err(|err| self.mask_secret_in_error(err))
+            .map_err(|err| Error::Generic(format!("failed to render webhook url: {err}")))?;
+
+        let method: &str = self.config.method.into();
+        let mut builder = http::Request::builder().uri(url).method(method);
+
+        for header in &self.config.header {
+            let value = header.decode_value()?;
+
+            let value = handlebars
+                .render_template(&value, &data)
+                .map_err(|err| self.mask_secret_in_error(err))
+                .map_err(|err| {
+                    Error::Generic(format!(
+                        "failed to render header value template: {value}: {err}"
+                    ))
+                })?;
+
+            builder = builder.header(header.name.clone(), value);
+        }
+
+        let request = builder
+            .body(body)
+            .map_err(|err| self.mask_secret_in_error(err))
+            .map_err(|err| Error::Generic(format!("failed to build http request: {err}")))?;
+
+        Ok(request)
+    }
+
+    fn base64_decode(&self, s: &str) -> Result<String, Error> {
+        // Also here, TODO: revisit Error variants for the *whole* crate.
+        let s = base64::decode(s)
+            .map_err(|err| Error::Generic(format!("could not decode base64 value: {err}")))?;
+
+        String::from_utf8(s).map_err(|err| {
+            Error::Generic(format!(
+                "base64 encoded value did not contain valid utf8: {err}"
+            ))
+        })
+    }
+
+    /// Mask secrets in errors to avoid them showing up in error messages and log files
+    ///
+    /// Use this for any error from third-party code where you are not 100%
+    /// sure whether it could leak the content of secrets in the error.
+    /// For instance, the http client will contain the URL, including
+    /// any URL parameters that could contain tokens.
+    ///
+    /// This function will only mask exact matches, but this should suffice
+    /// for the majority of cases.
+    fn mask_secret_in_error(&self, error: impl std::fmt::Display) -> Error {
+        let mut s = error.to_string();
+
+        for secret_value in &self.private_config.secret {
+            match secret_value.decode_value() {
+                Ok(value) => s = s.replace(&value, "<masked>"),
+                Err(e) => return e,
+            }
+        }
+
+        Error::Generic(s)
+    }
+}
+
+fn setup_handlebars() -> Handlebars<'static> {
+    let mut handlebars = Handlebars::new();
+
+    handlebars.register_helper("url-encode", Box::new(handlebars_percent_encode));
+    handlebars.register_helper("json", Box::new(handlebars_json));
+    handlebars.register_helper("escape", Box::new(handlebars_escape));
+
+    // There is no escape.
+    handlebars.register_escape_fn(handlebars::no_escape);
+
+    handlebars
+}
+
+fn handlebars_percent_encode(
+    h: &Helper,
+    _: &Handlebars,
+    _: &HandlebarsContext,
+    _rc: &mut RenderContext,
+    out: &mut dyn Output,
+) -> HelperResult {
+    let param0 = h
+        .param(0)
+        .and_then(|v| v.value().as_str())
+        .ok_or_else(|| HandlebarsRenderError::new("url-encode: missing parameter"))?;
+
+    // See https://developer.mozilla.org/en-US/docs/Glossary/Percent-encoding
+    const FRAGMENT: &AsciiSet = &percent_encoding::CONTROLS
+        .add(b':')
+        .add(b'/')
+        .add(b'?')
+        .add(b'#')
+        .add(b'[')
+        .add(b']')
+        .add(b'@')
+        .add(b'!')
+        .add(b'$')
+        .add(b'&')
+        .add(b'\'')
+        .add(b'(')
+        .add(b')')
+        .add(b'*')
+        .add(b'+')
+        .add(b',')
+        .add(b';')
+        .add(b'=')
+        .add(b'%')
+        .add(b' ');
+    let a = percent_encoding::utf8_percent_encode(param0, FRAGMENT);
+
+    out.write(&a.to_string())?;
+
+    Ok(())
+}
+
+fn handlebars_json(
+    h: &Helper,
+    _: &Handlebars,
+    _: &HandlebarsContext,
+    _rc: &mut RenderContext,
+    out: &mut dyn Output,
+) -> HelperResult {
+    let param0 = h
+        .param(0)
+        .map(|v| v.value())
+        .ok_or_else(|| HandlebarsRenderError::new("json: missing parameter"))?;
+
+    let json = serde_json::to_string(param0)?;
+    out.write(&json)?;
+
+    Ok(())
+}
+
+fn handlebars_escape(
+    h: &Helper,
+    _: &Handlebars,
+    _: &HandlebarsContext,
+    _rc: &mut RenderContext,
+    out: &mut dyn Output,
+) -> HelperResult {
+    let text = h
+        .param(0)
+        .and_then(|v| v.value().as_str())
+        .ok_or_else(|| HandlebarsRenderError::new("escape: missing text parameter"))?;
+
+    let val = Value::String(text.to_string());
+    let json = serde_json::to_string(&val)?;
+    out.write(&json[1..json.len() - 1])?;
+
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use std::collections::HashMap;
+
+    use super::*;
+    use crate::Severity;
+
+    #[test]
+    fn test_build_request() -> Result<(), Error> {
+        let data = HashMap::from_iter([
+            ("hello".into(), "hello world".into()),
+            ("test".into(), "escaped\nstring".into()),
+        ]);
+
+        let body_template = r#"
+{{ fields.test }}
+{{ escape fields.test }}
+
+{{ json fields }}
+{{ json fields.hello }}
+
+{{ url-encode fields.hello }}
+
+{{ json severity }}
+
+"#;
+
+        let expected_body = r#"
+escaped +string +escaped\nstring + +{"hello":"hello world","test":"escaped\nstring"} +"hello world" + +hello%20world + +"info" + +"#; + + let endpoint = WebhookEndpoint { + config: WebhookConfig { + name: "test".into(), + method: HttpMethod::Post, + url: "http://localhost/{{ url-encode fields.hello }}".into(), + header: vec![ + KeyAndBase64Val::new_with_plain_value("X-Severity", "{{ severity }}").into(), + ], + body: Some(base64::encode(body_template)), + ..Default::default() + }, + private_config: WebhookPrivateConfig { + name: "test".into(), + ..Default::default() + }, + }; + + let notification = Notification::from_template(Severity::Info, "foo", json!({}), data); + + let request = endpoint.build_request(&notification)?; + + assert_eq!(request.uri(), "http://localhost/hello%20world"); + assert_eq!(request.body(), expected_body); + assert_eq!(request.method(), "POST"); + + assert_eq!(request.headers().get("X-Severity").unwrap(), "info"); + + Ok(()) + } +} diff --git a/proxmox-notify/src/lib.rs b/proxmox-notify/src/lib.rs index 015d9b9c..12f3866b 100644 --- a/proxmox-notify/src/lib.rs +++ b/proxmox-notify/src/lib.rs @@ -500,6 +500,23 @@ impl Bus { ); } + #[cfg(feature = "webhook")] + { + use endpoints::webhook::WEBHOOK_TYPENAME; + use endpoints::webhook::{WebhookConfig, WebhookEndpoint, WebhookPrivateConfig}; + endpoints.extend( + parse_endpoints_with_private_config!( + config, + WebhookConfig, + WebhookPrivateConfig, + WebhookEndpoint, + WEBHOOK_TYPENAME + )? + .into_iter() + .map(|e| (e.name().into(), e)), + ); + } + let matchers = config .config .convert_to_typed_array(MATCHER_TYPENAME) -- 2.39.5 From t.lamprecht at proxmox.com Sun Nov 10 18:27:04 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Sun, 10 Nov 2024 18:27:04 +0100 Subject: [pbs-devel] applied: [pve-devel] [PATCH widget-toolkit v3 06/14] utils: add base64 conversion helper In-Reply-To: <20241108144124.273550-7-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> <20241108144124.273550-7-l.wagner@proxmox.com> Message-ID: On 08.11.24 at 15:41, Lukas Wagner wrote: > From: Gabriel Goller > > Add helper functions to convert from a utf8 string to a base64 string > and vice-versa. Using the TextEncoder/TextDecoder we can support unicode > such as emojis as well [0]. > > [0]: https://developer.mozilla.org/en-US/docs/Glossary/Base64#the_unicode_problem > > Signed-off-by: Gabriel Goller > Reviewed-by: Thomas Lamprecht > --- > src/Utils.js | 18 ++++++++++++++++++ > 1 file changed, 18 insertions(+) > > applied, thanks! From t.lamprecht at proxmox.com Sun Nov 10 18:27:09 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Sun, 10 Nov 2024 18:27:09 +0100 Subject: [pbs-devel] applied: [pve-devel] [PATCH widget-toolkit v3 07/14] notification: add UI for adding/updating webhook targets In-Reply-To: <20241108144124.273550-8-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> <20241108144124.273550-8-l.wagner@proxmox.com> Message-ID: <88eff893-8337-4d25-9cb5-a291d2149b8d@proxmox.com> On 08.11.24 at 15:41, Lukas Wagner wrote: > The widgets for editing the headers/secrets were adapted from > the 'Tag Edit' dialog from PVE's datacenter options. > > Apart from that, the new dialog is rather standard. I've decided > to put the http method and url in a single row, mostly to > save space and also to make it analogous to how an actual http request > is structured (VERB URL, followed by headers, followed by the body).
> > The secrets are a mechanism to store tokens/passwords in the > protected notification config. Secrets are accessible via > templating in the URL, headers and body via {{ secrets.NAME }}. > Secrets can only be set/updated, but not retrieved/displayed. > > Signed-off-by: Lukas Wagner > Tested-By: Stefan Hanreich > --- > src/Makefile | 1 + > src/Schema.js | 5 + > src/Utils.js | 20 ++ > src/panel/WebhookEditPanel.js | 424 ++++++++++++++++++++++++++++++++++ > 4 files changed, 450 insertions(+) > create mode 100644 src/panel/WebhookEditPanel.js > > applied, thanks! From g.goller at proxmox.com Mon Nov 11 10:30:17 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Mon, 11 Nov 2024 10:30:17 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] fix #5861: remove min username length in ChangeOwner modal Message-ID: <20241111093017.50060-1-g.goller@proxmox.com> We allow usernames shorter than 4 characters since this patch [0] in pbs. [0]: https://lore.proxmox.com/pbs-devel/20240117142918.264978-1-g.goller at proxmox.com/ Signed-off-by: Gabriel Goller --- www/window/BackupGroupChangeOwner.js | 1 - 1 file changed, 1 deletion(-) diff --git a/www/window/BackupGroupChangeOwner.js b/www/window/BackupGroupChangeOwner.js index 025a133aba78..33b84f3019ff 100644 --- a/www/window/BackupGroupChangeOwner.js +++ b/www/window/BackupGroupChangeOwner.js @@ -41,7 +41,6 @@ Ext.define('PBS.BackupGroupChangeOwner', { name: 'new-owner', value: me.owner, fieldLabel: gettext('New Owner'), - minLength: 8, allowBlank: false, }, ], -- 2.39.5 From f.gruenbichler at proxmox.com Mon Nov 11 11:08:35 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 11 Nov 2024 11:08:35 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup] fix #5861: remove min username length in ChangeOwner modal In-Reply-To: <20241111093017.50060-1-g.goller@proxmox.com> References: <20241111093017.50060-1-g.goller@proxmox.com> Message-ID: <1731319706.nodhzfasr1.astroid@yuna.none> thanks! On November 11, 2024 10:30 am, Gabriel Goller wrote: > We allow usernames shorter than 4 characters since this patch [0] in > pbs. 
> > [0]: https://lore.proxmox.com/pbs-devel/20240117142918.264978-1-g.goller at proxmox.com/ > > Signed-off-by: Gabriel Goller > --- > www/window/BackupGroupChangeOwner.js | 1 - > 1 file changed, 1 deletion(-) > > diff --git a/www/window/BackupGroupChangeOwner.js b/www/window/BackupGroupChangeOwner.js > index 025a133aba78..33b84f3019ff 100644 > --- a/www/window/BackupGroupChangeOwner.js > +++ b/www/window/BackupGroupChangeOwner.js > @@ -41,7 +41,6 @@ Ext.define('PBS.BackupGroupChangeOwner', { > name: 'new-owner', > value: me.owner, > fieldLabel: gettext('New Owner'), > - minLength: 8, > allowBlank: false, > }, > ], > -- > 2.39.5 From l.wagner at proxmox.com Mon Nov 11 13:56:18 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Mon, 11 Nov 2024 13:56:18 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/2] d/control: bump proxmox-widget-toolkit dependency Message-ID: <20241111125619.193930-1-l.wagner@proxmox.com> We need "notification: matcher: match-field: show known fields/values", which was released in proxmox-widget-toolkit 4.2.4. Signed-off-by: Lukas Wagner --- debian/control | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/control b/debian/control index 81f28d1c..a300a2c0 100644 --- a/debian/control +++ b/debian/control @@ -183,7 +183,7 @@ Depends: fonts-font-awesome, postfix | mail-transport-agent, proxmox-backup-docs, proxmox-mini-journalreader, - proxmox-widget-toolkit (>= 4.1.4), + proxmox-widget-toolkit (>= 4.2.4), pve-xtermjs (>= 4.7.0-1), sg3-utils, smartmontools, -- 2.39.5 From l.wagner at proxmox.com Mon Nov 11 13:56:19 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Mon, 11 Nov 2024 13:56:19 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/2] web ui: notification: remove matcher overrides In-Reply-To: <20241111125619.193930-1-l.wagner@proxmox.com> References: <20241111125619.193930-1-l.wagner@proxmox.com> Message-ID: <20241111125619.193930-2-l.wagner@proxmox.com> These were put in place so that the initial release of the new notification system for Proxmox Backup Server could already include the improved notification matchers, which at that time had not yet been merged into proxmox-widget-toolkit. In the meantime, the changes have been merged and released in proxmox-widget-toolkit 4.2.4, hence we can remove the override. Signed-off-by: Lukas Wagner --- www/Makefile | 1 - www/window/NotificationMatcherOverride.js | 1105 --------------------- 2 files changed, 1106 deletions(-) delete mode 100644 www/window/NotificationMatcherOverride.js diff --git a/www/Makefile b/www/Makefile index 609a0ba6..f86cbb4d 100644 --- a/www/Makefile +++ b/www/Makefile @@ -77,7 +77,6 @@ JSSRC= \ window/NamespaceEdit.js \ window/MaintenanceOptions.js \ window/NotesEdit.js \ - window/NotificationMatcherOverride.js \ window/RemoteEdit.js \ window/TrafficControlEdit.js \ window/NotifyOptions.js \ diff --git a/www/window/NotificationMatcherOverride.js b/www/window/NotificationMatcherOverride.js deleted file mode 100644 index bc7b7c1d..00000000 --- a/www/window/NotificationMatcherOverride.js +++ /dev/null @@ -1,1105 +0,0 @@ -// Override some components from widget toolkit.
-// This was done so that we can already use the improved UI for editing -// match rules without waiting for the needed API calls in PVE to be merged -// -// This can and *should* be removed once these changes have landed in -// widget toolkit: -// https://lists.proxmox.com/pipermail/pve-devel/2024-April/063539.html - - -Ext.define('pbs-notification-fields', { - extend: 'Ext.data.Model', - fields: ['name', 'description'], - idProperty: 'name', -}); - -Ext.define('pbs-notification-field-values', { - extend: 'Ext.data.Model', - fields: ['value', 'comment', 'field'], - idProperty: 'value', -}); - -Ext.define('PBS.panel.NotificationRulesEditPanel', { - override: 'Proxmox.panel.NotificationRulesEditPanel', - extend: 'Proxmox.panel.InputPanel', - xtype: 'pmxNotificationMatchRulesEditPanel', - mixins: ['Proxmox.Mixin.CBind'], - - controller: { - xclass: 'Ext.app.ViewController', - - // we want to also set the empty value, but 'bind' does not do that so - // we have to set it then (and only then) to get the correct value in - // the tree - control: { - 'field': { - change: function(cmp) { - let me = this; - let vm = me.getViewModel(); - if (cmp.field) { - let record = vm.get('selectedRecord'); - if (!record) { - return; - } - let data = Ext.apply({}, record.get('data')); - let value = cmp.getValue(); - // only update if the value is empty (or empty array) - if (!value || !value.length) { - data[cmp.field] = value; - record.set({ data }); - } - } - }, - }, - }, - }, - - viewModel: { - data: { - selectedRecord: null, - matchFieldType: 'exact', - matchFieldField: '', - matchFieldValue: '', - rootMode: 'all', - }, - - formulas: { - nodeType: { - get: function(get) { - let record = get('selectedRecord'); - return record?.get('type'); - }, - set: function(value) { - let me = this; - let record = me.get('selectedRecord'); - - let data; - - switch (value) { - case 'match-severity': - data = { - value: ['info', 'notice', 'warning', 'error', 'unknown'], - }; - break; - case 'match-field': - data = { - type: 'exact', - field: '', - value: '', - }; - break; - case 'match-calendar': - data = { - value: '', - }; - break; - } - - let node = { - type: value, - data, - }; - record.set(node); - }, - }, - showMatchingMode: function(get) { - let record = get('selectedRecord'); - if (!record) { - return false; - } - return record.isRoot(); - }, - showMatcherType: function(get) { - let record = get('selectedRecord'); - if (!record) { - return false; - } - return !record.isRoot(); - }, - - rootMode: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - set: function(value) { - let me = this; - let record = me.get('selectedRecord'); - let currentData = record.get('data'); - let invert = false; - if (value.startsWith('not')) { - value = value.substring(3); - invert = true; - } - record.set({ - data: { - ...currentData, - value, - invert, - }, - }); - }, - get: function(record) { - let prefix = record?.get('data').invert ? 
'not' : ''; - return prefix + record?.get('data')?.value; - }, - }, - }, - }, - - column1: [ - { - xtype: 'pbsNotificationMatchRuleTree', - cbind: { - isCreate: '{isCreate}', - }, - }, - ], - column2: [ - { - xtype: 'pbsNotificationMatchRuleSettings', - cbind: { - baseUrl: '{baseUrl}', - }, - }, - - ], - - onGetValues: function(values) { - let me = this; - - let deleteArrayIfEmpty = (field) => { - if (Ext.isArray(values[field])) { - if (values[field].length === 0) { - delete values[field]; - if (!me.isCreate) { - Proxmox.Utils.assemble_field_data(values, { 'delete': field }); - } - } - } - }; - deleteArrayIfEmpty('match-field'); - deleteArrayIfEmpty('match-severity'); - deleteArrayIfEmpty('match-calendar'); - - return values; - }, -}); - -Ext.define('PBS.panel.NotificationMatchRuleTree', { - extend: 'Ext.panel.Panel', - xtype: 'pbsNotificationMatchRuleTree', - mixins: ['Proxmox.Mixin.CBind'], - border: false, - - getNodeTextAndIcon: function(type, data) { - let text; - let iconCls; - - switch (type) { - case 'match-severity': { - let v = data.value; - if (Ext.isArray(data.value)) { - v = data.value.join(', '); - } - text = Ext.String.format(gettext("Match severity: {0}"), v); - iconCls = 'fa fa-exclamation'; - if (!v) { - iconCls += ' internal-error'; - } - } break; - case 'match-field': { - let field = data.field; - let value = data.value; - text = Ext.String.format(gettext("Match field: {0}={1}"), field, value); - iconCls = 'fa fa-square-o'; - if (!field || !value || (Ext.isArray(value) && !value.length)) { - iconCls += ' internal-error'; - } - } break; - case 'match-calendar': { - let v = data.value; - text = Ext.String.format(gettext("Match calendar: {0}"), v); - iconCls = 'fa fa-calendar-o'; - if (!v || !v.length) { - iconCls += ' internal-error'; - } - } break; - case 'mode': - if (data.value === 'all') { - text = gettext("All"); - } else if (data.value === 'any') { - text = gettext("Any"); - } - if (data.invert) { - text = `!${text}`; - } - iconCls = 'fa fa-filter'; - - break; - } - - return [text, iconCls]; - }, - - initComponent: function() { - let me = this; - - let treeStore = Ext.create('Ext.data.TreeStore', { - root: { - expanded: true, - expandable: false, - text: '', - type: 'mode', - data: { - value: 'all', - invert: false, - }, - children: [], - iconCls: 'fa fa-filter', - }, - }); - - let realMatchFields = Ext.create({ - xtype: 'hiddenfield', - setValue: function(value) { - this.value = value; - this.checkChange(); - }, - getValue: function() { - return this.value; - }, - getErrors: function() { - for (const matcher of this.value ?? []) { - let matches = matcher.match(/^([^:]+):([^=]+)=(.+)$/); - if (!matches) { - return [""]; // fake error for validation - } - } - return []; - }, - getSubmitValue: function() { - let value = this.value; - if (!value) { - value = []; - } - return value; - }, - name: 'match-field', - }); - - let realMatchSeverity = Ext.create({ - xtype: 'hiddenfield', - setValue: function(value) { - this.value = value; - this.checkChange(); - }, - getValue: function() { - return this.value; - }, - getErrors: function() { - for (const severities of this.value ?? 
[]) { - if (!severities) { - return [""]; // fake error for validation - } - } - return []; - }, - getSubmitValue: function() { - let value = this.value; - if (!value) { - value = []; - } - return value; - }, - name: 'match-severity', - }); - - let realMode = Ext.create({ - xtype: 'hiddenfield', - name: 'mode', - setValue: function(value) { - this.value = value; - this.checkChange(); - }, - getValue: function() { - return this.value; - }, - getSubmitValue: function() { - let value = this.value; - return value; - }, - }); - - let realMatchCalendar = Ext.create({ - xtype: 'hiddenfield', - name: 'match-calendar', - - setValue: function(value) { - this.value = value; - this.checkChange(); - }, - getValue: function() { - return this.value; - }, - getErrors: function() { - for (const timespan of this.value ?? []) { - if (!timespan) { - return [""]; // fake error for validation - } - } - return []; - }, - getSubmitValue: function() { - let value = this.value; - return value; - }, - }); - - let realInvertMatch = Ext.create({ - xtype: 'proxmoxcheckbox', - name: 'invert-match', - hidden: true, - deleteEmpty: !me.isCreate, - }); - - let storeChanged = function(store) { - store.suspendEvent('datachanged'); - - let matchFieldStmts = []; - let matchSeverityStmts = []; - let matchCalendarStmts = []; - let modeStmt = 'all'; - let invertMatchStmt = false; - - store.each(function(model) { - let type = model.get('type'); - let data = model.get('data'); - - switch (type) { - case 'match-field': - matchFieldStmts.push(`${data.type}:${data.field ?? ''}=${data.value ?? ''}`); - break; - case 'match-severity': - if (Ext.isArray(data.value)) { - matchSeverityStmts.push(data.value.join(',')); - } else { - matchSeverityStmts.push(data.value); - } - break; - case 'match-calendar': - matchCalendarStmts.push(data.value); - break; - case 'mode': - modeStmt = data.value; - invertMatchStmt = data.invert; - break; - } - - let [text, iconCls] = me.getNodeTextAndIcon(type, data); - model.set({ - text, - iconCls, - }); - }); - - realMatchFields.suspendEvent('change'); - realMatchFields.setValue(matchFieldStmts); - realMatchFields.resumeEvent('change'); - - realMatchCalendar.suspendEvent('change'); - realMatchCalendar.setValue(matchCalendarStmts); - realMatchCalendar.resumeEvent('change'); - - realMode.suspendEvent('change'); - realMode.setValue(modeStmt); - realMode.resumeEvent('change'); - - realInvertMatch.suspendEvent('change'); - realInvertMatch.setValue(invertMatchStmt); - realInvertMatch.resumeEvent('change'); - - realMatchSeverity.suspendEvent('change'); - realMatchSeverity.setValue(matchSeverityStmts); - realMatchSeverity.resumeEvent('change'); - - store.resumeEvent('datachanged'); - }; - - realMatchFields.addListener('change', function(field, value) { - let parseMatchField = function(filter) { - let [, type, matchedField, matchedValue] = - filter.match(/^(?:(regex|exact):)?([A-Za-z0-9_][A-Za-z0-9._-]*)=(.+)$/); - if (type === undefined) { - type = "exact"; - } - - if (type === 'exact') { - matchedValue = matchedValue.split(','); - } - - return { - type: 'match-field', - data: { - type, - field: matchedField, - value: matchedValue, - }, - leaf: true, - }; - }; - - for (let node of treeStore.queryBy( - record => record.get('type') === 'match-field', - ).getRange()) { - node.remove(true); - } - - if (!value) { - return; - } - let records = value.map(parseMatchField); - - let rootNode = treeStore.getRootNode(); - - for (let record of records) { - rootNode.appendChild(record); - } - }); - - 
realMatchSeverity.addListener('change', function(field, value) { - let parseSeverity = function(severities) { - return { - type: 'match-severity', - data: { - value: severities.split(','), - }, - leaf: true, - }; - }; - - for (let node of treeStore.queryBy( - record => record.get('type') === 'match-severity').getRange()) { - node.remove(true); - } - - let records = value.map(parseSeverity); - let rootNode = treeStore.getRootNode(); - - for (let record of records) { - rootNode.appendChild(record); - } - }); - - realMatchCalendar.addListener('change', function(field, value) { - let parseCalendar = function(timespan) { - return { - type: 'match-calendar', - data: { - value: timespan, - }, - leaf: true, - }; - }; - - for (let node of treeStore.queryBy( - record => record.get('type') === 'match-calendar').getRange()) { - node.remove(true); - } - - let records = value.map(parseCalendar); - let rootNode = treeStore.getRootNode(); - - for (let record of records) { - rootNode.appendChild(record); - } - }); - - realMode.addListener('change', function(field, value) { - let data = treeStore.getRootNode().get('data'); - treeStore.getRootNode().set('data', { - ...data, - value, - }); - }); - - realInvertMatch.addListener('change', function(field, value) { - let data = treeStore.getRootNode().get('data'); - treeStore.getRootNode().set('data', { - ...data, - invert: value, - }); - }); - - treeStore.addListener('datachanged', storeChanged); - - let treePanel = Ext.create({ - xtype: 'treepanel', - store: treeStore, - minHeight: 300, - maxHeight: 300, - scrollable: true, - - bind: { - selection: '{selectedRecord}', - }, - }); - - let addNode = function() { - let node = { - type: 'match-field', - data: { - type: 'exact', - field: '', - value: '', - }, - leaf: true, - }; - treeStore.getRootNode().appendChild(node); - treePanel.setSelection(treeStore.getRootNode().lastChild); - }; - - let deleteNode = function() { - let selection = treePanel.getSelection(); - for (let selected of selection) { - if (!selected.isRoot()) { - selected.remove(true); - } - } - }; - - Ext.apply(me, { - items: [ - realMatchFields, - realMode, - realMatchSeverity, - realInvertMatch, - realMatchCalendar, - treePanel, - { - xtype: 'button', - margin: '5 5 5 0', - text: gettext('Add'), - iconCls: 'fa fa-plus-circle', - handler: addNode, - }, - { - xtype: 'button', - margin: '5 5 5 0', - text: gettext('Remove'), - iconCls: 'fa fa-minus-circle', - handler: deleteNode, - }, - ], - }); - me.callParent(); - }, -}); - -Ext.define('PBS.panel.NotificationMatchRuleSettings', { - extend: 'Ext.panel.Panel', - xtype: 'pbsNotificationMatchRuleSettings', - mixins: ['Proxmox.Mixin.CBind'], - border: false, - layout: 'anchor', - - items: [ - { - xtype: 'proxmoxKVComboBox', - name: 'mode', - fieldLabel: gettext('Match if'), - allowBlank: false, - isFormField: false, - - matchFieldWidth: false, - - comboItems: [ - ['all', gettext('All rules match')], - ['any', gettext('Any rule matches')], - ['notall', gettext('At least one rule does not match')], - ['notany', gettext('No rule matches')], - ], - // Hide initially to avoid glitches when opening the window - hidden: true, - bind: { - hidden: '{!showMatchingMode}', - disabled: '{!showMatchingMode}', - value: '{rootMode}', - }, - }, - { - xtype: 'proxmoxKVComboBox', - fieldLabel: gettext('Node type'), - isFormField: false, - allowBlank: false, - // Hide initially to avoid glitches when opening the window - hidden: true, - bind: { - value: '{nodeType}', - hidden: '{!showMatcherType}', - disabled: 
'{!showMatcherType}', - }, - - comboItems: [ - ['match-field', gettext('Match Field')], - ['match-severity', gettext('Match Severity')], - ['match-calendar', gettext('Match Calendar')], - ], - }, - { - xtype: 'pbsNotificationMatchFieldSettings', - cbind: { - baseUrl: '{baseUrl}', - }, - }, - { - xtype: 'pbsNotificationMatchSeveritySettings', - }, - { - xtype: 'pbsNotificationMatchCalendarSettings', - }, - ], -}); - -Ext.define('PBS.panel.MatchCalendarSettings', { - extend: 'Ext.panel.Panel', - xtype: 'pbsNotificationMatchCalendarSettings', - border: false, - layout: 'anchor', - // Hide initially to avoid glitches when opening the window - hidden: true, - bind: { - hidden: '{!typeIsMatchCalendar}', - }, - viewModel: { - // parent is set in `initComponents` - formulas: { - typeIsMatchCalendar: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - get: function(record) { - return record?.get('type') === 'match-calendar'; - }, - }, - - matchCalendarValue: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - set: function(value) { - let me = this; - let record = me.get('selectedRecord'); - let currentData = record.get('data'); - record.set({ - data: { - ...currentData, - value: value, - }, - }); - }, - get: function(record) { - return record?.get('data')?.value; - }, - }, - }, - }, - items: [ - { - xtype: 'proxmoxKVComboBox', - fieldLabel: gettext('Timespan to match'), - isFormField: false, - allowBlank: false, - editable: true, - displayField: 'key', - field: 'value', - bind: { - value: '{matchCalendarValue}', - disabled: '{!typeIsMatchCalender}', - }, - - comboItems: [ - ['mon 8-12', ''], - ['tue..fri,sun 0:00-23:59', ''], - ], - }, - ], - - initComponent: function() { - let me = this; - Ext.apply(me.viewModel, { - parent: me.up('pmxNotificationMatchRulesEditPanel').getViewModel(), - }); - me.callParent(); - }, -}); - -Ext.define('PBS.panel.MatchSeveritySettings', { - extend: 'Ext.panel.Panel', - xtype: 'pbsNotificationMatchSeveritySettings', - border: false, - layout: 'anchor', - // Hide initially to avoid glitches when opening the window - hidden: true, - bind: { - hidden: '{!typeIsMatchSeverity}', - }, - viewModel: { - // parent is set in `initComponents` - formulas: { - typeIsMatchSeverity: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - get: function(record) { - return record?.get('type') === 'match-severity'; - }, - }, - matchSeverityValue: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - set: function(value) { - let record = this.get('selectedRecord'); - let currentData = record.get('data'); - record.set({ - data: { - ...currentData, - value: value, - }, - }); - }, - get: function(record) { - return record?.get('data')?.value; - }, - }, - }, - }, - items: [ - { - xtype: 'proxmoxKVComboBox', - fieldLabel: gettext('Severities to match'), - isFormField: false, - allowBlank: true, - multiSelect: true, - field: 'value', - // Hide initially to avoid glitches when opening the window - hidden: true, - bind: { - value: '{matchSeverityValue}', - hidden: '{!typeIsMatchSeverity}', - disabled: '{!typeIsMatchSeverity}', - }, - - comboItems: [ - ['info', gettext('Info')], - ['notice', gettext('Notice')], - ['warning', gettext('Warning')], - ['error', gettext('Error')], - ['unknown', gettext('Unknown')], - ], - }, - ], - - initComponent: function() { - let me = this; - Ext.apply(me.viewModel, { - parent: me.up('pmxNotificationMatchRulesEditPanel').getViewModel(), - }); - me.callParent(); - }, -}); - -Ext.define('PBS.panel.MatchFieldSettings', { - extend: 
'Ext.panel.Panel', - xtype: 'pbsNotificationMatchFieldSettings', - border: false, - layout: 'anchor', - // Hide initially to avoid glitches when opening the window - hidden: true, - bind: { - hidden: '{!typeIsMatchField}', - }, - controller: { - xclass: 'Ext.app.ViewController', - - control: { - 'field[reference=fieldSelector]': { - change: function(field) { - let view = this.getView(); - let valueField = view.down('field[reference=valueSelector]'); - let store = valueField.getStore(); - let val = field.getValue(); - - if (val) { - store.setFilters([ - { - property: 'field', - value: val, - }, - ]); - } - }, - }, - }, - }, - viewModel: { - // parent is set in `initComponents` - formulas: { - typeIsMatchField: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - get: function(record) { - return record?.get('type') === 'match-field'; - }, - }, - isRegex: function(get) { - return get('matchFieldType') === 'regex'; - }, - matchFieldType: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - set: function(value) { - let record = this.get('selectedRecord'); - let currentData = record.get('data'); - - let newValue = []; - - // Build equivalent regular expression if switching - // to 'regex' mode - if (value === 'regex') { - let regexVal = "^"; - if (currentData.value) { - regexVal += `(${currentData.value.join('|')})`; - } - regexVal += "$"; - newValue.push(regexVal); - } - - record.set({ - data: { - ...currentData, - type: value, - value: newValue, - }, - }); - }, - get: function(record) { - return record?.get('data')?.type; - }, - }, - matchFieldField: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - set: function(value) { - let record = this.get('selectedRecord'); - let currentData = record.get('data'); - - record.set({ - data: { - ...currentData, - field: value, - // Reset value if field changes - value: [], - }, - }); - }, - get: function(record) { - return record?.get('data')?.field; - }, - }, - matchFieldValue: { - bind: { - bindTo: '{selectedRecord}', - deep: true, - }, - set: function(value) { - let record = this.get('selectedRecord'); - let currentData = record.get('data'); - record.set({ - data: { - ...currentData, - value: value, - }, - }); - }, - get: function(record) { - return record?.get('data')?.value; - }, - }, - }, - }, - - initComponent: function() { - let me = this; - - let store = Ext.create('Ext.data.Store', { - model: 'pbs-notification-fields', - autoLoad: true, - proxy: { - type: 'proxmox', - url: `/api2/json/${me.baseUrl}/matcher-fields`, - }, - listeners: { - 'load': function() { - this.each(function(record) { - record.set({ - description: - Proxmox.Utils.formatNotificationFieldName( - record.get('name'), - ), - }); - }); - - // Commit changes so that the description field is not marked - // as dirty - this.commitChanges(); - }, - }, - }); - - let valueStore = Ext.create('Ext.data.Store', { - model: 'pbs-notification-field-values', - autoLoad: true, - proxy: { - type: 'proxmox', - - url: `/api2/json/${me.baseUrl}/matcher-field-values`, - }, - listeners: { - 'load': function() { - this.each(function(record) { - if (record.get('field') === 'type') { - record.set({ - comment: - Proxmox.Utils.formatNotificationFieldValue( - record.get('value'), - ), - }); - } - }, this, true); - - // Commit changes so that the description field is not marked - // as dirty - this.commitChanges(); - }, - }, - }); - - Ext.apply(me.viewModel, { - parent: me.up('pmxNotificationMatchRulesEditPanel').getViewModel(), - }); - Ext.apply(me, { - items: [ - { - 
fieldLabel: gettext('Match Type'), - xtype: 'proxmoxKVComboBox', - reference: 'type', - isFormField: false, - allowBlank: false, - submitValue: false, - field: 'type', - - bind: { - value: '{matchFieldType}', - }, - - comboItems: [ - ['exact', gettext('Exact')], - ['regex', gettext('Regex')], - ], - }, - { - fieldLabel: gettext('Field'), - reference: 'fieldSelector', - xtype: 'proxmoxComboGrid', - isFormField: false, - submitValue: false, - allowBlank: false, - editable: false, - store: store, - queryMode: 'local', - valueField: 'name', - displayField: 'description', - field: 'field', - bind: { - value: '{matchFieldField}', - }, - listConfig: { - columns: [ - { - header: gettext('Description'), - dataIndex: 'description', - flex: 2, - }, - { - header: gettext('Field Name'), - dataIndex: 'name', - flex: 1, - }, - ], - }, - }, - { - fieldLabel: gettext('Value'), - reference: 'valueSelector', - xtype: 'proxmoxComboGrid', - autoSelect: false, - editable: false, - isFormField: false, - submitValue: false, - allowBlank: false, - showClearTrigger: true, - field: 'value', - store: valueStore, - valueField: 'value', - displayField: 'value', - notFoundIsValid: false, - multiSelect: true, - bind: { - value: '{matchFieldValue}', - hidden: '{isRegex}', - }, - listConfig: { - columns: [ - { - header: gettext('Value'), - dataIndex: 'value', - flex: 1, - }, - { - header: gettext('Comment'), - dataIndex: 'comment', - flex: 2, - }, - ], - }, - }, - { - fieldLabel: gettext('Regex'), - xtype: 'proxmoxtextfield', - editable: true, - isFormField: false, - submitValue: false, - allowBlank: false, - field: 'value', - bind: { - value: '{matchFieldValue}', - hidden: '{!isRegex}', - }, - }, - ], - }); - me.callParent(); - }, -}); -- 2.39.5 From f.schauer at proxmox.com Mon Nov 11 14:08:21 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Mon, 11 Nov 2024 14:08:21 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 3/4] use level-based logging instead of println In-Reply-To: <20241111130822.124584-1-f.schauer@proxmox.com> References: <20241111130822.124584-1-f.schauer@proxmox.com> Message-ID: <20241111130822.124584-4-f.schauer@proxmox.com> Use log level "info" by default and prevent spamming messages for every single chunk uploaded. To re-enable these messages, set the RUST_LOG environment variable to "debug". Signed-off-by: Filip Schauer --- Cargo.toml | 2 ++ src/main.rs | 28 ++++++++++++++++++++++------ src/vma2pbs.rs | 38 ++++++++++++++++++++------------------ 3 files changed, 44 insertions(+), 24 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ad80304..7951bbc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,9 @@ edition = "2021" anyhow = "1.0" bincode = "1.3" chrono = "0.4" +env_logger = "0.10" hyper = "0.14.5" +log = "0.4" pico-args = "0.5" md5 = "0.7.0" regex = "1.7" diff --git a/src/main.rs b/src/main.rs index d4b36fa..203196b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,6 +6,7 @@ use std::path::PathBuf; use anyhow::{bail, Context, Error}; use chrono::NaiveDateTime; +use env_logger::Target; use proxmox_sys::linux::tty; use proxmox_time::epoch_i64; use regex::Regex; @@ -128,7 +129,7 @@ fn parse_args() -> Result { match (encrypt, keyfile.is_some()) { (true, false) => bail!("--encrypt requires a --keyfile!"), - (false, true) => println!( + (false, true) => log::info!( "--keyfile given, but --encrypt not set -> backup will be signed, but not encrypted!" 
), _ => {} @@ -190,7 +191,7 @@ fn parse_args() -> Result { Some(key_password) } else if vma_file_path.is_none() { - println!( + log::info!( "Please use --key-password-file to provide the password when passing the VMA file \ to stdin, if required." ); @@ -246,13 +247,17 @@ fn parse_args() -> Result { let Some((_, [backup_id, timestr, ext])) = re.captures(file_name).map(|c| c.extract()) else { - // Skip the file, since it is not a VMA backup + log::debug!("Skip \"{file_name}\", since it is not a VMA backup"); continue; }; if let Some(ref vmid) = vmid { if backup_id != vmid { - // Skip the backup, since it does not match the specified vmid + log::debug!( + "Skip backup with VMID {}, since it does not match specified VMID {}", + backup_id, + vmid + ); continue; } } @@ -308,14 +313,14 @@ fn parse_args() -> Result { bail!("Did not find any backup archives"); } - println!( + log::info!( "Found {} backup archive(s) of {} different VMID(s):", total_vma_count, grouped_vmas.len() ); for (backup_id, vma_group) in &grouped_vmas { - println!("- VMID {}: {} backups", backup_id, vma_group.len()); + log::info!("- VMID {}: {} backups", backup_id, vma_group.len()); } if !yes { @@ -358,7 +363,18 @@ fn parse_args() -> Result { Ok(options) } +fn init_cli_logger() { + env_logger::Builder::from_env(env_logger::Env::new().filter_or("RUST_LOG", "info")) + .format_level(false) + .format_target(false) + .format_timestamp(None) + .target(Target::Stdout) + .init(); +} + fn main() -> Result<(), Error> { + init_cli_logger(); + let args = parse_args()?; vma2pbs(args)?; diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index a5b4027..0517251 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -82,8 +82,8 @@ fn create_pbs_backup_task( pbs_args: &PbsArgs, backup_args: &VmaBackupArgs, ) -> Result<*mut ProxmoxBackupHandle, Error> { - println!( - "backup time: {}", + log::info!( + "\tbackup time: {}", epoch_to_rfc3339(backup_args.backup_time)? 
); @@ -152,7 +152,7 @@ where let config_name = config.name; let config_data = config.content; - println!("CFG: size: {} name: {}", config_data.len(), config_name); + log::info!("\tCFG: size: {} name: {}", config_data.len(), config_name); let config_name_cstr = CString::new(config_name)?; @@ -190,9 +190,11 @@ where let device_name = vma_reader.get_device_name(device_id.try_into()?)?; let device_size = vma_reader.get_device_size(device_id.try_into()?)?; - println!( - "DEV: dev_id={} size: {} devname: {}", - device_id, device_size, device_name + log::info!( + "\tDEV: dev_id={} size: {} devname: {}", + device_id, + device_size, + device_name ); let device_name_cstr = CString::new(device_name)?; @@ -276,8 +278,8 @@ where }; let pbs_upload_chunk = |pbs_chunk_buffer: Option<&[u8]>| { - println!( - "Uploading dev_id: {} offset: {:#0X} - {:#0X}", + log::debug!( + "\tUploading dev_id: {} offset: {:#0X} - {:#0X}", dev_id, pbs_chunk_offset, pbs_chunk_offset + pbs_chunk_size, @@ -466,13 +468,13 @@ fn set_notes( pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { let pbs_args = &args.pbs_args; - println!("PBS repository: {}", pbs_args.pbs_repository); + log::info!("PBS repository: {}", pbs_args.pbs_repository); if let Some(ns) = &pbs_args.namespace { - println!("PBS namespace: {}", ns); + log::info!("PBS namespace: {}", ns); } - println!("PBS fingerprint: {}", pbs_args.fingerprint); - println!("compress: {}", pbs_args.compress); - println!("encrypt: {}", pbs_args.encrypt); + log::info!("PBS fingerprint: {}", pbs_args.fingerprint); + log::info!("compress: {}", pbs_args.compress); + log::info!("encrypt: {}", pbs_args.encrypt); let start_transfer_time = SystemTime::now(); @@ -486,8 +488,8 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { ); if args.skip_failed { - eprintln!("{}", err_msg); - println!("Skipping VMID {}", backup_args.backup_id); + log::warn!("{}", err_msg); + log::info!("Skipping VMID {}", backup_args.backup_id); break; } else { bail!(err_msg); @@ -501,15 +503,15 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { let minutes = total_seconds / 60; let seconds = total_seconds % 60; let milliseconds = transfer_duration.as_millis() % 1000; - println!("Backup finished within {minutes} minutes, {seconds} seconds and {milliseconds} ms"); + log::info!("Backup finished within {minutes} minutes, {seconds} seconds and {milliseconds} ms"); Ok(()) } fn upload_vma_file(pbs_args: &PbsArgs, backup_args: &VmaBackupArgs) -> Result<(), Error> { match &backup_args.vma_file_path { - Some(vma_file_path) => println!("Uploading VMA backup from {:?}", vma_file_path), - None => println!("Uploading VMA backup from (stdin)"), + Some(vma_file_path) => log::info!("Uploading VMA backup from {:?}", vma_file_path), + None => log::info!("Uploading VMA backup from (stdin)"), }; let vma_file: Box = match &backup_args.compression { -- 2.39.5 From f.schauer at proxmox.com Mon Nov 11 14:08:18 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Mon, 11 Nov 2024 14:08:18 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 0/4] add support for bulk import of a dump directory Message-ID: <20241111130822.124584-1-f.schauer@proxmox.com> When a path to a directory is provided in the vma_file argument, try to upload all VMA backups in the directory. This also handles compressed VMA files, notes and logs. If a vmid is specified with --vmid, only the backups of that particular vmid are uploaded. Also improve the readability of the log messages to keep track of all imported backups. 
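As a side note for reviewers: the directory scan described above boils down to matching vzdump file names and grouping the hits by VMID. A minimal standalone sketch of that matching and grouping (the regex is the one used in patch 1/4; the directory listing and the grouping by &str are made up for illustration):

```
use std::collections::HashMap;

use regex::Regex;

fn main() {
    // Pattern from patch 1/4: vzdump-qemu-<vmid>-<timestamp>.vma with an
    // optional .zst/.lzo/.gz suffix for compressed archives.
    let re = Regex::new(
        r"vzdump-qemu-(\d+)-(\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2}).vma(|.zst|.lzo|.gz)$",
    )
    .unwrap();

    // Made-up listing, standing in for the read_dir() results.
    let file_names = [
        "vzdump-qemu-100-2024_10_01-01_00_00.vma.zst",
        "vzdump-qemu-100-2024_10_02-01_00_00.vma",
        "vzdump-qemu-110-2024_10_01-01_00_00.vma.lzo",
        "vzdump-qemu-100-2024_10_01-01_00_00.log",
    ];

    // Group the matches by VMID, mirroring the fold() over VmaBackupArgs.
    let mut grouped: HashMap<&str, Vec<&str>> = HashMap::new();
    for name in file_names {
        let Some((_, [vmid, _timestr, _ext])) = re.captures(name).map(|c| c.extract()) else {
            continue; // not a VMA archive, e.g. the .log file above
        };
        grouped.entry(vmid).or_default().push(name);
    }

    for (vmid, backups) in &grouped {
        println!("- VMID {}: {} backups", vmid, backups.len());
    }
}
```

Files that do not match the pattern are simply skipped, which is also why the notes and log files can live in the same dump directory.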
Changed since v4: * Switch grouped_vmas from Vec> to HashMap> * Remove dependency on itertools * bail when no backups were found * Default to yes on the bulk import confirmation prompt * bail on invalid input to the bulk import confirmation prompt Changed since v3: * Mention in the description of the --vmid argument, that it is required if a single VMA file is provided * Construct grouped_vmas in place * Add debug logs when gathering files for bulk import * Log a summary of the files gathered for bulk import * Remove the "confusing VMA file path" error message in the second commit * Switch chunk_stats from Arc> to Arc<[AtomicU64; 256]> and use fetch_add to atomically increment and fetch the chunk stat * Ask for confirmation before bulk import * Add --yes option to skip the confirmation prompt Changed since v2: * Make skipping a VMID on error optional with the --skip-failed option * Switch log output from stderr to stdout * Bump itertools to 0.13 Changed since v1: * Do not recurse through dump directory * Compile regex once before iterating over the files in the dump directory * Use extract on regex capture groups * Do not use deprecated method `chrono::NaiveDateTime::timestamp` * Use proxmox_sys::fs::file_read_optional_string * Group VMA files by VMID and continue with next VMID on error * Move the BackupVmaToPbsArgs split into its own commit * Remove hard coded occurences of 255 * Use level-based logging instead of println Filip Schauer (4): add support for bulk import of a dump directory add option to skip vmids whose backups failed to upload use level-based logging instead of println log device upload progress as a percentage Cargo.toml | 4 + src/main.rs | 193 +++++++++++++++++++++++++++++++++++++++++++++---- src/vma2pbs.rs | 110 ++++++++++++++++++++++------ 3 files changed, 272 insertions(+), 35 deletions(-) -- 2.39.5 From f.schauer at proxmox.com Mon Nov 11 14:08:19 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Mon, 11 Nov 2024 14:08:19 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 1/4] add support for bulk import of a dump directory In-Reply-To: <20241111130822.124584-1-f.schauer@proxmox.com> References: <20241111130822.124584-1-f.schauer@proxmox.com> Message-ID: <20241111130822.124584-2-f.schauer@proxmox.com> When a path to a directory is provided in the vma_file argument, try to upload all VMA backups in the directory. This also handles compressed VMA files, notes and logs. If a vmid is specified with --vmid, only the backups of that particular vmid are uploaded. 
This is intended for use on a dump directory: PBS_FINGERPRINT='PBS_FINGERPRINT' vma-to-pbs \ --repository 'user at realm!token at server:port:datastore' \ /var/lib/vz/dump Signed-off-by: Filip Schauer --- Cargo.toml | 2 + src/main.rs | 167 +++++++++++++++++++++++++++++++++++++++++++++---- src/vma2pbs.rs | 64 ++++++++++++++++--- 3 files changed, 214 insertions(+), 19 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cd13426..ad80304 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,9 +7,11 @@ edition = "2021" [dependencies] anyhow = "1.0" bincode = "1.3" +chrono = "0.4" hyper = "0.14.5" pico-args = "0.5" md5 = "0.7.0" +regex = "1.7" scopeguard = "1.1.0" serde = "1.0" serde_json = "1.0" diff --git a/src/main.rs b/src/main.rs index 3e25591..a394078 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,26 +1,35 @@ +use std::collections::HashMap; use std::ffi::OsString; +use std::fs::read_dir; +use std::io::{BufRead, BufReader, Write}; +use std::path::PathBuf; use anyhow::{bail, Context, Error}; +use chrono::NaiveDateTime; use proxmox_sys::linux::tty; use proxmox_time::epoch_i64; +use regex::Regex; mod vma; mod vma2pbs; -use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, PbsArgs, VmaBackupArgs}; +use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, Compression, PbsArgs, VmaBackupArgs}; const CMD_HELP: &str = "\ Usage: vma-to-pbs [OPTIONS] --repository --vmid [vma_file] Arguments: - [vma_file] + [vma_file | dump_directory] Options: --repository Repository URL [--ns ] Namespace - --vmid + [--vmid ] Backup ID + This is required if a single VMA file is provided. + If not specified, bulk import all VMA backups in the provided directory. + If specified with a dump directory, only import backups of the specified vmid. [--backup-time ] Backup timestamp --fingerprint @@ -41,6 +50,8 @@ Options: File containing a comment/notes [--log-file ] Log file + -y, --yes + Automatic yes to prompts -h, --help Print help -V, --version @@ -52,7 +63,16 @@ fn parse_args() -> Result { args.remove(0); // remove the executable path. 
let mut first_later_args_index = 0; - let options = ["-h", "--help", "-c", "--compress", "-e", "--encrypt"]; + let options = [ + "-h", + "--help", + "-c", + "--compress", + "-e", + "--encrypt", + "-y", + "--yes", + ]; for (i, arg) in args.iter().enumerate() { if let Some(arg) = arg.to_str() { @@ -87,7 +107,7 @@ fn parse_args() -> Result { let pbs_repository = args.value_from_str("--repository")?; let namespace = args.opt_value_from_str("--ns")?; - let vmid = args.value_from_str("--vmid")?; + let vmid: Option = args.opt_value_from_str("--vmid")?; let backup_time: Option = args.opt_value_from_str("--backup-time")?; let backup_time = backup_time.unwrap_or_else(epoch_i64); let fingerprint = args.opt_value_from_str("--fingerprint")?; @@ -99,6 +119,7 @@ fn parse_args() -> Result { let key_password_file: Option = args.opt_value_from_str("--key-password-file")?; let notes_file: Option = args.opt_value_from_str("--notes-file")?; let log_file_path: Option = args.opt_value_from_str("--log-file")?; + let yes = args.contains(["-y", "--yes"]); match (encrypt, keyfile.is_some()) { (true, false) => bail!("--encrypt requires a --keyfile!"), @@ -196,15 +217,137 @@ fn parse_args() -> Result { encrypt, }; - let vma_args = VmaBackupArgs { - vma_file_path: vma_file_path.cloned(), - backup_id: vmid, - backup_time, - notes, - log_file_path, + let bulk = + vma_file_path + .map(PathBuf::from) + .and_then(|path| if path.is_dir() { Some(path) } else { None }); + + let grouped_vmas = if let Some(dump_dir_path) = bulk { + let re = Regex::new( + r"vzdump-qemu-(\d+)-(\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2}).vma(|.zst|.lzo|.gz)$", + )?; + + let mut vmas = Vec::new(); + + for entry in read_dir(dump_dir_path)? { + let entry = entry?; + let path = entry.path(); + + if !path.is_file() { + continue; + } + + if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) { + let Some((_, [backup_id, timestr, ext])) = + re.captures(file_name).map(|c| c.extract()) + else { + // Skip the file, since it is not a VMA backup + continue; + }; + + if let Some(ref vmid) = vmid { + if backup_id != vmid { + // Skip the backup, since it does not match the specified vmid + continue; + } + } + + let compression = match ext { + "" => None, + ".zst" => Some(Compression::Zstd), + ".lzo" => Some(Compression::Lzo), + ".gz" => Some(Compression::GZip), + _ => bail!("Unexpected file extension: {ext}"), + }; + + let backup_time = NaiveDateTime::parse_from_str(timestr, "%Y_%m_%d-%H_%M_%S")? 
+ .and_utc() + .timestamp(); + + let notes_path = path.with_file_name(format!("{}.notes", file_name)); + let notes = proxmox_sys::fs::file_read_optional_string(notes_path)?; + + let log_path = path.with_file_name(format!("{}.log", file_name)); + let log_file_path = if log_path.exists() { + Some(log_path.to_path_buf().into_os_string()) + } else { + None + }; + + let backup_args = VmaBackupArgs { + vma_file_path: Some(path.clone().into()), + compression, + backup_id: backup_id.to_string(), + backup_time, + notes, + log_file_path, + }; + vmas.push(backup_args); + } + } + + vmas.sort_by_key(|d| d.backup_time); + let total_vma_count = vmas.len(); + let grouped_vmas = vmas.into_iter().fold( + HashMap::new(), + |mut grouped: HashMap>, vma_args| { + grouped + .entry(vma_args.backup_id.clone()) + .or_default() + .push(vma_args); + grouped + }, + ); + + if grouped_vmas.is_empty() { + bail!("Did not find any backup archives"); + } + + println!( + "Found {} backup archive(s) of {} different VMID(s):", + total_vma_count, + grouped_vmas.len() + ); + + for (backup_id, vma_group) in &grouped_vmas { + println!("- VMID {}: {} backups", backup_id, vma_group.len()); + } + + if !yes { + eprint!("Proceed with the bulk import? (Y/n): "); + std::io::stdout().flush()?; + let mut line = String::new(); + + BufReader::new(std::io::stdin()).read_line(&mut line)?; + let trimmed = line.trim(); + match trimmed { + "y" | "Y" | "" => {} + "n" | "N" => bail!("Bulk import was not confirmed."), + _ => bail!("Unexpected choice '{trimmed}'!"), + } + } + + grouped_vmas + } else if let Some(vmid) = vmid { + HashMap::from([( + vmid.clone(), + vec![VmaBackupArgs { + vma_file_path: vma_file_path.cloned(), + compression: None, + backup_id: vmid, + backup_time, + notes, + log_file_path, + }], + )]) + } else { + bail!("No vmid specified for single backup file"); }; - let options = BackupVmaToPbsArgs { pbs_args, vma_args }; + let options = BackupVmaToPbsArgs { + pbs_args, + grouped_vmas, + }; Ok(options) } diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index a888a7b..95ede9b 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -4,6 +4,7 @@ use std::collections::HashMap; use std::ffi::{c_char, CStr, CString, OsString}; use std::fs::File; use std::io::{stdin, BufRead, BufReader, Read}; +use std::process::{Command, Stdio}; use std::ptr; use std::time::SystemTime; @@ -30,7 +31,7 @@ const VMA_CLUSTER_SIZE: usize = 65536; pub struct BackupVmaToPbsArgs { pub pbs_args: PbsArgs, - pub vma_args: VmaBackupArgs, + pub grouped_vmas: HashMap>, } pub struct PbsArgs { @@ -45,8 +46,15 @@ pub struct PbsArgs { pub encrypt: bool, } +pub enum Compression { + Zstd, + Lzo, + GZip, +} + pub struct VmaBackupArgs { pub vma_file_path: Option, + pub compression: Option, pub backup_id: String, pub backup_time: i64, pub notes: Option, @@ -467,7 +475,19 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { let start_transfer_time = SystemTime::now(); - upload_vma_file(pbs_args, &args.vma_args)?; + for (_, vma_group) in args.grouped_vmas { + for backup_args in vma_group { + if let Err(e) = upload_vma_file(pbs_args, &backup_args) { + eprintln!( + "Failed to upload vma file at {:?} - {}", + backup_args.vma_file_path.unwrap_or("(stdin)".into()), + e + ); + println!("Skipping VMID {}", backup_args.backup_id); + break; + } + } + } let transfer_duration = SystemTime::now().duration_since(start_transfer_time)?; let total_seconds = transfer_duration.as_secs(); @@ -480,13 +500,43 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { } fn 
upload_vma_file(pbs_args: &PbsArgs, backup_args: &VmaBackupArgs) -> Result<(), Error> { - let vma_file: Box = match &backup_args.vma_file_path { - Some(vma_file_path) => match File::open(vma_file_path) { - Err(why) => return Err(anyhow!("Couldn't open file: {}", why)), - Ok(file) => Box::new(BufReader::new(file)), + match &backup_args.vma_file_path { + Some(vma_file_path) => println!("Uploading VMA backup from {:?}", vma_file_path), + None => println!("Uploading VMA backup from (stdin)"), + }; + + let vma_file: Box = match &backup_args.compression { + Some(compression) => { + let vma_file_path = backup_args + .vma_file_path + .as_ref() + .expect("No VMA file path provided"); + let mut cmd = match compression { + Compression::Zstd => { + let mut cmd = Command::new("zstd"); + cmd.args(["-q", "-d", "-c"]); + cmd + } + Compression::Lzo => { + let mut cmd = Command::new("lzop"); + cmd.args(["-d", "-c"]); + cmd + } + Compression::GZip => Command::new("zcat"), + }; + let process = cmd.arg(vma_file_path).stdout(Stdio::piped()).spawn()?; + let stdout = process.stdout.expect("Failed to capture stdout"); + Box::new(BufReader::new(stdout)) + } + None => match &backup_args.vma_file_path { + Some(vma_file_path) => match File::open(vma_file_path) { + Err(why) => return Err(anyhow!("Couldn't open file: {}", why)), + Ok(file) => Box::new(BufReader::new(file)), + }, + None => Box::new(BufReader::new(stdin())), }, - None => Box::new(BufReader::new(stdin())), }; + let vma_reader = VmaReader::new(vma_file)?; let pbs = create_pbs_backup_task(pbs_args, backup_args)?; -- 2.39.5 From f.schauer at proxmox.com Mon Nov 11 14:08:20 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Mon, 11 Nov 2024 14:08:20 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 2/4] add option to skip vmids whose backups failed to upload In-Reply-To: <20241111130822.124584-1-f.schauer@proxmox.com> References: <20241111130822.124584-1-f.schauer@proxmox.com> Message-ID: <20241111130822.124584-3-f.schauer@proxmox.com> Signed-off-by: Filip Schauer --- src/main.rs | 6 ++++++ src/vma2pbs.rs | 13 ++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index a394078..d4b36fa 100644 --- a/src/main.rs +++ b/src/main.rs @@ -50,6 +50,9 @@ Options: File containing a comment/notes [--log-file ] Log file + --skip-failed + Skip VMIDs that failed to be uploaded and continue onto the next VMID if a dump directory + is specified. 
-y, --yes Automatic yes to prompts -h, --help @@ -70,6 +73,7 @@ fn parse_args() -> Result { "--compress", "-e", "--encrypt", + "--skip-failed", "-y", "--yes", ]; @@ -119,6 +123,7 @@ fn parse_args() -> Result { let key_password_file: Option = args.opt_value_from_str("--key-password-file")?; let notes_file: Option = args.opt_value_from_str("--notes-file")?; let log_file_path: Option = args.opt_value_from_str("--log-file")?; + let skip_failed = args.contains("--skip-failed"); let yes = args.contains(["-y", "--yes"]); match (encrypt, keyfile.is_some()) { @@ -347,6 +352,7 @@ fn parse_args() -> Result { let options = BackupVmaToPbsArgs { pbs_args, grouped_vmas, + skip_failed, }; Ok(options) diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index 95ede9b..a5b4027 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -32,6 +32,7 @@ const VMA_CLUSTER_SIZE: usize = 65536; pub struct BackupVmaToPbsArgs { pub pbs_args: PbsArgs, pub grouped_vmas: HashMap>, + pub skip_failed: bool, } pub struct PbsArgs { @@ -478,13 +479,19 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { for (_, vma_group) in args.grouped_vmas { for backup_args in vma_group { if let Err(e) = upload_vma_file(pbs_args, &backup_args) { - eprintln!( + let err_msg = format!( "Failed to upload vma file at {:?} - {}", backup_args.vma_file_path.unwrap_or("(stdin)".into()), e ); - println!("Skipping VMID {}", backup_args.backup_id); - break; + + if args.skip_failed { + eprintln!("{}", err_msg); + println!("Skipping VMID {}", backup_args.backup_id); + break; + } else { + bail!(err_msg); + } } } } -- 2.39.5 From f.schauer at proxmox.com Mon Nov 11 14:08:22 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Mon, 11 Nov 2024 14:08:22 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 4/4] log device upload progress as a percentage In-Reply-To: <20241111130822.124584-1-f.schauer@proxmox.com> References: <20241111130822.124584-1-f.schauer@proxmox.com> Message-ID: <20241111130822.124584-5-f.schauer@proxmox.com> Log the upload progress of a device as a percentage with log level info every 1000 chunks. 
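For a sense of scale: assuming PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE is the client's usual 4 MiB chunk size, one info line per 1000 chunks works out to roughly one line per 4 GiB of device data. A standalone sketch of the same integer arithmetic (the 32 GiB device size is made up):

```
fn main() {
    // Assumption: PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE is the 4 MiB default.
    const CHUNK_SIZE: u64 = 4 * 1024 * 1024;
    let device_size: u64 = 32 * 1024 * 1024 * 1024; // made-up 32 GiB device

    // One log line per 1000 uploaded chunks, as in the patch below.
    for chunk_stat in (0u64..).map(|i| i * 1000) {
        if CHUNK_SIZE * chunk_stat > device_size {
            break;
        }
        // Same integer arithmetic as in the patch.
        let percentage = 100 * CHUNK_SIZE * chunk_stat / device_size;
        println!("\tUploading dev_id: 0 ({percentage}%)");
    }
}
```

Since this is integer division, the reported percentage advances in steps of about 12% for a device of this size.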
Signed-off-by: Filip Schauer --- src/vma2pbs.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index 0517251..f469053 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -6,6 +6,8 @@ use std::fs::File; use std::io::{stdin, BufRead, BufReader, Read}; use std::process::{Command, Stdio}; use std::ptr; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; use std::time::SystemTime; use anyhow::{anyhow, bail, Error}; @@ -234,6 +236,8 @@ where non_zero_mask: u64, } + let chunk_stats = Arc::new([const { AtomicU64::new(0) }; VMA_MAX_DEVICES]); + let images_chunks: RefCell>> = RefCell::new(HashMap::new()); @@ -284,6 +288,11 @@ where pbs_chunk_offset, pbs_chunk_offset + pbs_chunk_size, ); + let chunk_stat = chunk_stats[dev_id as usize].fetch_add(1, Ordering::SeqCst); + if (chunk_stat % 1000) == 0 { + let percentage = 100 * PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE * chunk_stat / device_size; + log::info!("\tUploading dev_id: {} ({}%)", dev_id, percentage); + } let mut pbs_err: *mut c_char = ptr::null_mut(); -- 2.39.5 From f.schauer at proxmox.com Mon Nov 11 14:13:45 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Mon, 11 Nov 2024 14:13:45 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v4 2/6] add support for bulk import of a dump directory In-Reply-To: <1730724842.agk2is6zq8.astroid@yuna.none> References: <20241030135537.92595-1-f.schauer@proxmox.com> <20241030135537.92595-3-f.schauer@proxmox.com> <1730724842.agk2is6zq8.astroid@yuna.none> Message-ID: <976c12d0-482f-4443-9780-21b02ec50b92@proxmox.com> Superseded by: https://lists.proxmox.com/pipermail/pbs-devel/2024-November/011353.html On 04/11/2024 14:06, Fabian Grünbichler wrote: > grouped_vmas should still be a map, not a vec of vec.. e.g., something > like this (requires some more adaptation - while this could use > itertools, I don't think it's worth to pull that in if the same can be > had with a single fold invocation): @@ -298,12 +298,16 @@ fn > parse_args() -> Result { > vmas.sort_by_key(|d|d.backup_time); let total_vma_count = vmas.len(); > - let mut grouped_vmas: Vec<_> = vmas - .into_iter() - > .into_group_map_by(|d|d.backup_id.clone()) - .into_values() - > .collect(); - grouped_vmas.sort_by_key(|d|d[0].backup_id.clone()); + > let grouped_vmas = vmas.into_iter().fold( + HashMap::new(), + |mut > grouped: HashMap>, vma_args|{ + grouped + > .entry(vma_args.backup_id.clone()) + .or_default() + .push(vma_args); > + grouped + }, + ); log::info!( "Found {} backup archive(s) of {} > different VMID(s):", @@ -311,12 +315,8 @@ fn parse_args() -> > Result { grouped_vmas.len() ); - for > vma_group in &grouped_vmas { - log::info!( - "- VMID {}: {} backups", > - vma_group[0].backup_id, - vma_group.len() - ); + for (vma_group, > vma_args) in &grouped_vmas { + log::info!("- VMID {}: {} backups", > vma_group, vma_args.len()); } if !yes { done On 04/11/2024 14:06, Fabian Grünbichler wrote: >> + println!( >> + "Found {} backup archive(s) of {} different VMID(s):", >> + total_vma_count, >> + grouped_vmas.len() >> + ); > if we don't find any, we should print something else here and exit? done with `bail!` in v5 On 04/11/2024 14:06, Fabian Grünbichler wrote: >> + if !yes { >> + loop { >> + eprint!("Proceed with the bulk import? 
(y/n): "); >> + let mut line = String::new(); >> + >> + BufReader::new(std::io::stdin()).read_line(&mut line)?; >> + let trimmed = line.trim(); >> + if trimmed == "y" || trimmed == "Y" { >> + break; >> + } else if trimmed == "n" || trimmed == "N" { >> + bail!("Bulk import was not confirmed."); >> + } > this maybe should mimic what we do in proxmox_router when prompting > for confirmation? e.g., flush stdout, have a default value, ..? should > we abort after a few loops? Changed in v5 to mimic the behaviour of the confirmation prompt in proxmox_router. (bail on invalid input) Also made Y the default choice. From f.gruenbichler at proxmox.com Mon Nov 11 14:37:26 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 11 Nov 2024 14:37:26 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 4/4] fix #5853: client: pxar: exclude stale files on metadata read In-Reply-To: <20241105140153.282980-5-c.ebner@proxmox.com> References: <20241105140153.282980-1-c.ebner@proxmox.com> <20241105140153.282980-5-c.ebner@proxmox.com> Message-ID: <1731330267.220wgkqhtc.astroid@yuna.none> behaviour wise this seems okay to me, but if possible, I'd avoid all the return value tuples, see detailed comments below.. On November 5, 2024 3:01 pm, Christian Ebner wrote: > Skip and warn the user for files which returned a stale file handle > error while reading the metadata associated to that file. > > Instead of returning with an error when getting the metadata, return > a boolean flag signaling if a stale file handle has been encountered. > > Link to issue in bugtracker: > https://bugzilla.proxmox.com/show_bug.cgi?id=5853 > > Link to thread in community forum: > https://forum.proxmox.com/threads/156822/ > > Signed-off-by: Christian Ebner > --- > pbs-client/src/pxar/create.rs | 100 ++++++++++++++++++++++------------ > 1 file changed, 66 insertions(+), 34 deletions(-) > > diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs > index 2a844922c..85be00db4 100644 > --- a/pbs-client/src/pxar/create.rs > +++ b/pbs-client/src/pxar/create.rs > @@ -228,7 +228,7 @@ where > let mut fs_feature_flags = Flags::from_magic(fs_magic); > > let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?; > - let metadata = get_metadata( > + let (metadata, stale_fd) = get_metadata( stale_fd here is not used at all.. > source_dir.as_raw_fd(), > &stat, > feature_flags & fs_feature_flags, > @@ -744,7 +744,7 @@ impl Archiver { > return Ok(()); > } > > - let metadata = get_metadata( > + let (metadata, stale_fd) = get_metadata( this one is used > fd.as_raw_fd(), > stat, > self.flags(), > @@ -753,6 +753,11 @@ impl Archiver { > self.skip_e2big_xattr, > )?; > > + if stale_fd { > + log::warn!("Stale filehandle encountered, skip {:?}", self.path); > + return Ok(()); > + } for this warning.. but get_metadata already logs (potentially multiple times ;)) that things are incomplete cause of the stale filehandle, this only adds the path context.. 
> + > if self.previous_payload_index.is_none() { > return self > .add_entry_to_archive(encoder, &mut None, c_file_name, stat, fd, &metadata, None) > @@ -1301,7 +1306,14 @@ impl Archiver { > file_name: &Path, > metadata: &Metadata, > ) -> Result<(), Error> { > - let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?; > + let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) { > + Ok(dest) => dest, > + Err(Errno::ESTALE) => { > + log::warn!("Stale file handle encountered, skip {file_name:?}"); > + return Ok(()); > + } > + Err(err) => return Err(err.into()), > + }; > encoder.add_symlink(metadata, file_name, dest).await?; > Ok(()) > } > @@ -1397,9 +1409,10 @@ fn get_metadata( > fs_magic: i64, > fs_feature_flags: &mut Flags, > skip_e2big_xattr: bool, > -) -> Result { > +) -> Result<(Metadata, bool), Error> { > // required for some of these > let proc_path = Path::new("/proc/self/fd/").join(fd.to_string()); > + let mut stale_fd = false; > > let mut meta = Metadata { > stat: pxar::Stat { > @@ -1412,18 +1425,27 @@ fn get_metadata( > ..Default::default() > }; > > - get_xattr_fcaps_acl( > + if get_xattr_fcaps_acl( only call site, could just bubble up ESTALE > &mut meta, > fd, > &proc_path, > flags, > fs_feature_flags, > skip_e2big_xattr, > - )?; > - get_chattr(&mut meta, fd)?; > + )? { > + stale_fd = true; > + log::warn!("Stale filehandle, xattrs incomplete"); > + } > + if get_chattr(&mut meta, fd)? { same > + stale_fd = true; > + log::warn!("Stale filehandle, chattr incomplete"); > + } > get_fat_attr(&mut meta, fd, fs_magic)?; > - get_quota_project_id(&mut meta, fd, flags, fs_magic)?; > - Ok(meta) > + if get_quota_project_id(&mut meta, fd, flags, fs_magic)? { same > + stale_fd = true; > + log::warn!("Stale filehandle, quota project id incomplete"); > + } see above and way down below, IMHO all of these could just bubble up the error.. > + Ok((meta, stale_fd)) > } > > fn get_fcaps( > @@ -1431,22 +1453,23 @@ fn get_fcaps( > fd: RawFd, > flags: Flags, > fs_feature_flags: &mut Flags, > -) -> Result<(), Error> { > +) -> Result { this is only called by get_xattr_fcaps_acl, so could just bubble up ESTALE as well.. 
> if !flags.contains(Flags::WITH_FCAPS) { > - return Ok(()); > + return Ok(false); > } > > match xattr::fgetxattr(fd, xattr::XATTR_NAME_FCAPS) { > Ok(data) => { > meta.fcaps = Some(pxar::format::FCaps { data }); > - Ok(()) > + Ok(false) > } > - Err(Errno::ENODATA) => Ok(()), > + Err(Errno::ENODATA) => Ok(false), > Err(Errno::EOPNOTSUPP) => { > fs_feature_flags.remove(Flags::WITH_FCAPS); > - Ok(()) > + Ok(false) > } > - Err(Errno::EBADF) => Ok(()), // symlinks > + Err(Errno::EBADF) => Ok(false), // symlinks > + Err(Errno::ESTALE) => Ok(true), > Err(err) => Err(err).context("failed to read file capabilities"), > } > } > @@ -1458,32 +1481,35 @@ fn get_xattr_fcaps_acl( > flags: Flags, > fs_feature_flags: &mut Flags, > skip_e2big_xattr: bool, > -) -> Result<(), Error> { > +) -> Result { > if !flags.contains(Flags::WITH_XATTRS) { > - return Ok(()); > + return Ok(false); > } > > let xattrs = match xattr::flistxattr(fd) { > Ok(names) => names, > Err(Errno::EOPNOTSUPP) => { > fs_feature_flags.remove(Flags::WITH_XATTRS); > - return Ok(()); > + return Ok(false); > } > Err(Errno::E2BIG) => { > match skip_e2big_xattr { > - true => return Ok(()), > + true => return Ok(false), > false => { > bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); > } > }; > } > - Err(Errno::EBADF) => return Ok(()), // symlinks > + Err(Errno::EBADF) => return Ok(false), // symlinks > + Err(Errno::ESTALE) => return Ok(true), see above > Err(err) => return Err(err).context("failed to read xattrs"), > }; > > for attr in &xattrs { > if xattr::is_security_capability(attr) { > - get_fcaps(meta, fd, flags, fs_feature_flags)?; > + if get_fcaps(meta, fd, flags, fs_feature_flags)? { > + return Ok(true); see above > + } > continue; > } > > @@ -1505,35 +1531,37 @@ fn get_xattr_fcaps_acl( > Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either > Err(Errno::E2BIG) => { > match skip_e2big_xattr { > - true => return Ok(()), > + true => return Ok(false), > false => { > bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); > } > }; > } > + Err(Errno::ESTALE) => return Ok(true), // symlinks same here (and stray copy-paste comment I guess?) 
> Err(err) => { > return Err(err).context(format!("error reading extended attribute {attr:?}")) > } > } > } > > - Ok(()) > + Ok(false) > } > > -fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> { > +fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result { > let mut attr: libc::c_long = 0; > > match unsafe { fs::read_attr_fd(fd, &mut attr) } { > Ok(_) => (), > + Err(Errno::ESTALE) => return Ok(true), > Err(errno) if errno_is_unsupported(errno) => { > - return Ok(()); > + return Ok(false); > } > Err(err) => return Err(err).context("failed to read file attributes"), > } > > metadata.stat.flags |= Flags::from_chattr(attr).bits(); > > - Ok(()) > + Ok(false) > } > > fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> { > @@ -1564,30 +1592,34 @@ fn get_quota_project_id( > fd: RawFd, > flags: Flags, > magic: i64, > -) -> Result<(), Error> { > +) -> Result { see above > if !(metadata.is_dir() || metadata.is_regular_file()) { > - return Ok(()); > + return Ok(false); > } > > if !flags.contains(Flags::WITH_QUOTA_PROJID) { > - return Ok(()); > + return Ok(false); > } > > use proxmox_sys::linux::magic::*; > > match magic { > EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (), > - _ => return Ok(()), > + _ => return Ok(false), > } > > let mut fsxattr = fs::FSXAttr::default(); > let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) }; > > + if let Err(Errno::ESTALE) = res { > + return Ok(true); > + } > + > // On some FUSE filesystems it can happen that ioctl is not supported. > // For these cases projid is set to 0 while the error is ignored. > if let Err(errno) = res { > if errno_is_unsupported(errno) { > - return Ok(()); > + return Ok(false); > } else { > return Err(errno).context("error while reading quota project id"); > } > @@ -1597,7 +1629,7 @@ fn get_quota_project_id( > if projid != 0 { > metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid }); > } > - Ok(()) > + Ok(false) > } > > fn get_acl( > @@ -1840,7 +1872,7 @@ mod tests { > let fs_magic = detect_fs_type(dir.as_raw_fd()).unwrap(); > let stat = nix::sys::stat::fstat(dir.as_raw_fd()).unwrap(); > let mut fs_feature_flags = Flags::from_magic(fs_magic); > - let metadata = get_metadata( > + let (metadata, _) = get_metadata( no use of the new return value > dir.as_raw_fd(), > &stat, > fs_feature_flags, > @@ -1937,7 +1969,7 @@ mod tests { > let stat = nix::sys::stat::fstat(source_dir.as_raw_fd()).unwrap(); > let mut fs_feature_flags = Flags::from_magic(fs_magic); > > - let metadata = get_metadata( > + let (metadata, _) = get_metadata( no use either.. so wouldn't it make more sense to pass in a path and log the context right in get_metadata? or treat the stale FD as an error, and add the context/path as part of error handling? the four call sites are: - two related to tests, we can probably treat ESTALE as hard error there - the one for obtaining the metadata of the source dir of the archive, if that is stale we can't create an archive -> hard error as well - adding an entry: for the stale case, we already log a warning and proceed with the next entry, so we don't benefit from the fact that (incomplete) metadata and the staleness is returned, as opposed to just treating ESTALE as an error that we can "catch" and handle.. 
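A self-contained sketch of that catchable-error shape, using a reduced stand-in for the nix errno type rather than the real pxar code:

```rust
// Stand-in for nix::errno::Errno, reduced to what this sketch needs.
#[allow(dead_code)]
#[derive(Debug, PartialEq)]
enum Errno {
    EStale,
    Other(i32),
}

// Metadata helpers bubble the raw errno up instead of returning (value, bool) tuples.
fn get_chattr(fd: i32) -> Result<u64, Errno> {
    if fd < 0 {
        Err(Errno::EStale)
    } else {
        Ok(0)
    }
}

fn get_metadata(fd: i32) -> Result<u64, Errno> {
    let flags = get_chattr(fd)?; // `?` replaces the tuple bookkeeping
    Ok(flags)
}

// The one caller that knows the path "catches" ESTALE, warns once, and skips the entry.
fn add_entry(fd: i32, path: &str) -> Result<(), Errno> {
    match get_metadata(fd) {
        Ok(_metadata) => Ok(()), // would add the entry to the archive here
        Err(Errno::EStale) => {
            eprintln!("stale file handle encountered, skip {path}");
            Ok(())
        }
        Err(other) => Err(other),
    }
}

fn main() {
    add_entry(-1, "/mnt/nfs/gone").unwrap();
    add_entry(3, "/mnt/nfs/present").unwrap();
}
```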
>         source_dir.as_raw_fd(),
>         &stat,
>         fs_feature_flags,
> -- 
> 2.39.5

From f.gruenbichler at proxmox.com  Mon Nov 11 14:37:21 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Mon, 11 Nov 2024 14:37:21 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 3/4] client: pxar: warn user and ignore stale file handles on file open
In-Reply-To: <20241105140153.282980-4-c.ebner@proxmox.com>
References: <20241105140153.282980-1-c.ebner@proxmox.com>
 <20241105140153.282980-4-c.ebner@proxmox.com>
Message-ID: <1731332199.oj8zimoo18.astroid@yuna.none>

On November 5, 2024 3:01 pm, Christian Ebner wrote:
> Do not fail hard if a file open fails because of a stale file handle.
> Warn the user and ignore the file, just like the client already does
> in case of missing privileges to access the file.
>
> Signed-off-by: Christian Ebner 
> ---
>  pbs-client/src/pxar/create.rs | 4 ++++
>  1 file changed, 4 insertions(+)
>
> diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs
> index 8685e8d42..2a844922c 100644
> --- a/pbs-client/src/pxar/create.rs
> +++ b/pbs-client/src/pxar/create.rs
> @@ -484,6 +484,10 @@ impl Archiver {
>                  log::warn!("failed to open file: {:?}: access denied", file_name);
>                  Ok(None)
>              }
> +            Err(Errno::ESTALE) => {
> +                log::warn!("failed to open file: {file_name:?}: stale file handle");
> +                Ok(None)
> +            }

should we add a report_stale_file in the vein of the other report
helpers, and use that in places where `self.path` contains the right
information?

>              Err(Errno::EPERM) if !noatime.is_empty() => {
>                  // Retry without O_NOATIME:
>                  noatime = OFlag::empty();
> -- 
> 2.39.5

From c.ebner at proxmox.com  Mon Nov 11 16:43:23 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Mon, 11 Nov 2024 16:43:23 +0100
Subject: [pbs-devel] [PATCH v7 proxmox-backup 01/31] sync: pull: optimize backup group sorting
In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com>
References: <20241111154353.482734-1-c.ebner@proxmox.com>
Message-ID: <20241111154353.482734-2-c.ebner@proxmox.com>

`BackupGroup` implements `cmp::Ord`, so use that implementation for
comparing groups during sorting. Further, only sort the list of backup
groups after filtering, thereby possibly reducing the number of
required comparisons.

No functional changes.
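As a small aside, the ordering this relies on in miniature; a stand-in
type whose derived `Ord` compares the type field first and the id second,
exactly the order the removed closure used (hypothetical values):

```rust
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Group {
    ty: u8,     // stand-in for BackupType; derived Ord compares this field first,
    id: String, // then falls back to the id, the same order the closure used
}

fn main() {
    let list = vec![
        Group { ty: 1, id: "vm-101".into() },
        Group { ty: 0, id: "ct-200".into() },
        Group { ty: 0, id: "ct-100".into() },
    ];

    // Filter first, so fewer elements take part in the O(n log n) sort ...
    let mut list: Vec<Group> = list.into_iter().filter(|g| g.ty == 0).collect();
    // ... then rely on the derived lexicographic `Ord` instead of a manual closure.
    list.sort_unstable();

    assert_eq!(list[0].id, "ct-100");
    println!("{list:?}");
}
```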
Signed-off-by: Christian Ebner 
---
changes since version 6:
- not present in previous version

 src/server/pull.rs | 16 +++++-----------
 1 file changed, 5 insertions(+), 11 deletions(-)

diff --git a/src/server/pull.rs b/src/server/pull.rs
index d9584776e..c12ecec82 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -829,22 +829,16 @@ pub(crate) async fn pull_ns(
     namespace: &BackupNamespace,
     params: &mut PullParameters,
 ) -> Result<(StoreProgress, SyncStats, bool), Error> {
-    let mut list: Vec = params.source.list_groups(namespace, &params.owner).await?;
-
-    list.sort_unstable_by(|a, b| {
-        let type_order = a.ty.cmp(&b.ty);
-        if type_order == std::cmp::Ordering::Equal {
-            a.id.cmp(&b.id)
-        } else {
-            type_order
-        }
-    });
+    let list: Vec = params.source.list_groups(namespace, &params.owner).await?;
 
     let unfiltered_count = list.len();
-    let list: Vec = list
+    let mut list: Vec = list
         .into_iter()
         .filter(|group| group.apply_filters(&params.group_filter))
         .collect();
+
+    list.sort_unstable();
+
     info!(
         "found {} groups to sync (out of {unfiltered_count} total)",
         list.len()
-- 
2.39.5

From c.ebner at proxmox.com  Mon Nov 11 16:43:28 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Mon, 11 Nov 2024 16:43:28 +0100
Subject: [pbs-devel] [PATCH v7 proxmox-backup 06/31] config: acl: refactor acl path component check for datastore
In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com>
References: <20241111154353.482734-1-c.ebner@proxmox.com>
Message-ID: <20241111154353.482734-7-c.ebner@proxmox.com>

Combine the two if statements checking the datastore's ACL path
components, which can be represented more concisely as one.

Further, extend the pre-existing comment to clarify that `datastore`
ACL paths are not limited to the datastore name, but might have
further sub-components specifying the namespace.

Suggested-by: Fabian Grünbichler 
Signed-off-by: Christian Ebner 
---
changes since version 6:
- no changes

 pbs-config/src/acl.rs | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/pbs-config/src/acl.rs b/pbs-config/src/acl.rs
index 4ce4c13c0..29ad3e8c9 100644
--- a/pbs-config/src/acl.rs
+++ b/pbs-config/src/acl.rs
@@ -80,11 +80,8 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> {
             }
         }
         "datastore" => {
-            // /datastore/{store}
-            if components_len <= 2 {
-                return Ok(());
-            }
-            if components_len > 2 && components_len <= 2 + pbs_api_types::MAX_NAMESPACE_DEPTH {
+            // /datastore/{store}/{namespace}
+            if components_len <= 2 + pbs_api_types::MAX_NAMESPACE_DEPTH {
                 return Ok(());
             }
         }
-- 
2.39.5

From c.ebner at proxmox.com  Mon Nov 11 16:43:29 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Mon, 11 Nov 2024 16:43:29 +0100
Subject: [pbs-devel] [PATCH v7 proxmox-backup 07/31] config: acl: allow namespace components for remote datastores
In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com>
References: <20241111154353.482734-1-c.ebner@proxmox.com>
Message-ID: <20241111154353.482734-8-c.ebner@proxmox.com>

Extend the component limit for ACL paths of `remote` to include
possible namespace components.

This allows limiting the permissions for sync jobs in push direction
to a namespace subset on the remote datastore.
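The shape of the resulting check, as a standalone sketch; the constant
value is assumed for illustration, and the real logic in
pbs-config/src/acl.rs covers more path classes:

```rust
// Stand-in for pbs_api_types::MAX_NAMESPACE_DEPTH (assumed value, for illustration).
const MAX_NAMESPACE_DEPTH: usize = 7;

// Reduced to the two branches these patches touch.
fn check_acl_path(path: &str) -> Result<(), String> {
    let components: Vec<&str> = path.split('/').filter(|c| !c.is_empty()).collect();
    let components_len = components.len();

    if components_len == 0 {
        return Ok(()); // root path
    }
    match components[0] {
        // /datastore/{store}/{namespace...}
        "datastore" if components_len <= 2 + MAX_NAMESPACE_DEPTH => Ok(()),
        // /remote/{remote}/{store}/{namespace...}
        "remote" if components_len <= 3 + MAX_NAMESPACE_DEPTH => Ok(()),
        _ => Err(format!("invalid ACL path '{path}'")),
    }
}

fn main() {
    assert!(check_acl_path("/datastore/store1/ns1/ns2").is_ok());
    assert!(check_acl_path("/remote/pbs2/store1/ns1").is_ok());
    assert!(check_acl_path("/unknown/thing").is_err());
}
```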
Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-config/src/acl.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pbs-config/src/acl.rs b/pbs-config/src/acl.rs index 29ad3e8c9..a06b918ad 100644 --- a/pbs-config/src/acl.rs +++ b/pbs-config/src/acl.rs @@ -86,8 +86,8 @@ pub fn check_acl_path(path: &str) -> Result<(), Error> { } } "remote" => { - // /remote/{remote}/{store} - if components_len <= 3 { + // /remote/{remote}/{store}/{namespace} + if components_len <= 3 + pbs_api_types::MAX_NAMESPACE_DEPTH { return Ok(()); } } -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:26 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:26 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 04/31] client: backup writer: factor out merged chunk stream upload In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-5-c.ebner@proxmox.com> In preparation for implementing push support for sync jobs. Factor out the upload stream for merged chunks, which can be reused to upload the local chunks to a remote target datastore during a snapshot sync operation in push direction. Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-client/src/backup_writer.rs | 88 +++++++++++++++++++++------------ 1 file changed, 56 insertions(+), 32 deletions(-) diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index 8b9afdb95..f1bad4128 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -7,6 +7,7 @@ use std::time::Instant; use anyhow::{bail, format_err, Error}; use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt}; use futures::stream::{Stream, StreamExt, TryStreamExt}; +use openssl::sha::Sha256; use serde_json::{json, Value}; use tokio::io::AsyncReadExt; use tokio::sync::{mpsc, oneshot}; @@ -648,42 +649,14 @@ impl BackupWriter { archive: &str, ) -> impl Future> { let mut counters = UploadCounters::new(); - let uploaded_len = Arc::new(std::sync::atomic::AtomicUsize::new(0)); let counters_readonly = counters.clone(); - let append_chunk_path = format!("{}_index", prefix); - let upload_chunk_path = format!("{}_chunk", prefix); let is_fixed_chunk_size = prefix == "fixed"; - let (upload_queue, upload_result) = - Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone()); - - let start_time = std::time::Instant::now(); - let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new()))); let index_csum_2 = index_csum.clone(); - let progress_handle = if archive.ends_with(".img") - || archive.ends_with(".pxar") - || archive.ends_with(".ppxar") - { - let counters = counters.clone(); - Some(tokio::spawn(async move { - loop { - tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; - - let size = HumanByte::from(counters.total_stream_len()); - let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst)); - let elapsed = TimeSpan::from(start_time.elapsed()); - - log::info!("processed {size} in {elapsed}, uploaded {size_uploaded}"); - } - })) - } else { - None - }; - - stream + let stream = stream .inject_reused_chunks(injections, counters.clone()) .and_then(move |chunk_info| match chunk_info { InjectedChunksInfo::Known(chunks) => { @@ -749,7 +722,58 @@ impl BackupWriter { future::ok(res) } }) - .merge_known_chunks() + .merge_known_chunks(); + + Self::upload_merged_chunk_stream( + h2, + wid, + archive, + prefix, 
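The refactoring pattern in miniature, as a sketch with generic stand-in
types (assuming the futures crate): one factored-out stream "tail" that
both a fresh-chunk producer and a local-datastore reader could feed:

```rust
use futures::executor::block_on;
use futures::stream::{self, Stream, TryStreamExt};

// The shared tail of the pipeline: any stream of chunks can be fed into it,
// whether it comes from a fresh backup or from a local datastore being pushed.
async fn upload_merged_chunk_stream<S>(stream: S) -> Result<usize, String>
where
    S: Stream<Item = Result<Vec<u8>, String>>,
{
    stream
        .try_fold(0usize, |uploaded, chunk| async move {
            Ok::<usize, String>(uploaded + chunk.len())
        })
        .await
}

fn main() -> Result<(), String> {
    block_on(async {
        // Producer A: freshly chunked data
        let fresh = stream::iter(vec![Ok(vec![0u8; 4]), Ok(vec![1u8; 8])]);
        // Producer B: chunks read back from a local datastore
        let local = stream::iter(vec![Ok(vec![2u8; 16])]);

        println!("uploaded {} bytes", upload_merged_chunk_stream(fresh).await?);
        println!("uploaded {} bytes", upload_merged_chunk_stream(local).await?);
        Ok(())
    })
}
```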
+ stream, + index_csum_2, + counters_readonly, + ) + } + + fn upload_merged_chunk_stream( + h2: H2Client, + wid: u64, + archive: &str, + prefix: &str, + stream: impl Stream>, + index_csum: Arc>>, + counters: UploadCounters, + ) -> impl Future> { + let append_chunk_path = format!("{prefix}_index"); + let upload_chunk_path = format!("{prefix}_chunk"); + + let start_time = std::time::Instant::now(); + let uploaded_len = Arc::new(AtomicUsize::new(0)); + + let (upload_queue, upload_result) = + Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone()); + + let progress_handle = if archive.ends_with(".img") + || archive.ends_with(".pxar") + || archive.ends_with(".ppxar") + { + let counters = counters.clone(); + Some(tokio::spawn(async move { + loop { + tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; + + let size = HumanByte::from(counters.total_stream_len()); + let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst)); + let elapsed = TimeSpan::from(start_time.elapsed()); + + log::info!("processed {size} in {elapsed}, uploaded {size_uploaded}"); + } + })) + } else { + None + }; + + stream .try_for_each(move |merged_chunk_info| { let upload_queue = upload_queue.clone(); @@ -813,14 +837,14 @@ impl BackupWriter { }) .then(move |result| async move { upload_result.await?.and(result) }.boxed()) .and_then(move |_| { - let mut guard = index_csum_2.lock().unwrap(); + let mut guard = index_csum.lock().unwrap(); let csum = guard.take().unwrap().finish(); if let Some(handle) = progress_handle { handle.abort(); } - futures::future::ok(counters_readonly.to_upload_stats(csum, start_time.elapsed())) + futures::future::ok(counters.to_upload_stats(csum, start_time.elapsed())) }) } -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:27 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:27 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 05/31] client: backup writer: allow push uploading index and chunks In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-6-c.ebner@proxmox.com> Add a method `upload_index_chunk_info` to be used for uploading an existing index and the corresponding chunk stream. Instead of taking an input stream of raw bytes as the `upload_stream`, this takes a stream of `MergedChunkInfo` object provided by the local chunk reader of the sync jobs source. Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-client/src/backup_writer.rs | 93 +++++++++++++++++++++++++++++++++ pbs-client/src/lib.rs | 1 + 2 files changed, 94 insertions(+) diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index f1bad4128..685510da3 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -266,6 +266,99 @@ impl BackupWriter { .await } + /// Upload chunks and index + pub async fn upload_index_chunk_info( + &self, + archive_name: &str, + stream: impl Stream>, + options: UploadOptions, + ) -> Result { + let mut param = json!({ "archive-name": archive_name }); + let prefix = if let Some(size) = options.fixed_size { + param["size"] = size.into(); + "fixed" + } else { + "dynamic" + }; + + if options.encrypt && self.crypt_config.is_none() { + bail!("requested encryption without a crypt config"); + } + + let wid = self + .h2 + .post(&format!("{prefix}_index"), Some(param)) + .await? 
+ .as_u64() + .unwrap(); + + let mut counters = UploadCounters::new(); + let counters_readonly = counters.clone(); + + let is_fixed_chunk_size = prefix == "fixed"; + + let index_csum = Arc::new(Mutex::new(Some(Sha256::new()))); + let index_csum_2 = index_csum.clone(); + + let stream = stream + .and_then(move |mut merged_chunk_info| { + match merged_chunk_info { + MergedChunkInfo::New(ref chunk_info) => { + let chunk_len = chunk_info.chunk_len; + let offset = + counters.add_new_chunk(chunk_len as usize, chunk_info.chunk.raw_size()); + let end_offset = offset as u64 + chunk_len; + let mut guard = index_csum.lock().unwrap(); + let csum = guard.as_mut().unwrap(); + if !is_fixed_chunk_size { + csum.update(&end_offset.to_le_bytes()); + } + csum.update(&chunk_info.digest); + } + MergedChunkInfo::Known(ref mut known_chunk_list) => { + for (chunk_len, digest) in known_chunk_list { + let offset = counters.add_known_chunk(*chunk_len as usize); + let end_offset = offset as u64 + *chunk_len; + let mut guard = index_csum.lock().unwrap(); + let csum = guard.as_mut().unwrap(); + if !is_fixed_chunk_size { + csum.update(&end_offset.to_le_bytes()); + } + csum.update(digest); + // Replace size with offset, expected by further stream + *chunk_len = offset as u64; + } + } + } + future::ok(merged_chunk_info) + }) + .merge_known_chunks(); + + let upload_stats = Self::upload_merged_chunk_stream( + self.h2.clone(), + wid, + archive_name, + prefix, + stream, + index_csum_2, + counters_readonly, + ) + .await?; + + let param = json!({ + "wid": wid , + "chunk-count": upload_stats.chunk_count, + "size": upload_stats.size, + "csum": hex::encode(upload_stats.csum), + }); + let _value = self + .h2 + .post(&format!("{prefix}_close"), Some(param)) + .await?; + + Ok(upload_stats.to_backup_stats()) + } + pub async fn upload_stream( &self, archive_name: &str, diff --git a/pbs-client/src/lib.rs b/pbs-client/src/lib.rs index b875347bb..4b8e4e4f4 100644 --- a/pbs-client/src/lib.rs +++ b/pbs-client/src/lib.rs @@ -9,6 +9,7 @@ pub mod tools; mod inject_reused_chunks; mod merge_known_chunks; +pub use merge_known_chunks::MergedChunkInfo; pub mod pipe_to_stream; mod http_client; -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:34 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:34 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 12/31] api types: implement api type for `BackupGroupDeleteStats` In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-13-c.ebner@proxmox.com> Make the `BackupGroupDeleteStats` exposable via the API by implementing the ApiTypes trait via the api macro invocation and add an additional field to account for the number of deleted groups. Further, add a method to add up the statistics. 
Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-api-types/src/datastore.rs | 36 +++++++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 9e866bef2..ec100a703 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1576,8 +1576,28 @@ pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { } } -#[derive(Default)] +pub const DELETE_STATS_COUNT_SCHEMA: Schema = + IntegerSchema::new("Number of entities").minimum(0).schema(); + +#[api( + properties: { + "removed-groups": { + schema: DELETE_STATS_COUNT_SCHEMA, + }, + "protected-snapshots": { + schema: DELETE_STATS_COUNT_SCHEMA, + }, + "removed-snapshots": { + schema: DELETE_STATS_COUNT_SCHEMA, + }, + }, +)] +#[derive(Default, Deserialize, Serialize)] +#[serde(rename_all = "kebab-case")] +/// Statistics for removed backup groups pub struct BackupGroupDeleteStats { + // Count of removed groups + removed_groups: usize, // Count of protected snapshots, therefore not removed protected_snapshots: usize, // Count of deleted snapshots @@ -1589,6 +1609,10 @@ impl BackupGroupDeleteStats { self.protected_snapshots == 0 } + pub fn removed_groups(&self) -> usize { + self.removed_groups + } + pub fn removed_snapshots(&self) -> usize { self.removed_snapshots } @@ -1597,6 +1621,16 @@ impl BackupGroupDeleteStats { self.protected_snapshots } + pub fn add(&mut self, rhs: &Self) { + self.removed_groups += rhs.removed_groups; + self.protected_snapshots += rhs.protected_snapshots; + self.removed_snapshots += rhs.removed_snapshots; + } + + pub fn increment_removed_groups(&mut self) { + self.removed_groups += 1; + } + pub fn increment_removed_snapshots(&mut self) { self.removed_snapshots += 1; } -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:33 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:33 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 11/31] datastore: move `BackupGroupDeleteStats` to api types In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-12-c.ebner@proxmox.com> In preparation for the delete stats to be exposed as return type to the backup group delete api endpoint. Also, rename the private field `unremoved_protected` to a better fitting `protected_snapshots` to be in line with the method names. 
Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-api-types/src/datastore.rs | 30 +++++++++++++++++++++++++++++ pbs-datastore/src/backup_info.rs | 33 ++------------------------------ pbs-datastore/src/datastore.rs | 7 ++++--- 3 files changed, 36 insertions(+), 34 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index b037b6fec..9e866bef2 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1575,3 +1575,33 @@ pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { format!("datastore '{}', namespace '{}'", store, ns) } } + +#[derive(Default)] +pub struct BackupGroupDeleteStats { + // Count of protected snapshots, therefore not removed + protected_snapshots: usize, + // Count of deleted snapshots + removed_snapshots: usize, +} + +impl BackupGroupDeleteStats { + pub fn all_removed(&self) -> bool { + self.protected_snapshots == 0 + } + + pub fn removed_snapshots(&self) -> usize { + self.removed_snapshots + } + + pub fn protected_snapshots(&self) -> usize { + self.protected_snapshots + } + + pub fn increment_removed_snapshots(&mut self) { + self.removed_snapshots += 1; + } + + pub fn increment_protected_snapshots(&mut self) { + self.protected_snapshots += 1; + } +} diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 414ec878d..222134074 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error}; use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, + Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, + BACKUP_FILE_REGEX, }; use pbs_config::{open_backup_lockfile, BackupLockGuard}; @@ -17,36 +18,6 @@ use crate::manifest::{ }; use crate::{DataBlob, DataStore}; -#[derive(Default)] -pub struct BackupGroupDeleteStats { - // Count of protected snapshots, therefore not removed - unremoved_protected: usize, - // Count of deleted snapshots - removed_snapshots: usize, -} - -impl BackupGroupDeleteStats { - pub fn all_removed(&self) -> bool { - self.unremoved_protected == 0 - } - - pub fn removed_snapshots(&self) -> usize { - self.removed_snapshots - } - - pub fn protected_snapshots(&self) -> usize { - self.unremoved_protected - } - - fn increment_removed_snapshots(&mut self) { - self.removed_snapshots += 1; - } - - fn increment_protected_snapshots(&mut self) { - self.unremoved_protected += 1; - } -} - /// BackupGroup is a directory containing a list of BackupDir #[derive(Clone)] pub struct BackupGroup { diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index d0f3c53ac..c8701d2dd 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -18,11 +18,12 @@ use proxmox_sys::process_locker::ProcessLockSharedGuard; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreFSyncLevel, - DatastoreTuning, GarbageCollectionStatus, MaintenanceMode, MaintenanceType, Operation, UPID, + Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, + DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus, MaintenanceMode, + MaintenanceType, Operation, UPID, }; -use crate::backup_info::{BackupDir, BackupGroup, BackupGroupDeleteStats}; +use 
crate::backup_info::{BackupDir, BackupGroup}; use crate::chunk_store::ChunkStore; use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter}; use crate::fixed_index::{FixedIndexReader, FixedIndexWriter}; -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:32 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:32 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 10/31] api types: define remote permissions and roles for push sync In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-11-c.ebner@proxmox.com> Adding the privileges to allow backup, namespace creation and prune on remote targets, to be used for sync jobs in push direction. Also adds dedicated roles setting the required privileges. Signed-off-by: Christian Ebner --- changes since version 6: - adapted remote datastore roles to mimic local datastore access roles pbs-api-types/src/acl.rs | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/pbs-api-types/src/acl.rs b/pbs-api-types/src/acl.rs index a8ae57a9d..e2f97f06d 100644 --- a/pbs-api-types/src/acl.rs +++ b/pbs-api-types/src/acl.rs @@ -58,6 +58,12 @@ constnamedbitmap! { PRIV_REMOTE_MODIFY("Remote.Modify"); /// Remote.Read allows reading data from a configured `Remote` PRIV_REMOTE_READ("Remote.Read"); + /// Remote.DatastoreBackup allows creating new snapshots on remote datastores + PRIV_REMOTE_DATASTORE_BACKUP("Remote.DatastoreBackup"); + /// Remote.DatastoreModify allows to modify remote datastores + PRIV_REMOTE_DATASTORE_MODIFY("Remote.DatastoreModify"); + /// Remote.DatastorePrune allows deleting snapshots on remote datastores + PRIV_REMOTE_DATASTORE_PRUNE("Remote.DatastorePrune"); /// Sys.Console allows access to the system's console PRIV_SYS_CONSOLE("Sys.Console"); @@ -160,6 +166,32 @@ pub const ROLE_REMOTE_SYNC_OPERATOR: u64 = 0 | PRIV_REMOTE_AUDIT | PRIV_REMOTE_READ; +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.SyncPushOperator can read and push snapshots to the remote. +pub const ROLE_REMOTE_SYNC_PUSH_OPERATOR: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_DATASTORE_BACKUP; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.DatastorePowerUser can read and push snapshots to the remote, and prune owned snapshots +/// and groups but not create or remove namespaces. +pub const ROLE_REMOTE_DATASTORE_POWERUSER: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_DATASTORE_BACKUP + | PRIV_REMOTE_DATASTORE_PRUNE; + +#[rustfmt::skip] +#[allow(clippy::identity_op)] +/// Remote.DatastoreAdmin can read and push snapshots to the remote, prune owned snapshots +/// and groups, as well as create or remove namespaces. 
+pub const ROLE_REMOTE_DATASTORE_ADMIN: u64 = 0 + | PRIV_REMOTE_AUDIT + | PRIV_REMOTE_DATASTORE_BACKUP + | PRIV_REMOTE_DATASTORE_MODIFY + | PRIV_REMOTE_DATASTORE_PRUNE; + #[rustfmt::skip] #[allow(clippy::identity_op)] /// Tape.Audit can audit the tape backup configuration and media content @@ -225,6 +257,12 @@ pub enum Role { RemoteAdmin = ROLE_REMOTE_ADMIN, /// Synchronization Operator RemoteSyncOperator = ROLE_REMOTE_SYNC_OPERATOR, + /// Synchronisation Operator (push direction) + RemoteSyncPushOperator = ROLE_REMOTE_SYNC_PUSH_OPERATOR, + /// Remote Datastore Prune + RemoteDatastorePowerUser = ROLE_REMOTE_DATASTORE_POWERUSER, + /// Remote Datastore Admin + RemoteDatastoreAdmin = ROLE_REMOTE_DATASTORE_ADMIN, /// Tape Auditor TapeAudit = ROLE_TAPE_AUDIT, /// Tape Administrator -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:41 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:41 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 19/31] api: config: Require PRIV_DATASTORE_AUDIT to modify sync job In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-20-c.ebner@proxmox.com> Read access to sync jobs is not granted to users not having at least PRIV_DATASTORE_AUDIT permissions on the datastore. However a user is able to create or modify such jobs, without having the audit permission. Therefore, further restrict the modify check by also including the audit permissions. Signed-off-by: Christian Ebner --- changes since version 6: - no changes src/api2/config/sync.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 6fdc69a9e..38325f5b2 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -44,7 +44,7 @@ pub fn check_sync_job_modify_access( job: &SyncJobConfig, ) -> bool { let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); - if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 { + if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { return false; } @@ -502,7 +502,7 @@ user: write at pbs r###" acl:1:/datastore/localstore1:read at pbs,write at pbs:DatastoreAudit acl:1:/datastore/localstore1:write at pbs:DatastoreBackup -acl:1:/datastore/localstore2:write at pbs:DatastorePowerUser +acl:1:/datastore/localstore2:write at pbs:DatastoreAudit,DatastorePowerUser acl:1:/datastore/localstore3:write at pbs:DatastoreAdmin acl:1:/remote/remote1:read at pbs,write at pbs:RemoteAudit acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:47 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:47 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 25/31] ui: sync edit: source group filters based on sync direction In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-26-c.ebner@proxmox.com> Switch to the local datastore, used as sync source for jobs in push direction, to get the available group filter options. 
Signed-off-by: Christian Ebner --- changes since version 6: - no changes www/window/SyncJobEdit.js | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js index 6543995e8..9ca79eaa9 100644 --- a/www/window/SyncJobEdit.js +++ b/www/window/SyncJobEdit.js @@ -238,7 +238,13 @@ Ext.define('PBS.window.SyncJobEdit', { let remoteNamespaceField = me.up('pbsSyncJobEdit').down('field[name=remote-ns]'); remoteNamespaceField.setRemote(remote); remoteNamespaceField.setRemoteStore(value); - me.up('tabpanel').down('pbsGroupFilter').setRemoteDatastore(remote, value); + + if (!me.syncDirectionPush) { + me.up('tabpanel').down('pbsGroupFilter').setRemoteDatastore(remote, value); + } else { + let localStore = me.up('pbsSyncJobEdit').down('field[name=store]').getValue(); + me.up('tabpanel').down('pbsGroupFilter').setLocalDatastore(localStore); + } }, }, }, -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:44 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:44 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 22/31] api: admin: avoid duplicate name for list sync jobs api method In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-23-c.ebner@proxmox.com> `list_sync_jobs` exists as api method in `api2::admin::sync` and `api2::config::sync`. Rename the admin api endpoint method to `list_config_sync_jobs` in order to reduce possible confusion when searching/reviewing. No functional change intended. Suggested-by: Fabian Gr?nbichler Signed-off-by: Christian Ebner --- changes since version 6: - no changes src/api2/admin/sync.rs | 6 +++--- src/api2/config/datastore.rs | 6 ++++-- src/api2/config/notifications/mod.rs | 4 ++-- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs index 8a242b1c3..3a41aa2c7 100644 --- a/src/api2/admin/sync.rs +++ b/src/api2/admin/sync.rs @@ -46,8 +46,8 @@ use crate::{ permission: &Permission::Anybody, }, )] -/// List all sync jobs -pub fn list_sync_jobs( +/// List all configured sync jobs +pub fn list_config_sync_jobs( store: Option, sync_direction: Option, _param: Value, @@ -144,5 +144,5 @@ const SYNC_INFO_ROUTER: Router = Router::new() .subdirs(SYNC_INFO_SUBDIRS); pub const ROUTER: Router = Router::new() - .get(&API_METHOD_LIST_SYNC_JOBS) + .get(&API_METHOD_LIST_CONFIG_SYNC_JOBS) .match_all("id", &SYNC_INFO_ROUTER); diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index c151eda10..ec7cc1909 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -21,7 +21,7 @@ use pbs_config::BackupLockGuard; use pbs_datastore::chunk_store::ChunkStore; use crate::api2::admin::{ - prune::list_prune_jobs, sync::list_sync_jobs, verify::list_verification_jobs, + prune::list_prune_jobs, sync::list_config_sync_jobs, verify::list_verification_jobs, }; use crate::api2::config::prune::{delete_prune_job, do_create_prune_job}; use crate::api2::config::sync::delete_sync_job; @@ -500,7 +500,9 @@ pub async fn delete_datastore( delete_verification_job(job.config.id, None, rpcenv)? } for direction in [SyncDirection::Pull, SyncDirection::Push] { - for job in list_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? { + for job in + list_config_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? + { delete_sync_job(job.config.id, None, rpcenv)? 
} } diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs index 31c4851c1..f156c8cfd 100644 --- a/src/api2/config/notifications/mod.rs +++ b/src/api2/config/notifications/mod.rs @@ -12,7 +12,7 @@ use crate::api2::admin::datastore::get_datastore_list; use pbs_api_types::{SyncDirection, PRIV_SYS_AUDIT}; use crate::api2::admin::prune::list_prune_jobs; -use crate::api2::admin::sync::list_sync_jobs; +use crate::api2::admin::sync::list_config_sync_jobs; use crate::api2::admin::verify::list_verification_jobs; use crate::api2::config::media_pool::list_pools; use crate::api2::tape::backup::list_tape_backup_jobs; @@ -155,7 +155,7 @@ pub fn get_values( } for direction in [SyncDirection::Pull, SyncDirection::Push] { - let sync_jobs = list_sync_jobs(None, Some(direction), param.clone(), rpcenv)?; + let sync_jobs = list_config_sync_jobs(None, Some(direction), param.clone(), rpcenv)?; for job in sync_jobs { values.push(MatchableValue { field: "job-id".into(), -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:39 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:39 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 17/31] api: push: implement endpoint for sync in push direction In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-18-c.ebner@proxmox.com> Expose the sync job in push direction via a dedicated API endpoint, analogous to the pull direction. Signed-off-by: Christian Ebner --- changes since version 6: - Allow access on Datastore.Read or Datastore.Backup - take remote namespace uncoditionally - use acl_path helper src/api2/mod.rs | 2 + src/api2/push.rs | 175 +++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 177 insertions(+) create mode 100644 src/api2/push.rs diff --git a/src/api2/mod.rs b/src/api2/mod.rs index a83e4c205..03596326b 100644 --- a/src/api2/mod.rs +++ b/src/api2/mod.rs @@ -12,6 +12,7 @@ pub mod helpers; pub mod node; pub mod ping; pub mod pull; +pub mod push; pub mod reader; pub mod status; pub mod tape; @@ -29,6 +30,7 @@ const SUBDIRS: SubdirMap = &sorted!([ ("nodes", &node::ROUTER), ("ping", &ping::ROUTER), ("pull", &pull::ROUTER), + ("push", &push::ROUTER), ("reader", &reader::ROUTER), ("status", &status::ROUTER), ("tape", &tape::ROUTER), diff --git a/src/api2/push.rs b/src/api2/push.rs new file mode 100644 index 000000000..bf846bb37 --- /dev/null +++ b/src/api2/push.rs @@ -0,0 +1,175 @@ +use anyhow::{format_err, Error}; +use futures::{future::FutureExt, select}; + +use pbs_api_types::{ + Authid, BackupNamespace, GroupFilter, RateLimitConfig, DATASTORE_SCHEMA, + GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, + REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, +}; +use proxmox_rest_server::WorkerTask; +use proxmox_router::{Permission, Router, RpcEnvironment}; +use proxmox_schema::api; + +use pbs_config::CachedUserInfo; + +use crate::server::push::{push_store, PushParameters}; + +/// Check if the provided user is allowed to read from the local source and act on the remote +/// target for pushing content +fn check_push_privs( + auth_id: &Authid, + store: &str, + namespace: &BackupNamespace, + remote: &str, + remote_store: &str, + remote_ns: &BackupNamespace, + delete: bool, +) -> Result<(), Error> { + let user_info = CachedUserInfo::new()?; + + let target_acl_path = 
remote_ns.remote_acl_path(remote, remote_store); + + // Check user is allowed to backup to remote/// + user_info.check_privs( + auth_id, + &target_acl_path, + PRIV_REMOTE_DATASTORE_BACKUP, + false, + )?; + + if delete { + // Check user is allowed to prune remote datastore + user_info.check_privs( + auth_id, + &target_acl_path, + PRIV_REMOTE_DATASTORE_PRUNE, + false, + )?; + } + + // Check user is allowed to read source datastore + user_info.check_privs( + auth_id, + &namespace.acl_path(store), + PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, + true, + )?; + + Ok(()) +} + +#[api( + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + ns: { + type: BackupNamespace, + optional: true, + }, + remote: { + schema: REMOTE_ID_SCHEMA, + }, + "remote-store": { + schema: DATASTORE_SCHEMA, + }, + "remote-ns": { + type: BackupNamespace, + optional: true, + }, + "remove-vanished": { + schema: REMOVE_VANISHED_BACKUPS_SCHEMA, + optional: true, + }, + "max-depth": { + schema: NS_MAX_DEPTH_REDUCED_SCHEMA, + optional: true, + }, + "group-filter": { + schema: GROUP_FILTER_LIST_SCHEMA, + optional: true, + }, + limit: { + type: RateLimitConfig, + flatten: true, + }, + "transfer-last": { + schema: TRANSFER_LAST_SCHEMA, + optional: true, + }, + }, + }, + access: { + description: r###"The user needs (at least) Remote.DatastoreBackup on ". + "'/remote/{remote}/{remote-store}[/{remote-ns}]', and either Datastore.Backup or ". + "Datastore.Read on '/datastore/{store}[/{ns}]'. The 'remove-vanished' parameter might ". + "require additional privileges."###, + permission: &Permission::Anybody, + }, +)] +/// Push store to other repository +#[allow(clippy::too_many_arguments)] +async fn push( + store: String, + ns: Option, + remote: String, + remote_store: String, + remote_ns: Option, + remove_vanished: Option, + max_depth: Option, + group_filter: Option>, + limit: RateLimitConfig, + transfer_last: Option, + rpcenv: &mut dyn RpcEnvironment, +) -> Result { + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let delete = remove_vanished.unwrap_or(false); + let ns = ns.unwrap_or_default(); + let remote_ns = remote_ns.unwrap_or_default(); + + check_push_privs( + &auth_id, + &store, + &ns, + &remote, + &remote_store, + &remote_ns, + delete, + )?; + + let push_params = PushParameters::new( + &store, + ns, + &remote, + &remote_store, + remote_ns, + auth_id.clone(), + remove_vanished, + max_depth, + group_filter, + limit, + transfer_last, + ) + .await?; + + let upid_str = WorkerTask::spawn( + "sync", + Some(store.clone()), + auth_id.to_string(), + true, + move |worker| async move { + let push_future = push_store(push_params); + (select! { + success = push_future.fuse() => success, + abort = worker.abort_future().map(|_| Err(format_err!("push aborted"))) => abort, + })?; + Ok(()) + }, + )?; + + Ok(upid_str) +} + +pub const ROUTER: Router = Router::new().post(&API_METHOD_PUSH); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:53 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:53 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 31/31] docs: add section for sync jobs in push direction In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-32-c.ebner@proxmox.com> Documents the caveats of sync jobs in push direction, explicitly recommending setting up dedicted remotes for these sync jobs. 
Signed-off-by: Christian Ebner --- changes since version 6: - Adapt to changes and extend with namespace min version requirement docs/managing-remotes.rst | 40 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/docs/managing-remotes.rst b/docs/managing-remotes.rst index dd43ccd2b..a7fd5143d 100644 --- a/docs/managing-remotes.rst +++ b/docs/managing-remotes.rst @@ -227,3 +227,43 @@ the web interface or using the ``proxmox-backup-manager`` command-line tool: .. code-block:: console # proxmox-backup-manager sync-job update ID --rate-in 20MiB + +Sync Direction Push +^^^^^^^^^^^^^^^^^^^ + +Sync jobs can be configured for pull or push direction. Sync jobs in push +direction are not identical in behaviour because of the limited access to the +target datastore via the remote servers API. Most notably, pushed content will +always be owned by the user configured in the remote configuration, being +independent from the local user as configured in the sync job. Latter is used +exclusively for permission check and scope checks on the pushing side. + +.. note:: It is strongly advised to create a dedicated remote configuration for + each individual sync job in push direction, using a dedicated user on the + remote. Otherwise, sync jobs pushing to the same target might remove each + others snapshots and/or groups, if the remove vanished flag is set or skip + snapshots if the backup time is not incremental. + This is because the backup groups on the target are owned by the user + given in the remote configuration. + +The following permissions are required for a sync job in push direction: + +#. ``Remote.Audit`` on ``/remote/{remote}`` and ``Remote.DatastoreBackup`` on + ``/remote/{remote}/{remote-store}/{remote-ns}`` path or subnamespace. +#. At least ``Datastore.Read`` on the local source datastore namespace + (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if owner of the sync + job. +#. ``Remote.DatastorePrune`` on ``/remote/{remote}/{remote-store}/{remote-ns}`` + path to remove vanished snapshots and groups. Make sure to use a dedicated + remote for each sync job in push direction as noted above. +#. ``Remote.DatastoreModify`` on ``/remote/{remote}/{remote-store}/{remote-ns}`` + path to remove vanished namespaces. A remote user with limited access should + be used on the remote backup server instance. Consider the implications as + noted below. + +.. note:: ``Remote.DatastoreModify`` will allow to remove whole namespaces on the + remote target datastore, independent of ownership. Make sure the user as + configured in remote.cfg has limited permissions on the remote side. + +.. note:: Sync jobs in push direction require namespace support on the remote + Proxmox Backup Server instance (minimum version 2.2). -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:25 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:25 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 03/31] client: backup writer: refactor backup and upload stats counters In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-4-c.ebner@proxmox.com> In preparation for push support in sync jobs. Extend and move `BackupStats` into `backup_stats` submodule and add method to create them from `UploadStats`. Further, introduce `UploadCounters` struct to hold the Arc clones of the chunk upload statistics counters, simplifying the house keeping. 
By bundling the counters into the struct, they can be passed as single function parameter when factoring out the common stream future in the subsequent implementation of the chunk upload for sync jobs in push direction. Co-developed-by: Fabian Gr?nbichler Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-client/src/backup_stats.rs | 119 ++++++++++++++++++++ pbs-client/src/backup_writer.rs | 145 +++++++++---------------- pbs-client/src/inject_reused_chunks.rs | 14 +-- pbs-client/src/lib.rs | 3 + 4 files changed, 180 insertions(+), 101 deletions(-) create mode 100644 pbs-client/src/backup_stats.rs diff --git a/pbs-client/src/backup_stats.rs b/pbs-client/src/backup_stats.rs new file mode 100644 index 000000000..f0563a001 --- /dev/null +++ b/pbs-client/src/backup_stats.rs @@ -0,0 +1,119 @@ +//! Implements counters to generate statistics for log outputs during uploads with backup writer + +use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; +use std::sync::Arc; +use std::time::Duration; + +use crate::pxar::create::ReusableDynamicEntry; + +/// Basic backup run statistics and archive checksum +pub struct BackupStats { + pub size: u64, + pub csum: [u8; 32], + pub duration: Duration, + pub chunk_count: u64, +} + +/// Extended backup run statistics and archive checksum +pub(crate) struct UploadStats { + pub(crate) chunk_count: usize, + pub(crate) chunk_reused: usize, + pub(crate) chunk_injected: usize, + pub(crate) size: usize, + pub(crate) size_reused: usize, + pub(crate) size_injected: usize, + pub(crate) size_compressed: usize, + pub(crate) duration: Duration, + pub(crate) csum: [u8; 32], +} + +impl UploadStats { + /// Convert the upload stats to the more concise [`BackupStats`] + #[inline(always)] + pub(crate) fn to_backup_stats(&self) -> BackupStats { + BackupStats { + chunk_count: self.chunk_count as u64, + size: self.size as u64, + duration: self.duration, + csum: self.csum, + } + } +} + +/// Atomic counters for accounting upload stream progress information +#[derive(Clone)] +pub(crate) struct UploadCounters { + injected_chunk_count: Arc, + known_chunk_count: Arc, + total_chunk_count: Arc, + compressed_stream_len: Arc, + injected_stream_len: Arc, + reused_stream_len: Arc, + total_stream_len: Arc, +} + +impl UploadCounters { + /// Create and zero init new upload counters + pub(crate) fn new() -> Self { + Self { + total_chunk_count: Arc::new(AtomicUsize::new(0)), + injected_chunk_count: Arc::new(AtomicUsize::new(0)), + known_chunk_count: Arc::new(AtomicUsize::new(0)), + compressed_stream_len: Arc::new(AtomicU64::new(0)), + injected_stream_len: Arc::new(AtomicUsize::new(0)), + reused_stream_len: Arc::new(AtomicUsize::new(0)), + total_stream_len: Arc::new(AtomicUsize::new(0)), + } + } + + #[inline(always)] + pub(crate) fn add_known_chunk(&mut self, chunk_len: usize) -> usize { + self.known_chunk_count.fetch_add(1, Ordering::SeqCst); + self.total_chunk_count.fetch_add(1, Ordering::SeqCst); + self.reused_stream_len + .fetch_add(chunk_len, Ordering::SeqCst); + self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst) + } + + #[inline(always)] + pub(crate) fn add_new_chunk(&mut self, chunk_len: usize, chunk_raw_size: u64) -> usize { + self.total_chunk_count.fetch_add(1, Ordering::SeqCst); + self.compressed_stream_len + .fetch_add(chunk_raw_size, Ordering::SeqCst); + self.total_stream_len.fetch_add(chunk_len, Ordering::SeqCst) + } + + #[inline(always)] + pub(crate) fn add_injected_chunk(&mut self, chunk: &ReusableDynamicEntry) -> usize { + 
self.total_chunk_count.fetch_add(1, Ordering::SeqCst); + self.injected_chunk_count.fetch_add(1, Ordering::SeqCst); + + self.reused_stream_len + .fetch_add(chunk.size() as usize, Ordering::SeqCst); + self.injected_stream_len + .fetch_add(chunk.size() as usize, Ordering::SeqCst); + self.total_stream_len + .fetch_add(chunk.size() as usize, Ordering::SeqCst) + } + + #[inline(always)] + pub(crate) fn total_stream_len(&self) -> usize { + self.total_stream_len.load(Ordering::SeqCst) + } + + /// Convert the counters to [`UploadStats`], including given archive checksum and runtime. + #[inline(always)] + pub(crate) fn to_upload_stats(&self, csum: [u8; 32], duration: Duration) -> UploadStats { + UploadStats { + chunk_count: self.total_chunk_count.load(Ordering::SeqCst), + chunk_reused: self.known_chunk_count.load(Ordering::SeqCst), + chunk_injected: self.injected_chunk_count.load(Ordering::SeqCst), + size: self.total_stream_len.load(Ordering::SeqCst), + size_reused: self.reused_stream_len.load(Ordering::SeqCst), + size_injected: self.injected_stream_len.load(Ordering::SeqCst), + size_compressed: self.compressed_stream_len.load(Ordering::SeqCst) as usize, + duration, + csum, + } + } +} diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index 4d2e8a801..8b9afdb95 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -1,7 +1,8 @@ use std::collections::HashSet; use std::future::Future; -use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; +use std::time::Instant; use anyhow::{bail, format_err, Error}; use futures::future::{self, AbortHandle, Either, FutureExt, TryFutureExt}; @@ -23,6 +24,7 @@ use pbs_tools::crypt_config::CryptConfig; use proxmox_human_byte::HumanByte; use proxmox_time::TimeSpan; +use super::backup_stats::{BackupStats, UploadCounters, UploadStats}; use super::inject_reused_chunks::{InjectChunks, InjectReusedChunks, InjectedChunksInfo}; use super::merge_known_chunks::{MergeKnownChunks, MergedChunkInfo}; @@ -40,11 +42,6 @@ impl Drop for BackupWriter { } } -pub struct BackupStats { - pub size: u64, - pub csum: [u8; 32], -} - /// Options for uploading blobs/streams to the server #[derive(Default, Clone)] pub struct UploadOptions { @@ -54,18 +51,6 @@ pub struct UploadOptions { pub fixed_size: Option, } -struct UploadStats { - chunk_count: usize, - chunk_reused: usize, - chunk_injected: usize, - size: usize, - size_reused: usize, - size_injected: usize, - size_compressed: usize, - duration: std::time::Duration, - csum: [u8; 32], -} - struct ChunkUploadResponse { future: h2::client::ResponseFuture, size: usize, @@ -194,6 +179,7 @@ impl BackupWriter { mut reader: R, file_name: &str, ) -> Result { + let start_time = Instant::now(); let mut raw_data = Vec::new(); // fixme: avoid loading into memory reader.read_to_end(&mut raw_data)?; @@ -211,7 +197,12 @@ impl BackupWriter { raw_data, ) .await?; - Ok(BackupStats { size, csum }) + Ok(BackupStats { + size, + csum, + duration: start_time.elapsed(), + chunk_count: 0, + }) } pub async fn upload_blob_from_data( @@ -220,6 +211,7 @@ impl BackupWriter { file_name: &str, options: UploadOptions, ) -> Result { + let start_time = Instant::now(); let blob = match (options.encrypt, &self.crypt_config) { (false, _) => DataBlob::encode(&data, None, options.compress)?, (true, None) => bail!("requested encryption without a crypt config"), @@ -243,7 +235,12 @@ impl BackupWriter { raw_data, ) .await?; - Ok(BackupStats { size, 
csum }) + Ok(BackupStats { + size, + csum, + duration: start_time.elapsed(), + chunk_count: 0, + }) } pub async fn upload_blob_from_file>( @@ -421,10 +418,7 @@ impl BackupWriter { "csum": hex::encode(upload_stats.csum), }); let _value = self.h2.post(&close_path, Some(param)).await?; - Ok(BackupStats { - size: upload_stats.size as u64, - csum: upload_stats.csum, - }) + Ok(upload_stats.to_backup_stats()) } fn response_queue() -> ( @@ -653,23 +647,9 @@ impl BackupWriter { injections: Option>, archive: &str, ) -> impl Future> { - let total_chunks = Arc::new(AtomicUsize::new(0)); - let total_chunks2 = total_chunks.clone(); - let known_chunk_count = Arc::new(AtomicUsize::new(0)); - let known_chunk_count2 = known_chunk_count.clone(); - let injected_chunk_count = Arc::new(AtomicUsize::new(0)); - let injected_chunk_count2 = injected_chunk_count.clone(); - - let stream_len = Arc::new(AtomicUsize::new(0)); - let stream_len2 = stream_len.clone(); - let stream_len3 = stream_len.clone(); - let compressed_stream_len = Arc::new(AtomicU64::new(0)); - let compressed_stream_len2 = compressed_stream_len.clone(); - let reused_len = Arc::new(AtomicUsize::new(0)); - let reused_len2 = reused_len.clone(); - let injected_len = Arc::new(AtomicUsize::new(0)); - let injected_len2 = injected_len.clone(); - let uploaded_len = Arc::new(AtomicUsize::new(0)); + let mut counters = UploadCounters::new(); + let uploaded_len = Arc::new(std::sync::atomic::AtomicUsize::new(0)); + let counters_readonly = counters.clone(); let append_chunk_path = format!("{}_index", prefix); let upload_chunk_path = format!("{}_chunk", prefix); @@ -687,11 +667,12 @@ impl BackupWriter { || archive.ends_with(".pxar") || archive.ends_with(".ppxar") { + let counters = counters.clone(); Some(tokio::spawn(async move { loop { tokio::time::sleep(tokio::time::Duration::from_secs(60)).await; - let size = HumanByte::from(stream_len3.load(Ordering::SeqCst)); + let size = HumanByte::from(counters.total_stream_len()); let size_uploaded = HumanByte::from(uploaded_len.load(Ordering::SeqCst)); let elapsed = TimeSpan::from(start_time.elapsed()); @@ -703,22 +684,15 @@ impl BackupWriter { }; stream - .inject_reused_chunks(injections, stream_len.clone()) + .inject_reused_chunks(injections, counters.clone()) .and_then(move |chunk_info| match chunk_info { InjectedChunksInfo::Known(chunks) => { // account for injected chunks - let count = chunks.len(); - total_chunks.fetch_add(count, Ordering::SeqCst); - injected_chunk_count.fetch_add(count, Ordering::SeqCst); - let mut known = Vec::new(); let mut guard = index_csum.lock().unwrap(); let csum = guard.as_mut().unwrap(); for chunk in chunks { - let offset = - stream_len.fetch_add(chunk.size() as usize, Ordering::SeqCst) as u64; - reused_len.fetch_add(chunk.size() as usize, Ordering::SeqCst); - injected_len.fetch_add(chunk.size() as usize, Ordering::SeqCst); + let offset = counters.add_injected_chunk(&chunk) as u64; let digest = chunk.digest(); known.push((offset, digest)); let end_offset = offset + chunk.size(); @@ -731,9 +705,6 @@ impl BackupWriter { // account for not injected chunks (new and known) let chunk_len = data.len(); - total_chunks.fetch_add(1, Ordering::SeqCst); - let offset = stream_len.fetch_add(chunk_len, Ordering::SeqCst) as u64; - let mut chunk_builder = DataChunkBuilder::new(data.as_ref()).compress(compress); if let Some(ref crypt_config) = crypt_config { @@ -741,7 +712,29 @@ impl BackupWriter { } let mut known_chunks = known_chunks.lock().unwrap(); - let digest = chunk_builder.digest(); + let digest = 
*chunk_builder.digest(); + let (offset, res) = if known_chunks.contains(&digest) { + let offset = counters.add_known_chunk(chunk_len) as u64; + (offset, MergedChunkInfo::Known(vec![(offset, digest)])) + } else { + match chunk_builder.build() { + Ok((chunk, digest)) => { + let offset = + counters.add_new_chunk(chunk_len, chunk.raw_size()) as u64; + known_chunks.insert(digest); + ( + offset, + MergedChunkInfo::New(ChunkInfo { + chunk, + digest, + chunk_len: chunk_len as u64, + offset, + }), + ) + } + Err(err) => return future::err(err), + } + }; let mut guard = index_csum.lock().unwrap(); let csum = guard.as_mut().unwrap(); @@ -751,26 +744,9 @@ impl BackupWriter { if !is_fixed_chunk_size { csum.update(&chunk_end.to_le_bytes()); } - csum.update(digest); + csum.update(&digest); - let chunk_is_known = known_chunks.contains(digest); - if chunk_is_known { - known_chunk_count.fetch_add(1, Ordering::SeqCst); - reused_len.fetch_add(chunk_len, Ordering::SeqCst); - future::ok(MergedChunkInfo::Known(vec![(offset, *digest)])) - } else { - let compressed_stream_len2 = compressed_stream_len.clone(); - known_chunks.insert(*digest); - future::ready(chunk_builder.build().map(move |(chunk, digest)| { - compressed_stream_len2.fetch_add(chunk.raw_size(), Ordering::SeqCst); - MergedChunkInfo::New(ChunkInfo { - chunk, - digest, - chunk_len: chunk_len as u64, - offset, - }) - })) - } + future::ok(res) } }) .merge_known_chunks() @@ -837,15 +813,6 @@ impl BackupWriter { }) .then(move |result| async move { upload_result.await?.and(result) }.boxed()) .and_then(move |_| { - let duration = start_time.elapsed(); - let chunk_count = total_chunks2.load(Ordering::SeqCst); - let chunk_reused = known_chunk_count2.load(Ordering::SeqCst); - let chunk_injected = injected_chunk_count2.load(Ordering::SeqCst); - let size = stream_len2.load(Ordering::SeqCst); - let size_reused = reused_len2.load(Ordering::SeqCst); - let size_injected = injected_len2.load(Ordering::SeqCst); - let size_compressed = compressed_stream_len2.load(Ordering::SeqCst) as usize; - let mut guard = index_csum_2.lock().unwrap(); let csum = guard.take().unwrap().finish(); @@ -853,17 +820,7 @@ impl BackupWriter { handle.abort(); } - futures::future::ok(UploadStats { - chunk_count, - chunk_reused, - chunk_injected, - size, - size_reused, - size_injected, - size_compressed, - duration, - csum, - }) + futures::future::ok(counters_readonly.to_upload_stats(csum, start_time.elapsed())) }) } diff --git a/pbs-client/src/inject_reused_chunks.rs b/pbs-client/src/inject_reused_chunks.rs index 4b2922012..6da2bcd16 100644 --- a/pbs-client/src/inject_reused_chunks.rs +++ b/pbs-client/src/inject_reused_chunks.rs @@ -1,13 +1,13 @@ use std::cmp; use std::pin::Pin; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::{mpsc, Arc}; +use std::sync::mpsc; use std::task::{Context, Poll}; use anyhow::{anyhow, Error}; use futures::{ready, Stream}; use pin_project_lite::pin_project; +use crate::backup_stats::UploadCounters; use crate::pxar::create::ReusableDynamicEntry; pin_project! { @@ -16,7 +16,7 @@ pin_project! 
{ input: S, next_injection: Option<InjectChunks>, injections: Option<mpsc::Receiver<InjectChunks>>, - stream_len: Arc<AtomicUsize>, + counters: UploadCounters, } } @@ -42,7 +42,7 @@ pub trait InjectReusedChunks: Sized { fn inject_reused_chunks( self, injections: Option<mpsc::Receiver<InjectChunks>>, - stream_len: Arc<AtomicUsize>, + counters: UploadCounters, ) -> InjectReusedChunksQueue<Self>; } @@ -53,13 +53,13 @@ where fn inject_reused_chunks( self, injections: Option<mpsc::Receiver<InjectChunks>>, - stream_len: Arc<AtomicUsize>, + counters: UploadCounters, ) -> InjectReusedChunksQueue<Self> { InjectReusedChunksQueue { input: self, next_injection: None, injections, - stream_len, + counters, } } } @@ -85,7 +85,7 @@ where if let Some(inject) = this.next_injection.take() { // got reusable dynamic entries to inject - let offset = this.stream_len.load(Ordering::SeqCst) as u64; + let offset = this.counters.total_stream_len() as u64; match inject.boundary.cmp(&offset) { // inject now diff --git a/pbs-client/src/lib.rs b/pbs-client/src/lib.rs index 3d2da27b9..b875347bb 100644 --- a/pbs-client/src/lib.rs +++ b/pbs-client/src/lib.rs @@ -41,4 +41,7 @@ pub use backup_specification::*; mod chunk_stream; pub use chunk_stream::{ChunkStream, FixedChunkStream, InjectionData}; +mod backup_stats; +pub use backup_stats::BackupStats; + pub const PROXMOX_BACKUP_TCP_KEEPALIVE_TIME: u32 = 120; -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:43 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:43 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 21/31] api: sync jobs: expose optional `sync-direction` parameter In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-22-c.ebner@proxmox.com> Expose and switch the config type for sync job operations based on the `sync-direction` parameter, exposed on the required api endpoints. If not set, the default config type is `sync` and the default sync direction is `pull` for full backwards compatibility. Whenever possible, determine the sync direction and config type from the sync job config directly rather than requiring it as an optional api parameter. Further, extend read and modify access checks by sync direction to conditionally check for the required permissions in pull and push direction. Signed-off-by: Christian Ebner --- changes since version 6: - Fix permission check, require at least local datastore read access if not owner of the sync job src/api2/admin/sync.rs | 34 ++-- src/api2/config/datastore.rs | 11 +- src/api2/config/notifications/mod.rs | 19 +- src/api2/config/sync.rs | 278 ++++++++++++++++++++------- src/bin/proxmox-backup-proxy.rs | 11 +- 5 files changed, 259 insertions(+), 94 deletions(-) diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs index be324564c..8a242b1c3 100644 --- a/src/api2/admin/sync.rs +++ b/src/api2/admin/sync.rs @@ -1,6 +1,7 @@ //! Datastore Synchronization Job Management use anyhow::{bail, format_err, Error}; +use serde::Deserialize; use serde_json::Value; use proxmox_router::{ @@ -29,6 +30,10 @@ use crate::{ schema: DATASTORE_SCHEMA, optional: true, }, + "sync-direction": { + type: SyncDirection, + optional: true, + }, }, }, returns: { @@ -44,6 +49,7 @@ use crate::{ /// List all sync jobs pub fn list_sync_jobs( store: Option<String>, + sync_direction: Option<SyncDirection>, _param: Value, rpcenv: &mut dyn RpcEnvironment, ) -> Result<Vec<SyncJobStatus>, Error> { @@ -52,8 +58,9 @@ pub fn list_sync_jobs( let (config, digest) = sync::config()?; + let sync_direction = sync_direction.unwrap_or_default(); let job_config_iter = config
+ .convert_to_typed_array(sync_direction.as_config_type_str())? .into_iter() .filter(|job: &SyncJobConfig| { if let Some(store) = &store { @@ -62,7 +69,9 @@ pub fn list_sync_jobs( true } }) - .filter(|job: &SyncJobConfig| check_sync_job_read_access(&user_info, &auth_id, job)); + .filter(|job: &SyncJobConfig| { + check_sync_job_read_access(&user_info, &auth_id, job, sync_direction) + }); let mut list = Vec::new(); @@ -106,24 +115,23 @@ pub fn run_sync_job( let user_info = CachedUserInfo::new()?; let (config, _digest) = sync::config()?; - let sync_job: SyncJobConfig = config.lookup("sync", &id)?; + let (config_type, config_section) = config + .sections + .get(&id) + .ok_or_else(|| format_err!("No sync job with id '{id}' found in config"))?; + + let sync_direction = SyncDirection::from_config_type_str(config_type)?; + let sync_job = SyncJobConfig::deserialize(config_section)?; - if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) { - bail!("permission check failed"); + if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job, sync_direction) { + bail!("permission check failed, '{auth_id}' is missing access"); } let job = Job::new("syncjob", &id)?; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; - let upid_str = do_sync_job( - job, - sync_job, - &auth_id, - None, - SyncDirection::Pull, - to_stdout, - )?; + let upid_str = do_sync_job(job, sync_job, &auth_id, None, sync_direction, to_stdout)?; Ok(upid_str) } diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index ca6edf05a..c151eda10 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -13,8 +13,9 @@ use proxmox_uuid::Uuid; use pbs_api_types::{ Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions, - MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, + MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA, + PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, + PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, }; use pbs_config::BackupLockGuard; use pbs_datastore::chunk_store::ChunkStore; @@ -498,8 +499,10 @@ pub async fn delete_datastore( for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { delete_verification_job(job.config.id, None, rpcenv)? } - for job in list_sync_jobs(Some(name.clone()), Value::Null, rpcenv)? { - delete_sync_job(job.config.id, None, rpcenv)? + for direction in [SyncDirection::Pull, SyncDirection::Push] { + for job in list_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? { + delete_sync_job(job.config.id, None, rpcenv)? + } } for job in list_prune_jobs(Some(name.clone()), Value::Null, rpcenv)? { delete_prune_job(job.config.id, None, rpcenv)? 
diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs index dfe82ed03..31c4851c1 100644 --- a/src/api2/config/notifications/mod.rs +++ b/src/api2/config/notifications/mod.rs @@ -9,7 +9,7 @@ use proxmox_schema::api; use proxmox_sortable_macro::sortable; use crate::api2::admin::datastore::get_datastore_list; -use pbs_api_types::PRIV_SYS_AUDIT; +use pbs_api_types::{SyncDirection, PRIV_SYS_AUDIT}; use crate::api2::admin::prune::list_prune_jobs; use crate::api2::admin::sync::list_sync_jobs; @@ -154,13 +154,15 @@ pub fn get_values( }); } - let sync_jobs = list_sync_jobs(None, param.clone(), rpcenv)?; - for job in sync_jobs { - values.push(MatchableValue { - field: "job-id".into(), - value: job.config.id, - comment: job.config.comment, - }); + for direction in [SyncDirection::Pull, SyncDirection::Push] { + let sync_jobs = list_sync_jobs(None, Some(direction), param.clone(), rpcenv)?; + for job in sync_jobs { + values.push(MatchableValue { + field: "job-id".into(), + value: job.config.id, + comment: job.config.comment, + }); + } } let verify_jobs = list_verification_jobs(None, param.clone(), rpcenv)?; @@ -184,6 +186,7 @@ pub fn get_values( "package-updates", "prune", "sync", + "sync-push", "system-mail", "tape-backup", "tape-load", diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 3963049e9..97d599e30 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -1,6 +1,7 @@ use ::serde::{Deserialize, Serialize}; use anyhow::{bail, Error}; use hex::FromHex; +use pbs_api_types::SyncDirection; use serde_json::Value; use proxmox_router::{http_bail, Permission, Router, RpcEnvironment}; @@ -8,8 +9,9 @@ use proxmox_schema::{api, param_bail}; use pbs_api_types::{ Authid, SyncJobConfig, SyncJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT, - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_AUDIT, - PRIV_REMOTE_READ, PROXMOX_CONFIG_DIGEST_SCHEMA, + PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, + PRIV_REMOTE_AUDIT, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_PRUNE, PRIV_REMOTE_READ, + PROXMOX_CONFIG_DIGEST_SCHEMA, }; use pbs_config::sync; @@ -20,18 +22,35 @@ pub fn check_sync_job_read_access( user_info: &CachedUserInfo, auth_id: &Authid, job: &SyncJobConfig, + sync_direction: SyncDirection, ) -> bool { + // check for audit access on datastore/namespace, applies for pull and push direction let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { return false; } - if let Some(remote) = &job.remote { - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); - remote_privs & PRIV_REMOTE_AUDIT != 0 - } else { - let source_ds_privs = user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); - source_ds_privs & PRIV_DATASTORE_AUDIT != 0 + match sync_direction { + SyncDirection::Pull => { + if let Some(remote) = &job.remote { + let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote]); + remote_privs & PRIV_REMOTE_AUDIT != 0 + } else { + let source_ds_privs = + user_info.lookup_privs(auth_id, &["datastore", &job.remote_store]); + source_ds_privs & PRIV_DATASTORE_AUDIT != 0 + } + } + SyncDirection::Push => { + // check for audit access on remote/datastore/namespace + if let Some(target_acl_path) = job.remote_acl_path() { + let remote_privs = user_info.lookup_privs(auth_id, &target_acl_path); + remote_privs & PRIV_REMOTE_AUDIT != 0 + } else { + // Remote must always 
be present for sync in push direction, fail otherwise + false + } + } } } @@ -43,41 +62,91 @@ fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool { } } -/// checks whether user can run the corresponding pull job +/// checks whether user can run the corresponding sync job, depending on sync direction /// -/// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly. +/// namespace creation/deletion ACL and backup group ownership checks happen in the pull/push code +/// directly. /// remote side checks/filters remote datastore/namespace/group access. pub fn check_sync_job_modify_access( user_info: &CachedUserInfo, auth_id: &Authid, job: &SyncJobConfig, + sync_direction: SyncDirection, ) -> bool { - let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); - if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { - return false; - } + match sync_direction { + SyncDirection::Pull => { + let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); + if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 + || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 + { + return false; + } + + if let Some(true) = job.remove_vanished { + if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 { + return false; + } + } - if let Some(true) = job.remove_vanished { - if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 { - return false; + // same permission as changing ownership after syncing + if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { + return false; + } + + if let Some(remote) = &job.remote { + let remote_privs = + user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); + return remote_privs & PRIV_REMOTE_READ != 0; + } + true } - } + SyncDirection::Push => { + // Remote must always be present for sync in push direction, fail otherwise + let target_privs = if let Some(target_acl_path) = job.remote_acl_path() { + user_info.lookup_privs(auth_id, &target_acl_path) + } else { + return false; + }; + + // check user is allowed to create backups on remote datastore + if target_privs & PRIV_REMOTE_DATASTORE_BACKUP == 0 { + return false; + } - // same permission as changing ownership after syncing - if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { - return false; - } + if let Some(true) = job.remove_vanished { + // check user is allowed to prune backup snapshots on remote datastore + if target_privs & PRIV_REMOTE_DATASTORE_PRUNE == 0 { + return false; + } + } + + let source_privs = user_info.lookup_privs(auth_id, &job.acl_path()); + // check user is allowed to read from (local) source datastore/namespace, independent + // of job ownership + if source_privs & PRIV_DATASTORE_READ != 0 { + return true; + } + + // check user is not the owner of the sync job, but has datastore modify permissions, + // which implies permissions to change group ownership + if !is_correct_owner(auth_id, job) && source_privs & PRIV_DATASTORE_MODIFY == 0 { + return false; + } - if let Some(remote) = &job.remote { - let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); - return remote_privs & PRIV_REMOTE_READ != 0; + // user has Datastore.Modify, check also for Datastore.Backup to allow modify access + source_privs & PRIV_DATASTORE_BACKUP != 0 + } } - true } #[api( input: { - properties: {}, + properties: { + "sync-direction": { + type: SyncDirection, + optional: true, + }, + }, }, returns: { description: "List 
configured jobs.", @@ -92,6 +161,7 @@ pub fn check_sync_job_modify_access( /// List all sync jobs pub fn list_sync_jobs( _param: Value, + sync_direction: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -99,13 +169,16 @@ pub fn list_sync_jobs( let (config, digest) = sync::config()?; - let list = config.convert_to_typed_array("sync")?; + let sync_direction = sync_direction.unwrap_or_default(); + let list = config.convert_to_typed_array(sync_direction.as_config_type_str())?; rpcenv["digest"] = hex::encode(digest).into(); let list = list .into_iter() - .filter(|sync_job| check_sync_job_read_access(&user_info, &auth_id, sync_job)) + .filter(|sync_job| { + check_sync_job_read_access(&user_info, &auth_id, sync_job, sync_direction) + }) .collect(); Ok(list) } @@ -118,6 +191,10 @@ pub fn list_sync_jobs( type: SyncJobConfig, flatten: true, }, + "sync-direction": { + type: SyncDirection, + optional: true, + }, }, }, access: { @@ -128,14 +205,16 @@ pub fn list_sync_jobs( /// Create a new sync job. pub fn create_sync_job( config: SyncJobConfig, + sync_direction: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result<(), Error> { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let user_info = CachedUserInfo::new()?; + let sync_direction = sync_direction.unwrap_or_default(); let _lock = sync::lock_config()?; - if !check_sync_job_modify_access(&user_info, &auth_id, &config) { + if !check_sync_job_modify_access(&user_info, &auth_id, &config, sync_direction) { bail!("permission check failed"); } @@ -158,7 +237,7 @@ pub fn create_sync_job( param_bail!("id", "job '{}' already exists.", config.id); } - section_config.set_data(&config.id, "sync", &config)?; + section_config.set_data(&config.id, sync_direction.as_config_type_str(), &config)?; sync::save_config(§ion_config)?; @@ -188,8 +267,17 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result { - if !check_sync_job_modify_access(&user_info, &auth_id, &job) { - bail!("permission check failed"); - } - config.sections.remove(&id); + if let Some((config_type, config_section)) = config.sections.get(&id) { + let sync_direction = SyncDirection::from_config_type_str(config_type)?; + let job = SyncJobConfig::deserialize(config_section)?; + if !check_sync_job_modify_access(&user_info, &auth_id, &job, sync_direction) { + bail!("permission check failed"); } - Err(_) => { - http_bail!(NOT_FOUND, "job '{}' does not exist.", id) - } - }; + config.sections.remove(&id); + } else { + http_bail!(NOT_FOUND, "job '{}' does not exist.", id) + } sync::save_config(&config)?; @@ -536,39 +631,67 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator }; // should work without ACLs - assert!(check_sync_job_read_access(&user_info, root_auth_id, &job)); - assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job)); + assert!(check_sync_job_read_access( + &user_info, + root_auth_id, + &job, + SyncDirection::Pull, + )); + assert!(check_sync_job_modify_access( + &user_info, + root_auth_id, + &job, + SyncDirection::Pull, + )); // user without permissions must fail assert!(!check_sync_job_read_access( &user_info, &no_perm_auth_id, - &job + &job, + SyncDirection::Pull, )); assert!(!check_sync_job_modify_access( &user_info, &no_perm_auth_id, - &job + &job, + SyncDirection::Pull, )); // reading without proper read permissions on either remote or local must fail - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); + 
assert!(!check_sync_job_read_access( + &user_info, + &read_auth_id, + &job, + SyncDirection::Pull, + )); // reading without proper read permissions on local end must fail job.remote = Some("remote1".to_string()); - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); + assert!(!check_sync_job_read_access( + &user_info, + &read_auth_id, + &job, + SyncDirection::Pull, + )); // reading without proper read permissions on remote end must fail job.remote = Some("remote0".to_string()); job.store = "localstore1".to_string(); - assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job)); + assert!(!check_sync_job_read_access( + &user_info, + &read_auth_id, + &job, + SyncDirection::Pull, + )); // writing without proper write permissions on either end must fail job.store = "localstore0".to_string(); assert!(!check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); // writing without proper write permissions on local end must fail @@ -580,39 +703,54 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator assert!(!check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); // reset remote to one where users have access job.remote = Some("remote1".to_string()); // user with read permission can only read, but not modify/run - assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job)); + assert!(check_sync_job_read_access( + &user_info, + &read_auth_id, + &job, + SyncDirection::Pull, + )); job.owner = Some(read_auth_id.clone()); assert!(!check_sync_job_modify_access( &user_info, &read_auth_id, - &job + &job, + SyncDirection::Pull, )); job.owner = None; assert!(!check_sync_job_modify_access( &user_info, &read_auth_id, - &job + &job, + SyncDirection::Pull, )); job.owner = Some(write_auth_id.clone()); assert!(!check_sync_job_modify_access( &user_info, &read_auth_id, - &job + &job, + SyncDirection::Pull, )); // user with simple write permission can modify/run - assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job)); + assert!(check_sync_job_read_access( + &user_info, + &write_auth_id, + &job, + SyncDirection::Pull, + )); assert!(check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); // but can't modify/run with deletion @@ -620,7 +758,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator assert!(!check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); // unless they have Datastore.Prune as well @@ -628,7 +767,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator assert!(check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); // changing owner is not possible @@ -636,7 +776,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator assert!(!check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); // also not to the default 'root at pam' @@ -644,7 +785,8 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator assert!(!check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); // unless they have Datastore.Modify as well @@ -653,13 +795,15 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator assert!(check_sync_job_modify_access( &user_info, &write_auth_id, - &job + &job, + SyncDirection::Pull, )); job.owner = None; assert!(check_sync_job_modify_access( &user_info, 
&write_auth_id, - &job + &job, + SyncDirection::Pull, )); Ok(()) diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs index 6f19a3fbd..70283510d 100644 --- a/src/bin/proxmox-backup-proxy.rs +++ b/src/bin/proxmox-backup-proxy.rs @@ -589,7 +589,14 @@ async fn schedule_datastore_sync_jobs() { Ok((config, _digest)) => config, }; - for (job_id, (_, job_config)) in config.sections { + for (job_id, (job_type, job_config)) in config.sections { + let sync_direction = match SyncDirection::from_config_type_str(&job_type) { + Ok(direction) => direction, + Err(err) => { + eprintln!("unexpected config type in sync job config - {err}"); + continue; + } + }; let job_config: SyncJobConfig = match serde_json::from_value(job_config) { Ok(c) => c, Err(err) => { @@ -616,7 +623,7 @@ async fn schedule_datastore_sync_jobs() { job_config, &auth_id, Some(event_str), - SyncDirection::Pull, + sync_direction, false, ) { eprintln!("unable to start datastore sync job {job_id} - {err}"); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:24 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:24 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 02/31] sync: extend sync source's list namespaces method by filter callback In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-3-c.ebner@proxmox.com> Allow to filter namespaces by given callback function. This will be used to pre-filter the list of namespaces to push to a remote target for sync jobs in push direction, based on the privs of the sync jobs local user on the source datastore. Signed-off-by: Christian Ebner --- changes since version 6: - not present in previous version src/server/pull.rs | 11 ++++++++++- src/server/sync.rs | 29 +++++++++++++++++++++++++++-- 2 files changed, 37 insertions(+), 3 deletions(-) diff --git a/src/server/pull.rs b/src/server/pull.rs index c12ecec82..d059c3ff6 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -737,7 +737,16 @@ pub(crate) async fn pull_store(mut params: PullParameters) -> Result bool + Send>; + #[async_trait::async_trait] /// `SyncSource` is a trait that provides an interface for synchronizing data/information from a /// source. @@ -218,6 +222,9 @@ pub(crate) trait SyncSource: Send + Sync { async fn list_namespaces( &self, max_depth: &mut Option, + auth_id: &Authid, + user_info: &CachedUserInfo, + filter_callback: NamespaceFilter, ) -> Result, Error>; /// Lists groups within a specific namespace from the source. 
@@ -260,6 +267,9 @@ impl SyncSource for RemoteSource { async fn list_namespaces( &self, max_depth: &mut Option, + auth_id: &Authid, + user_info: &CachedUserInfo, + mut filter_callback: NamespaceFilter, ) -> Result, Error> { if self.ns.is_root() && max_depth.map_or(false, |depth| depth == 0) { return Ok(vec![self.ns.clone()]); @@ -307,6 +317,11 @@ impl SyncSource for RemoteSource { .map(|list_item| list_item.ns) .collect(); + let list = list + .into_iter() + .filter(|namespace| filter_callback((namespace, self.get_store(), auth_id, user_info))) + .collect(); + Ok(list) } @@ -400,13 +415,23 @@ impl SyncSource for LocalSource { async fn list_namespaces( &self, max_depth: &mut Option, + auth_id: &Authid, + user_info: &CachedUserInfo, + mut filter_callback: NamespaceFilter, ) -> Result, Error> { - ListNamespacesRecursive::new_max_depth( + let list: Result, Error> = ListNamespacesRecursive::new_max_depth( self.store.clone(), self.ns.clone(), max_depth.unwrap_or(MAX_NAMESPACE_DEPTH), )? - .collect() + .collect(); + + let list = list? + .into_iter() + .filter(|namespace| filter_callback((namespace, self.get_store(), auth_id, user_info))) + .collect(); + + Ok(list) } async fn list_groups( -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:30 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:30 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 08/31] api types: add remote acl path method for `BackupNamespace` In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-9-c.ebner@proxmox.com> Add a `remote_acl_path` helper method for creating acl paths for remote namespaces, to be used by the priv checks on remote datastore namespaces for e.g. the sync job in push direction. Factor out the common path extension into a dedicated method. Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-api-types/src/datastore.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 31767417a..b037b6fec 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -717,9 +717,7 @@ impl BackupNamespace { Ok(()) } - pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { - let mut path: Vec<&str> = vec!["datastore", store]; - + fn acl_path_extend<'a>(&'a self, mut path: Vec<&'a str>) -> Vec<&'a str> { if self.is_root() { path } else { @@ -728,6 +726,14 @@ impl BackupNamespace { } } + pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> { + self.acl_path_extend(vec!["datastore", store]) + } + + pub fn remote_acl_path<'a>(&'a self, remote: &'a str, store: &'a str) -> Vec<&'a str> { + self.acl_path_extend(vec!["remote", remote, store]) + } + /// Check whether this namespace contains another namespace. /// /// If so, the depth is returned. -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:31 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:31 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 09/31] api types: implement remote acl path method for sync job In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-10-c.ebner@proxmox.com> Add `remote_acl_path` method which generates the acl path from the sync job configuration. 
This helper allows easily generating the acl path from a given sync job config for privilege checks. Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-api-types/src/jobs.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 868702bc0..bf7a6bd5a 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -594,6 +594,14 @@ impl SyncJobConfig { None => vec!["datastore", &self.store], } } + + pub fn remote_acl_path(&self) -> Option<Vec<&str>> { + let remote = self.remote.as_ref()?; + match &self.remote_ns { + Some(remote_ns) => Some(remote_ns.remote_acl_path(remote, &self.remote_store)), + None => Some(vec!["remote", remote, &self.remote_store]), + } + } } #[api( -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:22 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:22 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 00/31] fix #3044: push datastore to remote target Message-ID: <20241111154353.482734-1-c.ebner@proxmox.com> This patch series implements the functionality to extend the current sync jobs in pull direction by an additional push direction, allowing the contents of a local source datastore to be pushed to a remote target. The series implements this by using the REST API of the remote target for fetching, creating and/or deleting namespaces, groups and backups, and reuses the client's backup writer functionality to create snapshots by writing a manifest on the remote target and syncing the fixed index, dynamic index or blobs contained in the source manifest to the remote, also preserving encryption information. Thanks to Fabian for further feedback on the previous version of the patch series. Changes since version 6 of the patch series: - Fix permission check for sync job modify access, correctly check local datastore access if job not owned by sync user. - Pre-filter source namespaces, so namespaces which the sync user has no access to cannot be leaked. - Avoid possibly removing unrelated target namespaces during remove vanished by only removing sub-namespaces of the remote target namespace. - Fix issues with local/target namespace mapping, make clear which are which by adapting variable names accordingly. - Adapt roles related to remote datastore access to mimic roles for local datastore access. - Unconditionally pass the namespace parameter and check early, failing if the remote does not support namespaces. - Fetch previous snapshots index to initialize known chunks correctly. - Adapt snapshot filter for excluding snapshots older than the current last snapshot already present on the target. - Fix incorrect owner header label in sync job grid for push direction. - Use `BackupGroup`'s `cmp::Ord` for sorting, for pull and push. - Update some comments and docs. Changes since version 5 of the patch series: - Split roles and permissions for separate remote datastore prune and remote datastore modify roles. - Fetch target groups filtered by ownership, so as not to try to push or remove unowned groups. - Add documentation, highlight the caveats of conflicting push jobs when using shared remotes. - Check also for optional `PRIV_DATASTORE_BACKUP` as opposed to only `PRIV_DATASTORE_READ` on the source datastore namespace, so that the user can read the contents from there as well. - Drop `sync-direction` parameter from API endpoints where not needed, determine it from the corresponding job's configuration instead.
- Adapt layout of split job view in WebUI to use more general, less component-specific values - Introduce `remote_acl_path` helpers for `BackupNamespace` and `SyncJobConfig`. - Refactor upload counters to bundle and update counters by chunk variant. - Rework `version` endpoint and supported api feature check to be based on `supported_features` rather than a hardcoded version, allowing for more flexibility. - `PushParameters` now always store the remote version for the compatibility check. - Renamed `ignore-protected` to a less misinterpretable `error-on-protected` and inverted the boolean logic. - Squashed and reordered patches; the delete stats are no longer followup patches, as they are now fully backwards compatible. Changes since version 4 of the patch series: - Rebased onto current master Most notable changes since version 3 of the patch series include: - Rework access control permission checks to resemble the pull based logic more closely. In order to perform a full sync in push direction, including permissions for pruning contents with remove vanished, an acl.cfg looks like below: ``` acl:1:/datastore/source-store:syncoperator at pbs:DatastoreReader acl:1:/remote:syncoperator at pbs:RemoteAudit acl:1:/remote/remote-target/target-store:syncoperator at pbs:RemoteDatastorePrune,RemoteSyncPushOperator ``` - Modify access to sync jobs now requires `DatastoreAudit` for both pull and push sync jobs - Fix previously incorrect privs required for removing target namespaces - Fix performance bottleneck by not reading known chunks from the source, sending `MergedChunkInfo` instead of `ChunkInfo` over to the upload stream - Factor upload statistic counters and structs out into their own module and provide methods for easy conversion - Implement `map_to_target` helper for easier/more readable source to target mapping for namespaces - Optimize namespace creation on target, only try creating namespace components which do not exist yet. - Avoid temp file for manifest and upload source manifest directly - Not failing on deletion for protected snapshots is now opt-in - Refactor api endpoint `version` in order to be able to fetch the api version of the target - Reworked `SyncDirection` api type, use `api` macro to reduce code Most notable changes since version 2 of the patch series include: - Add checks and extend roles and privs to allow for restricting a local user's access to remote datastore operations. In order to perform a full sync in push direction, including permissions for namespace creation and deleting contents with remove vanished, an acl.cfg looks like below: ``` acl:1:/datastore/datastore:syncoperator at pbs:DatastoreAudit acl:1:/remote:syncoperator at pbs:RemoteSyncOperator acl:1:/remote/local/pushme:syncoperator at pbs:RemoteDatastoreModify,RemoteDatastorePrune,RemoteSyncPushOperator ``` Based on further feedback, privs might get further grouped or an additional role containing most of these can be created. - Drop the patch introducing the `no-timestamp-check` flag for the backup client; as pointed out by Fabian, this is not needed, since only backups newer than the currently last available one will be pushed. - Fix reading snapshots from the source by using the correct namespace. - Rename PullParameters `owner` to the more fitting `local_user`. - Fix typos in the remote sync push operator comment. - Fix comments not matching the functionality for the cli implementations.
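As a quick usage illustration (an editor's sketch, not part of the series): the snippet below triggers a push sync over the new `api2/json/push` endpoint. The parameter names follow the `proxmox-backup-manager push` implementation from this series; the datastore and remote names are placeholders, and the connected `HttpClient` handle is assumed to be set up the same way as the manager's `connect_to_localhost()`.

```rust
use anyhow::Error;
use serde_json::{json, Value};

// Minimal push trigger against the new api2/json/push endpoint; on
// success the returned JSON value should contain the UPID of the
// spawned sync task.
async fn trigger_push(client: &pbs_client::HttpClient) -> Result<Value, Error> {
    let args = json!({
        "store": "source-store",        // local source datastore (placeholder)
        "remote": "remote-target",      // remote.cfg entry (placeholder)
        "remote-store": "target-store", // datastore on the remote (placeholder)
    });
    client.post("api2/json/push", Some(args)).await
}
```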
Link to issue on bugtracker: https://bugzilla.proxmox.com/show_bug.cgi?id=3044 Christian Ebner (31): sync: pull: optimize backup group sorting sync: extend sync source's list namespaces method by filter callback client: backup writer: refactor backup and upload stats counters client: backup writer: factor out merged chunk stream upload client: backup writer: allow push uploading index and chunks config: acl: refactor acl path component check for datastore config: acl: allow namespace components for remote datastores api types: add remote acl path method for `BackupNamespace` api types: implement remote acl path method for sync job api types: define remote permissions and roles for push sync datastore: move `BackupGroupDeleteStats` to api types api types: implement api type for `BackupGroupDeleteStats` datastore: increment deleted group counter when removing group api/api-types: refactor api endpoint version, add api types fix #3044: server: implement push support for sync operations api types/config: add `sync-push` config type for push sync jobs api: push: implement endpoint for sync in push direction api: sync: move sync job invocation to server sync module api: config: Require PRIV_DATASTORE_AUDIT to modify sync job api: config: factor out sync job owner check api: sync jobs: expose optional `sync-direction` parameter api: admin: avoid duplicate name for list sync jobs api method bin: manager: add datastore push cli command ui: group filter: allow to set namespace for local datastore ui: sync edit: source group filters based on sync direction ui: add view with separate grids for pull and push sync jobs ui: sync job: adapt edit window to be used for pull and push ui: sync view: set proxy on view instead of model api: datastore/namespace: return backup groups delete stats on remove api: version: add 'prune-delete-stats' as supported feature docs: add section for sync jobs in push direction docs/managing-remotes.rst | 40 + pbs-api-types/src/acl.rs | 38 + pbs-api-types/src/datastore.rs | 76 +- pbs-api-types/src/jobs.rs | 46 ++ pbs-api-types/src/lib.rs | 3 + pbs-api-types/src/version.rs | 88 +++ pbs-client/src/backup_stats.rs | 119 +++ pbs-client/src/backup_writer.rs | 318 +++++--- pbs-client/src/inject_reused_chunks.rs | 14 +- pbs-client/src/lib.rs | 4 + pbs-config/src/acl.rs | 11 +- pbs-config/src/sync.rs | 16 +- pbs-datastore/src/backup_info.rs | 34 +- pbs-datastore/src/datastore.rs | 27 +- src/api2/admin/datastore.rs | 29 +- src/api2/admin/namespace.rs | 31 +- src/api2/admin/sync.rs | 43 +- src/api2/config/datastore.rs | 15 +- src/api2/config/notifications/mod.rs | 21 +- src/api2/config/sync.rs | 296 ++++++-- src/api2/mod.rs | 2 + src/api2/pull.rs | 108 --- src/api2/push.rs | 175 +++++ src/api2/version.rs | 42 +- src/bin/proxmox-backup-manager.rs | 216 ++++-- src/bin/proxmox-backup-proxy.rs | 24 +- src/server/mod.rs | 2 + src/server/pull.rs | 33 +- src/server/push.rs | 994 +++++++++++++++++++++++++ src/server/sync.rs | 179 ++++- www/Makefile | 1 + www/config/SyncPullPushView.js | 61 ++ www/config/SyncView.js | 29 +- www/datastore/DataStoreList.js | 2 +- www/datastore/Panel.js | 2 +- www/form/GroupFilter.js | 21 +- www/window/SyncJobEdit.js | 49 +- 37 files changed, 2694 insertions(+), 515 deletions(-) create mode 100644 pbs-api-types/src/version.rs create mode 100644 pbs-client/src/backup_stats.rs create mode 100644 src/api2/push.rs create mode 100644 src/server/push.rs create mode 100644 www/config/SyncPullPushView.js -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:35 2024 From: 
c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:35 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 13/31] datastore: increment deleted group counter when removing group In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-14-c.ebner@proxmox.com> Also account for the number of deleted backup groups, in preparation for correctly returning the delete statistics when removing contents via the REST API. Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-datastore/src/backup_info.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 222134074..62d12b118 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -221,6 +221,7 @@ impl BackupGroup { std::fs::remove_dir_all(&path).map_err(|err| { format_err!("removing group directory {:?} failed - {}", path, err) })?; + delete_stats.increment_removed_groups(); } Ok(delete_stats) -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:38 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:38 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 16/31] api types/config: add `sync-push` config type for push sync jobs In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-17-c.ebner@proxmox.com> In order for sync jobs to be either pull or push jobs, allow configuring the direction of the job. Add an additional config type `sync-push` to the sync job config to clearly distinguish sync jobs configured in pull and in push direction, and define and implement the required `SyncDirection` api type. This approach was chosen in order to limit possible misconfiguration, as unintentionally switching the sync direction could potentially delete still required snapshots.
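To illustrate the mapping this introduces (an editor's sketch, not part of the patch, assuming the `pbs-api-types` crate from this series is available):

```rust
use anyhow::Error;
use pbs_api_types::SyncDirection;

fn main() -> Result<(), Error> {
    // each direction maps to exactly one section config type, and back
    for direction in [SyncDirection::Pull, SyncDirection::Push] {
        let config_type = direction.as_config_type_str(); // "sync" / "sync-push"
        assert_eq!(SyncDirection::from_config_type_str(config_type)?, direction);
    }
    // unknown config types are rejected
    assert!(SyncDirection::from_config_type_str("backup").is_err());
    Ok(())
}
```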
Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-api-types/src/jobs.rs | 38 ++++++++++++++++++++++++++++++++++++++ pbs-config/src/sync.rs | 16 +++++++++++++--- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index bf7a6bd5a..e8056beb0 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -498,6 +498,44 @@ pub const TRANSFER_LAST_SCHEMA: Schema = .minimum(1) .schema(); +#[api()] +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// Direction of the sync job, push or pull +pub enum SyncDirection { + /// Sync direction pull + #[default] + Pull, + /// Sync direction push + Push, +} + +impl std::fmt::Display for SyncDirection { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SyncDirection::Pull => f.write_str("pull"), + SyncDirection::Push => f.write_str("push"), + } + } +} + +impl SyncDirection { + pub fn as_config_type_str(&self) -> &'static str { + match self { + SyncDirection::Pull => "sync", + SyncDirection::Push => "sync-push", + } + } + + pub fn from_config_type_str(config_type: &str) -> Result { + match config_type { + "sync" => Ok(SyncDirection::Pull), + "sync-push" => Ok(SyncDirection::Push), + _ => bail!("invalid config type for sync job"), + } + } +} + #[api( properties: { id: { diff --git a/pbs-config/src/sync.rs b/pbs-config/src/sync.rs index 45453abb1..7fc977e77 100644 --- a/pbs-config/src/sync.rs +++ b/pbs-config/src/sync.rs @@ -6,7 +6,7 @@ use anyhow::Error; use proxmox_schema::{ApiType, Schema}; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; -use pbs_api_types::{SyncJobConfig, JOB_ID_SCHEMA}; +use pbs_api_types::{SyncDirection, SyncJobConfig, JOB_ID_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; @@ -18,9 +18,19 @@ fn init() -> SectionConfig { _ => unreachable!(), }; - let plugin = SectionConfigPlugin::new("sync".to_string(), Some(String::from("id")), obj_schema); + let pull_plugin = SectionConfigPlugin::new( + SyncDirection::Pull.as_config_type_str().to_string(), + Some(String::from("id")), + obj_schema, + ); + let push_plugin = SectionConfigPlugin::new( + SyncDirection::Push.as_config_type_str().to_string(), + Some(String::from("id")), + obj_schema, + ); let mut config = SectionConfig::new(&JOB_ID_SCHEMA); - config.register_plugin(plugin); + config.register_plugin(pull_plugin); + config.register_plugin(push_plugin); config } -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:42 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:42 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 20/31] api: config: factor out sync job owner check In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-21-c.ebner@proxmox.com> Move the sync job owner check to its own helper function, for it to be reused for the owner check for sync jobs in push direction. No functional change intended. 
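For reference, the rule being factored out reduces to the following (an editor's simplified sketch with plain strings instead of `Authid`, leaving out the token-of-same-user case that `check_backup_owner` additionally accepts):

```rust
// Simplified model of is_correct_owner(): a job without an explicit
// owner is treated as owned by root@pam.
fn is_correct_owner(auth_id: &str, job_owner: Option<&str>) -> bool {
    match job_owner {
        Some(owner) => owner == auth_id,
        None => auth_id == "root@pam",
    }
}

fn main() {
    assert!(is_correct_owner("root@pam", None));
    assert!(!is_correct_owner("user@pbs", None));
    assert!(is_correct_owner("user@pbs", Some("user@pbs")));
}
```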
Signed-off-by: Christian Ebner --- changes since version 6: - no changes src/api2/config/sync.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 38325f5b2..3963049e9 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -14,6 +14,7 @@ use pbs_api_types::{ use pbs_config::sync; use pbs_config::CachedUserInfo; +use pbs_datastore::check_backup_owner; pub fn check_sync_job_read_access( user_info: &CachedUserInfo, @@ -34,6 +35,14 @@ pub fn check_sync_job_read_access( } } +fn is_correct_owner(auth_id: &Authid, job: &SyncJobConfig) -> bool { + match job.owner { + Some(ref owner) => check_backup_owner(owner, auth_id).is_ok(), + // default sync owner + None => auth_id == Authid::root_auth_id(), + } +} + /// checks whether user can run the corresponding pull job /// /// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly. @@ -54,17 +63,8 @@ pub fn check_sync_job_modify_access( } } - let correct_owner = match job.owner { - Some(ref owner) => { - owner == auth_id - || (owner.is_token() && !auth_id.is_token() && owner.user() == auth_id.user()) - } - // default sync owner - None => auth_id == Authid::root_auth_id(), - }; - // same permission as changing ownership after syncing - if !correct_owner && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { + if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { return false; } -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:36 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:36 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 14/31] api/api-types: refactor api endpoint version, add api types In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-15-c.ebner@proxmox.com> Add a dedicated api type for the `version` api endpoint and helper methods for supported feature comparison. This will be used to detect api incompatibilities with older hosts that do not support some features. Use the new api type to refactor the version endpoint and set it as the return type. Signed-off-by: Christian Ebner --- changes since version 6: - make fields pub for both `ApiVersion` and `ApiVersionInfo` - refactor `ApiVersion` and its impl pbs-api-types/src/lib.rs | 3 ++ pbs-api-types/src/version.rs | 88 ++++++++++++++++++++++++++++++++++++ src/api2/version.rs | 42 ++++++++++------- 3 files changed, 116 insertions(+), 17 deletions(-) create mode 100644 pbs-api-types/src/version.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 460c7da7c..6bae4a52b 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -155,6 +155,9 @@ pub use zfs::*; mod metrics; pub use metrics::*; +mod version; +pub use version::*; + const_regex! { // just a rough check - dummy acceptor is used before persisting pub OPENSSL_CIPHERS_REGEX = r"^[0-9A-Za-z_:, +!\-@=.]+$"; diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs new file mode 100644 index 000000000..7a4c6cb74 --- /dev/null +++ b/pbs-api-types/src/version.rs @@ -0,0 +1,88 @@ +//!
Defines the types for the api version info endpoint +use std::convert::TryFrom; + +use anyhow::{format_err, Context}; + +use proxmox_schema::api; + +#[api( + description: "Api version information", + properties: { + "version": { + description: "Version 'major.minor'", + type: String, + }, + "release": { + description: "Version release", + type: String, + }, + "repoid": { + description: "Version repository id", + type: String, + }, + "features": { + description: "List of supported features", + type: Array, + items: { + type: String, + description: "Feature id", + }, + }, + } +)] +#[derive(serde::Deserialize, serde::Serialize)] +pub struct ApiVersionInfo { + pub version: String, + pub release: String, + pub repoid: String, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub features: Vec, +} + +pub type ApiVersionMajor = u64; +pub type ApiVersionMinor = u64; +pub type ApiVersionRelease = u64; + +pub struct ApiVersion { + pub major: ApiVersionMajor, + pub minor: ApiVersionMinor, + pub release: ApiVersionRelease, + pub repoid: String, + pub features: Vec, +} + +impl TryFrom for ApiVersion { + type Error = anyhow::Error; + + fn try_from(value: ApiVersionInfo) -> Result { + let (major, minor) = value + .version + .split_once('.') + .ok_or_else(|| format_err!("malformed API version {}", value.version))?; + + let major: ApiVersionMajor = major + .parse() + .with_context(|| "failed to parse major version")?; + let minor: ApiVersionMinor = minor + .parse() + .with_context(|| "failed to parse minor version")?; + let release: ApiVersionRelease = value + .release + .parse() + .with_context(|| "failed to parse release version")?; + + Ok(Self { + major, + minor, + release, + repoid: value.repoid.clone(), + features: value.features.clone(), + }) + } +} + +impl ApiVersion { + pub fn supports_feature(&self, feature: &str) -> bool { + self.features.iter().any(|f| f == feature) + } +} diff --git a/src/api2/version.rs b/src/api2/version.rs index 0e91688b5..a6cec5216 100644 --- a/src/api2/version.rs +++ b/src/api2/version.rs @@ -1,27 +1,35 @@ //! Version information use anyhow::Error; -use serde_json::{json, Value}; +use serde_json::Value; -use proxmox_router::{ApiHandler, ApiMethod, Permission, Router, RpcEnvironment}; -use proxmox_schema::ObjectSchema; +use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment}; +use proxmox_schema::api; -fn get_version( +use pbs_api_types::ApiVersionInfo; + +const FEATURES: &'static [&'static str] = &[]; + +#[api( + returns: { + type: ApiVersionInfo, + }, + access: { + permission: &Permission::Anybody, + } +)] +///Proxmox Backup Server API version. 
+fn version( _param: Value, _info: &ApiMethod, _rpcenv: &mut dyn RpcEnvironment, -) -> Result<Value, Error> { - Ok(json!({ - "version": pbs_buildcfg::PROXMOX_PKG_VERSION, - "release": pbs_buildcfg::PROXMOX_PKG_RELEASE, - "repoid": pbs_buildcfg::PROXMOX_PKG_REPOID - })) +) -> Result<ApiVersionInfo, Error> { + Ok(ApiVersionInfo { + version: pbs_buildcfg::PROXMOX_PKG_VERSION.to_string(), + release: pbs_buildcfg::PROXMOX_PKG_RELEASE.to_string(), + repoid: pbs_buildcfg::PROXMOX_PKG_REPOID.to_string(), + features: FEATURES.iter().map(|feature| feature.to_string()).collect(), + }) } -pub const ROUTER: Router = Router::new().get( - &ApiMethod::new( - &ApiHandler::Sync(&get_version), - &ObjectSchema::new("Proxmox Backup Server API version.", &[]), - ) - .access(None, &Permission::Anybody), -); +pub const ROUTER: Router = Router::new().get(&API_METHOD_VERSION); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:46 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:46 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 24/31] ui: group filter: allow to set namespace for local datastore In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-25-c.ebner@proxmox.com> The namespace has to be set in order to get the correct groups to be used as group filter options with a local datastore as source, required for sync jobs in push direction. Signed-off-by: Christian Ebner --- changes since version 6: - no changes www/form/GroupFilter.js | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/www/form/GroupFilter.js b/www/form/GroupFilter.js index c9c2d913e..22f889752 100644 --- a/www/form/GroupFilter.js +++ b/www/form/GroupFilter.js @@ -252,14 +252,17 @@ Ext.define('PBS.form.GroupFilter', { let url; if (me.remote) { url = `/api2/json/config/remote/${me.remote}/scan/${me.datastore}/groups`; + if (me.namespace) { + url += `?namespace=${me.namespace}`; + } } else if (me.datastore) { url = `/api2/json/admin/datastore/${me.datastore}/groups`; + if (me.namespace) { + url += `?ns=${me.namespace}`; + } } else { return; } - if (me.namespace) { - url += `?namespace=${me.namespace}`; - } me.setDsStoreUrl(url); me.dsStore.load({ callback: (records) => { @@ -279,6 +282,18 @@ Ext.define('PBS.form.GroupFilter', { } me.remote = undefined; me.datastore = datastore; + me.namespace = undefined; + me.updateGroupSelectors(); + }, + + setLocalNamespace: function(datastore, namespace) { + let me = this; + if (me.datastore === datastore && me.namespace === namespace) { + return; + } + me.remote = undefined; + me.datastore = datastore; + me.namespace = namespace; me.updateGroupSelectors(); }, -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:50 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:50 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 28/31] ui: sync view: set proxy on view instead of model In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-29-c.ebner@proxmox.com> In order to load data using the same model from different sources, set the proxy on the store instead of the model. This allows using the view to display sync jobs in either pull or push direction, by setting the `sync-direction` on the view.
Signed-off-by: Christian Ebner --- changes since version 6: - no changes www/config/SyncView.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/www/config/SyncView.js b/www/config/SyncView.js index c1b8fddc2..c8b2181c4 100644 --- a/www/config/SyncView.js +++ b/www/config/SyncView.js @@ -16,10 +16,6 @@ Ext.define('pbs-sync-jobs-status', { 'comment', ], idProperty: 'id', - proxy: { - type: 'proxmox', - url: '/api2/json/admin/sync', - }, }); Ext.define('PBS.config.SyncJobView', { @@ -153,6 +149,10 @@ Ext.define('PBS.config.SyncJobView', { storeid: 'pbs-sync-jobs-status', model: 'pbs-sync-jobs-status', interval: 5000, + proxy: { + type: 'proxmox', + url: '/api2/json/admin/sync', + }, }, }, -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:45 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:45 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 23/31] bin: manager: add datastore push cli command In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-24-c.ebner@proxmox.com> Expose the push api endpoint to be callable via the command line interface. Signed-off-by: Christian Ebner --- changes since version 6: - no changes src/bin/proxmox-backup-manager.rs | 216 +++++++++++++++++++++++------- 1 file changed, 169 insertions(+), 47 deletions(-) diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index 420e96665..f91d5bf29 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -12,7 +12,7 @@ use proxmox_sys::fs::CreateOptions; use pbs_api_types::percent_encoding::percent_encode_component; use pbs_api_types::{ - BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, + BackupNamespace, GroupFilter, RateLimitConfig, SyncDirection, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, @@ -294,6 +294,72 @@ fn task_mgmt_cli() -> CommandLineInterface { cmd_def.into() } +/// Sync datastore by pulling from or pushing to another repository +#[allow(clippy::too_many_arguments)] +async fn sync_datastore( + remote: String, + remote_store: String, + remote_ns: Option, + store: String, + ns: Option, + remove_vanished: Option, + max_depth: Option, + group_filter: Option>, + limit: RateLimitConfig, + transfer_last: Option, + param: Value, + sync_direction: SyncDirection, +) -> Result { + let output_format = get_output_format(¶m); + + let client = connect_to_localhost()?; + let mut args = json!({ + "store": store, + "remote": remote, + "remote-store": remote_store, + }); + + if remote_ns.is_some() { + args["remote-ns"] = json!(remote_ns); + } + + if ns.is_some() { + args["ns"] = json!(ns); + } + + if max_depth.is_some() { + args["max-depth"] = json!(max_depth); + } + + if group_filter.is_some() { + args["group-filter"] = json!(group_filter); + } + + if let Some(remove_vanished) = remove_vanished { + args["remove-vanished"] = Value::from(remove_vanished); + } + + if transfer_last.is_some() { + args["transfer-last"] = json!(transfer_last) + } + + let mut limit_json = json!(limit); + let limit_map = limit_json + .as_object_mut() + .ok_or_else(|| format_err!("limit is not an Object"))?; + + args.as_object_mut().unwrap().append(limit_map); + + let result = match sync_direction { + SyncDirection::Pull 
=> client.post("api2/json/pull", Some(args)).await?, + SyncDirection::Push => client.post("api2/json/push", Some(args)).await?, + }; + + view_task_result(&client, result, &output_format).await?; + + Ok(Value::Null) +} + // fixme: avoid API redefinition #[api( input: { @@ -342,7 +408,7 @@ fn task_mgmt_cli() -> CommandLineInterface { } } )] -/// Sync datastore from another repository +/// Sync datastore by pulling from another repository #[allow(clippy::too_many_arguments)] async fn pull_datastore( remote: String, @@ -357,52 +423,100 @@ async fn pull_datastore( transfer_last: Option, param: Value, ) -> Result { - let output_format = get_output_format(¶m); - - let client = connect_to_localhost()?; - - let mut args = json!({ - "store": store, - "remote": remote, - "remote-store": remote_store, - }); - - if remote_ns.is_some() { - args["remote-ns"] = json!(remote_ns); - } - - if ns.is_some() { - args["ns"] = json!(ns); - } - - if max_depth.is_some() { - args["max-depth"] = json!(max_depth); - } - - if group_filter.is_some() { - args["group-filter"] = json!(group_filter); - } - - if let Some(remove_vanished) = remove_vanished { - args["remove-vanished"] = Value::from(remove_vanished); - } - - if transfer_last.is_some() { - args["transfer-last"] = json!(transfer_last) - } - - let mut limit_json = json!(limit); - let limit_map = limit_json - .as_object_mut() - .ok_or_else(|| format_err!("limit is not an Object"))?; - - args.as_object_mut().unwrap().append(limit_map); - - let result = client.post("api2/json/pull", Some(args)).await?; - - view_task_result(&client, result, &output_format).await?; + sync_datastore( + remote, + remote_store, + remote_ns, + store, + ns, + remove_vanished, + max_depth, + group_filter, + limit, + transfer_last, + param, + SyncDirection::Pull, + ) + .await +} - Ok(Value::Null) +#[api( + input: { + properties: { + "store": { + schema: DATASTORE_SCHEMA, + }, + "ns": { + type: BackupNamespace, + optional: true, + }, + remote: { + schema: REMOTE_ID_SCHEMA, + }, + "remote-store": { + schema: DATASTORE_SCHEMA, + }, + "remote-ns": { + type: BackupNamespace, + optional: true, + }, + "remove-vanished": { + schema: REMOVE_VANISHED_BACKUPS_SCHEMA, + optional: true, + }, + "max-depth": { + schema: NS_MAX_DEPTH_SCHEMA, + optional: true, + }, + "group-filter": { + schema: GROUP_FILTER_LIST_SCHEMA, + optional: true, + }, + limit: { + type: RateLimitConfig, + flatten: true, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + "transfer-last": { + schema: TRANSFER_LAST_SCHEMA, + optional: true, + }, + } + } +)] +/// Sync datastore by pushing to another repository +#[allow(clippy::too_many_arguments)] +async fn push_datastore( + remote: String, + remote_store: String, + remote_ns: Option, + store: String, + ns: Option, + remove_vanished: Option, + max_depth: Option, + group_filter: Option>, + limit: RateLimitConfig, + transfer_last: Option, + param: Value, +) -> Result { + sync_datastore( + remote, + remote_store, + remote_ns, + store, + ns, + remove_vanished, + max_depth, + group_filter, + limit, + transfer_last, + param, + SyncDirection::Push, + ) + .await } #[api( @@ -528,6 +642,14 @@ async fn run() -> Result<(), Error> { .completion_cb("group-filter", complete_remote_datastore_group_filter) .completion_cb("remote-ns", complete_remote_datastore_namespace), ) + .insert( + "push", + CliCommand::new(&API_METHOD_PUSH_DATASTORE) + .arg_param(&["store", "remote", "remote-store"]) + .completion_cb("store", pbs_config::datastore::complete_datastore_name) + 
.completion_cb("remote", pbs_config::remote::complete_remote_name) + .completion_cb("remote-store", complete_remote_datastore_name), + ) .insert( "verify", CliCommand::new(&API_METHOD_VERIFY) -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:40 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:40 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 18/31] api: sync: move sync job invocation to server sync module In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-19-c.ebner@proxmox.com> Moves and refactors the do_sync_job function into the common server sync module so that it can be reused for both sync directions, pull and push. Signed-off-by: Christian Ebner --- changes since version 6: - no changes src/api2/admin/sync.rs | 19 ++-- src/api2/pull.rs | 108 ----------------------- src/bin/proxmox-backup-proxy.rs | 15 +++- src/server/mod.rs | 1 + src/server/sync.rs | 150 +++++++++++++++++++++++++++++++- 5 files changed, 173 insertions(+), 120 deletions(-) diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs index 4e2ba0be8..be324564c 100644 --- a/src/api2/admin/sync.rs +++ b/src/api2/admin/sync.rs @@ -10,16 +10,16 @@ use proxmox_router::{ use proxmox_schema::api; use proxmox_sortable_macro::sortable; -use pbs_api_types::{Authid, SyncJobConfig, SyncJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA}; +use pbs_api_types::{ + Authid, SyncDirection, SyncJobConfig, SyncJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA, +}; use pbs_config::sync; use pbs_config::CachedUserInfo; use crate::{ - api2::{ - config::sync::{check_sync_job_modify_access, check_sync_job_read_access}, - pull::do_sync_job, - }, + api2::config::sync::{check_sync_job_modify_access, check_sync_job_read_access}, server::jobstate::{compute_schedule_status, Job, JobState}, + server::sync::do_sync_job, }; #[api( @@ -116,7 +116,14 @@ pub fn run_sync_job( let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; - let upid_str = do_sync_job(job, sync_job, &auth_id, None, to_stdout)?; + let upid_str = do_sync_job( + job, + sync_job, + &auth_id, + None, + SyncDirection::Pull, + to_stdout, + )?; Ok(upid_str) } diff --git a/src/api2/pull.rs b/src/api2/pull.rs index e733c9839..d039dab59 100644 --- a/src/api2/pull.rs +++ b/src/api2/pull.rs @@ -13,10 +13,8 @@ use pbs_api_types::{ TRANSFER_LAST_SCHEMA, }; use pbs_config::CachedUserInfo; -use proxmox_human_byte::HumanByte; use proxmox_rest_server::WorkerTask; -use crate::server::jobstate::Job; use crate::server::pull::{pull_store, PullParameters}; pub fn check_pull_privs( @@ -93,112 +91,6 @@ impl TryFrom<&SyncJobConfig> for PullParameters { } } -pub fn do_sync_job( - mut job: Job, - sync_job: SyncJobConfig, - auth_id: &Authid, - schedule: Option<String>, - to_stdout: bool, -) -> Result<String, Error> { - let job_id = format!( - "{}:{}:{}:{}:{}", - sync_job.remote.as_deref().unwrap_or("-"), - sync_job.remote_store, - sync_job.store, - sync_job.ns.clone().unwrap_or_default(), - job.jobname() - ); - let worker_type = job.jobtype().to_string(); - - if sync_job.remote.is_none() && sync_job.store == sync_job.remote_store { - bail!("can't sync to same datastore"); - } - - let upid_str = WorkerTask::spawn( - &worker_type, - Some(job_id.clone()), - auth_id.to_string(), - to_stdout, - move |worker| async move { - job.start(&worker.upid().to_string())?; - - let worker2 = worker.clone(); - let sync_job2 = sync_job.clone(); - - let worker_future = async move { - let pull_params =
PullParameters::try_from(&sync_job)?; - - info!("Starting datastore sync job '{job_id}'"); - if let Some(event_str) = schedule { - info!("task triggered by schedule '{event_str}'"); - } - - info!( - "sync datastore '{}' from '{}{}'", - sync_job.store, - sync_job - .remote - .as_deref() - .map_or(String::new(), |remote| format!("{remote}/")), - sync_job.remote_store, - ); - - let pull_stats = pull_store(pull_params).await?; - - if pull_stats.bytes != 0 { - let amount = HumanByte::from(pull_stats.bytes); - let rate = HumanByte::new_binary( - pull_stats.bytes as f64 / pull_stats.elapsed.as_secs_f64(), - ); - info!( - "Summary: sync job pulled {amount} in {} chunks (average rate: {rate}/s)", - pull_stats.chunk_count, - ); - } else { - info!("Summary: sync job found no new data to pull"); - } - - if let Some(removed) = pull_stats.removed { - info!( - "Summary: removed vanished: snapshots: {}, groups: {}, namespaces: {}", - removed.snapshots, removed.groups, removed.namespaces, - ); - } - - info!("sync job '{}' end", &job_id); - - Ok(()) - }; - - let mut abort_future = worker2 - .abort_future() - .map(|_| Err(format_err!("sync aborted"))); - - let result = select! { - worker = worker_future.fuse() => worker, - abort = abort_future => abort, - }; - - let status = worker2.create_state(&result); - - match job.finish(status) { - Ok(_) => {} - Err(err) => { - eprintln!("could not finish job state: {}", err); - } - } - - if let Err(err) = crate::server::send_sync_status(&sync_job2, &result) { - eprintln!("send sync notification failed: {err}"); - } - - result - }, - )?; - - Ok(upid_str) -} - #[api( input: { properties: { diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs index 859f5b0f8..6f19a3fbd 100644 --- a/src/bin/proxmox-backup-proxy.rs +++ b/src/bin/proxmox-backup-proxy.rs @@ -40,17 +40,17 @@ use pbs_buildcfg::configdir; use proxmox_time::CalendarEvent; use pbs_api_types::{ - Authid, DataStoreConfig, Operation, PruneJobConfig, SyncJobConfig, TapeBackupJobConfig, - VerificationJobConfig, + Authid, DataStoreConfig, Operation, PruneJobConfig, SyncDirection, SyncJobConfig, + TapeBackupJobConfig, VerificationJobConfig, }; use proxmox_backup::auth_helpers::*; use proxmox_backup::server::{self, metric_collection}; use proxmox_backup::tools::PROXMOX_BACKUP_TCP_KEEPALIVE_TIME; -use proxmox_backup::api2::pull::do_sync_job; use proxmox_backup::api2::tape::backup::do_tape_backup_job; use proxmox_backup::server::do_prune_job; +use proxmox_backup::server::do_sync_job; use proxmox_backup::server::do_verification_job; fn main() -> Result<(), Error> { @@ -611,7 +611,14 @@ async fn schedule_datastore_sync_jobs() { }; let auth_id = Authid::root_auth_id().clone(); - if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str), false) { + if let Err(err) = do_sync_job( + job, + job_config, + &auth_id, + Some(event_str), + SyncDirection::Pull, + false, + ) { eprintln!("unable to start datastore sync job {job_id} - {err}"); } }; diff --git a/src/server/mod.rs b/src/server/mod.rs index 7c14ed4b8..b9398d21f 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -38,6 +38,7 @@ pub mod metric_collection; pub(crate) mod pull; pub(crate) mod push; pub(crate) mod sync; +pub use sync::do_sync_job; pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> { let proxy_pid = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?; diff --git a/src/server/sync.rs b/src/server/sync.rs index 19b244f5a..4ce0777bf 100644 --- a/src/server/sync.rs +++ 
b/src/server/sync.rs @@ -6,16 +6,19 @@ use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; use std::time::Duration; -use anyhow::{bail, format_err, Error}; +use anyhow::{bail, format_err, Context, Error}; +use futures::{future::FutureExt, select}; use http::StatusCode; use serde_json::json; use tracing::{info, warn}; +use proxmox_human_byte::HumanByte; +use proxmox_rest_server::WorkerTask; use proxmox_router::HttpError; use pbs_api_types::{ Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem, - MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, + SyncDirection, SyncJobConfig, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, }; use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader}; use pbs_config::CachedUserInfo; @@ -25,6 +28,9 @@ use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader}; use crate::backup::ListAccessibleBackupGroups; +use crate::server::jobstate::Job; +use crate::server::pull::{pull_store, PullParameters}; +use crate::server::push::{push_store, PushParameters}; #[derive(Default)] pub(crate) struct RemovedVanishedStats { @@ -593,3 +599,143 @@ pub(crate) fn check_namespace_depth_limit( } Ok(()) } + +/// Run a sync job in given direction +pub fn do_sync_job( + mut job: Job, + sync_job: SyncJobConfig, + auth_id: &Authid, + schedule: Option, + sync_direction: SyncDirection, + to_stdout: bool, +) -> Result { + let job_id = format!( + "{}:{}:{}:{}:{}", + sync_job.remote.as_deref().unwrap_or("-"), + sync_job.remote_store, + sync_job.store, + sync_job.ns.clone().unwrap_or_default(), + job.jobname(), + ); + let worker_type = job.jobtype().to_string(); + + if sync_job.remote.is_none() && sync_job.store == sync_job.remote_store { + bail!("can't sync to same datastore"); + } + + let upid_str = WorkerTask::spawn( + &worker_type, + Some(job_id.clone()), + auth_id.to_string(), + to_stdout, + move |worker| async move { + job.start(&worker.upid().to_string())?; + + let worker2 = worker.clone(); + let sync_job2 = sync_job.clone(); + + let worker_future = async move { + info!("Starting datastore sync job '{job_id}'"); + if let Some(event_str) = schedule { + info!("task triggered by schedule '{event_str}'"); + } + let sync_stats = match sync_direction { + SyncDirection::Pull => { + info!( + "sync datastore '{}' from '{}{}'", + sync_job.store, + sync_job + .remote + .as_deref() + .map_or(String::new(), |remote| format!("{remote}/")), + sync_job.remote_store, + ); + let pull_params = PullParameters::try_from(&sync_job)?; + pull_store(pull_params).await? + } + SyncDirection::Push => { + info!( + "sync datastore '{}' to '{}{}'", + sync_job.store, + sync_job + .remote + .as_deref() + .map_or(String::new(), |remote| format!("{remote}/")), + sync_job.remote_store, + ); + let push_params = PushParameters::new( + &sync_job.store, + sync_job.ns.clone().unwrap_or_default(), + sync_job + .remote + .as_deref() + .context("missing required remote")?, + &sync_job.remote_store, + sync_job.remote_ns.clone().unwrap_or_default(), + sync_job + .owner + .as_ref() + .unwrap_or_else(|| Authid::root_auth_id()) + .clone(), + sync_job.remove_vanished, + sync_job.max_depth, + sync_job.group_filter.clone(), + sync_job.limit.clone(), + sync_job.transfer_last, + ) + .await?; + push_store(push_params).await? 
+ } + }; + + if sync_stats.bytes != 0 { + let amount = HumanByte::from(sync_stats.bytes); + let rate = HumanByte::new_binary( + sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64(), + ); + info!( + "Summary: sync job {sync_direction}ed {amount} in {} chunks (average rate: {rate}/s)", + sync_stats.chunk_count, + ); + } else { + info!("Summary: sync job found no new data to {sync_direction}"); + } + + if let Some(removed) = sync_stats.removed { + info!( + "Summary: removed vanished: snapshots: {}, groups: {}, namespaces: {}", + removed.snapshots, removed.groups, removed.namespaces, + ); + } + + info!("sync job '{job_id}' end"); + + Ok(()) + }; + + let mut abort_future = worker2 + .abort_future() + .map(|_| Err(format_err!("sync aborted"))); + + let result = select! { + worker = worker_future.fuse() => worker, + abort = abort_future => abort, + }; + + let status = worker2.create_state(&result); + + match job.finish(status) { + Ok(_) => {} + Err(err) => eprintln!("could not finish job state: {err}"), + } + + if let Err(err) = crate::server::send_sync_status(&sync_job2, &result) { + eprintln!("send sync notification failed: {err}"); + } + + result + }, + )?; + + Ok(upid_str) +} -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:51 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:51 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 29/31] api: datastore/namespace: return backup groups delete stats on remove In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-30-c.ebner@proxmox.com> Add and expose the backup group delete statistics by adding the return type to the corresponding REST API endpoints. Further, add an `error-on-protected` flag to the api endpoints, allowing them to return without error when set to false. Default remains enabled. Signed-off-by: Christian Ebner --- changes since version 6: - no changes pbs-datastore/src/datastore.rs | 20 ++++++++++++++------ src/api2/admin/datastore.rs | 29 +++++++++++++++++++++--------- src/api2/admin/namespace.rs | 31 ++++++++++++++++++++++--------- src/server/pull.rs | 6 ++++-- 4 files changed, 61 insertions(+), 25 deletions(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index c8701d2dd..68c7f2934 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -489,16 +489,22 @@ impl DataStore { /// /// Does *not* descends into child-namespaces and doesn't removes the namespace itself either. /// - /// Returns true if all the groups were removed, and false if some were protected. - pub fn remove_namespace_groups(self: &Arc<Self>, ns: &BackupNamespace) -> Result<bool, Error> { + /// Returns a tuple with the first item being true if all the groups were removed, and false if some were protected. + /// The second item returns the remove statistics. + pub fn remove_namespace_groups( + self: &Arc<Self>, + ns: &BackupNamespace, + ) -> Result<(bool, BackupGroupDeleteStats), Error> { // FIXME: locking? The single groups/snapshots are already protected, so may not be // necessary (depends on what we all allow to do with namespaces) log::info!("removing all groups in namespace {}:/{ns}", self.name()); let mut removed_all_groups = true; + let mut stats = BackupGroupDeleteStats::default(); for group in self.iter_backup_groups(ns.to_owned())?
{ let delete_stats = group?.destroy()?; + stats.add(&delete_stats); removed_all_groups = removed_all_groups && delete_stats.all_removed(); } @@ -515,7 +521,7 @@ impl DataStore { } } - Ok(removed_all_groups) + Ok((removed_all_groups, stats)) } /// Remove a complete backup namespace optionally including all it's, and child namespaces', @@ -527,13 +533,15 @@ impl DataStore { self: &Arc, ns: &BackupNamespace, delete_groups: bool, - ) -> Result { + ) -> Result<(bool, BackupGroupDeleteStats), Error> { let store = self.name(); let mut removed_all_requested = true; + let mut stats = BackupGroupDeleteStats::default(); if delete_groups { log::info!("removing whole namespace recursively below {store}:/{ns}",); for ns in self.recursive_iter_backup_ns(ns.to_owned())? { - let removed_ns_groups = self.remove_namespace_groups(&ns?)?; + let (removed_ns_groups, delete_stats) = self.remove_namespace_groups(&ns?)?; + stats.add(&delete_stats); removed_all_requested = removed_all_requested && removed_ns_groups; } } else { @@ -574,7 +582,7 @@ impl DataStore { } } - Ok(removed_all_requested) + Ok((removed_all_requested, stats)) } /// Remove a complete backup group including all snapshots. diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index b73ad0ff0..99b579f02 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -34,10 +34,10 @@ use pxar::accessor::aio::Accessor; use pxar::EntryKind; use pbs_api_types::{ - print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType, - Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, - GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation, - PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, + print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupGroupDeleteStats, + BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, DataStoreListItem, + DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, + Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, @@ -267,8 +267,17 @@ pub fn list_groups( type: pbs_api_types::BackupGroup, flatten: true, }, + "error-on-protected": { + type: bool, + optional: true, + default: true, + description: "Return error when group cannot be deleted because of protected snapshots", + } }, }, + returns: { + type: BackupGroupDeleteStats, + }, access: { permission: &Permission::Anybody, description: "Requires on /datastore/{store}[/{namespace}] either DATASTORE_MODIFY for any \ @@ -279,9 +288,10 @@ pub fn list_groups( pub async fn delete_group( store: String, ns: Option, + error_on_protected: bool, group: pbs_api_types::BackupGroup, rpcenv: &mut dyn RpcEnvironment, -) -> Result { +) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; tokio::task::spawn_blocking(move || { @@ -299,10 +309,13 @@ pub async fn delete_group( let delete_stats = datastore.remove_backup_group(&ns, &group)?; if !delete_stats.all_removed() { - bail!("group only partially deleted due to protected snapshots"); + if error_on_protected { + bail!("group only partially deleted due to protected snapshots"); + } else { + warn!("group only 
partially deleted due to protected snapshots"); + } } - - Ok(Value::Null) + Ok(delete_stats) }) .await? } diff --git a/src/api2/admin/namespace.rs b/src/api2/admin/namespace.rs index 889dc1a3d..e2a5ccd54 100644 --- a/src/api2/admin/namespace.rs +++ b/src/api2/admin/namespace.rs @@ -1,13 +1,12 @@ use anyhow::{bail, Error}; -use serde_json::Value; use pbs_config::CachedUserInfo; use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment}; use proxmox_schema::*; use pbs_api_types::{ - Authid, BackupNamespace, NamespaceListItem, Operation, DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, - PROXMOX_SAFE_ID_FORMAT, + Authid, BackupGroupDeleteStats, BackupNamespace, NamespaceListItem, Operation, + DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PROXMOX_SAFE_ID_FORMAT, }; use pbs_datastore::DataStore; @@ -138,6 +137,12 @@ pub fn list_namespaces( optional: true, default: false, }, + "error-on-protected": { + type: bool, + optional: true, + default: true, + description: "Return error when namespace cannot be deleted because of protected snapshots", + } }, }, access: { @@ -149,24 +154,32 @@ pub fn delete_namespace( store: String, ns: BackupNamespace, delete_groups: bool, + error_on_protected: bool, _info: &ApiMethod, rpcenv: &mut dyn RpcEnvironment, -) -> Result { +) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; check_ns_modification_privs(&store, &ns, &auth_id)?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; - if !datastore.remove_namespace_recursive(&ns, delete_groups)? { - if delete_groups { - bail!("group only partially deleted due to protected snapshots"); + let (removed_all, stats) = datastore.remove_namespace_recursive(&ns, delete_groups)?; + if !removed_all { + let err_msg = if delete_groups { + "group only partially deleted due to protected snapshots" + } else { + "only partially deleted due to existing groups but `delete-groups` not true" + }; + + if error_on_protected { + bail!(err_msg); } else { - bail!("only partially deleted due to existing groups but `delete-groups` not true "); + log::warn!("{err_msg}"); } } - Ok(Value::Null) + Ok(stats) } pub const ROUTER: Router = Router::new() diff --git a/src/server/pull.rs b/src/server/pull.rs index d059c3ff6..e00187764 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -650,10 +650,12 @@ fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> R check_ns_modification_privs(params.target.store.name(), local_ns, ¶ms.owner) .map_err(|err| format_err!("Removing {local_ns} not allowed - {err}"))?; - params + let (removed_all, _delete_stats) = params .target .store - .remove_namespace_recursive(local_ns, true) + .remove_namespace_recursive(local_ns, true)?; + + Ok(removed_all) } fn check_and_remove_vanished_ns( -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:37 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:37 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 15/31] fix #3044: server: implement push support for sync operations In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-16-c.ebner@proxmox.com> Adds the functionality required to push datastore contents from a source to a remote target. This includes syncing of the namespaces, backup groups and snapshots based on the provided filters as well as removing vanished contents from the target when requested. 
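As an aside on the namespace syncing described above: each source namespace is mapped onto the target by swapping the configured source prefix for the target prefix (see `map_to_target` and `BackupNamespace::map_prefix` further below in this patch). A minimal, self-contained sketch of that remapping, using a hypothetical slash-separated `Ns` type as a stand-in for the real `BackupNamespace`:

```
// Minimal sketch of the source -> target namespace prefix remapping;
// `Ns` is a simplified stand-in for the real BackupNamespace type.
#[derive(Clone, Debug, PartialEq)]
struct Ns(Vec<String>);

impl Ns {
    fn from(path: &str) -> Self {
        Ns(path.split('/').filter(|c| !c.is_empty()).map(String::from).collect())
    }

    // Strip the source prefix and graft the remainder onto the target prefix.
    fn map_prefix(&self, source: &Ns, target: &Ns) -> Result<Ns, String> {
        let suffix = self
            .0
            .strip_prefix(source.0.as_slice())
            .ok_or_else(|| format!("{:?} is not below the source namespace", self.0))?;
        let mut mapped = target.0.clone();
        mapped.extend_from_slice(suffix);
        Ok(Ns(mapped))
    }
}

fn main() {
    // Syncing source namespace a/b into target namespace x:
    // a group under a/b/c ends up under x/c on the remote.
    let (source, target) = (Ns::from("a/b"), Ns::from("x"));
    assert_eq!(Ns::from("a/b/c").map_prefix(&source, &target), Ok(Ns::from("x/c")));
    assert_eq!(Ns::from("a/b").map_prefix(&source, &target), Ok(Ns::from("x")));
}
```

Everything outside the source prefix fails the mapping, which is also why the remove-vanished handling below must pre-filter the target namespaces before deleting anything.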
While it mimics the pull direction of sync jobs, the implementation differs in that the remote must be accessed via the REST API, which is not needed for pull jobs, as these can access the local datastore directly via the filesystem. Signed-off-by: Christian Ebner --- changes since version 6: - Fix issues with local/target namespace mapping, make clear which are which by adapting variable names accordingly. - Pre-filter source namespaces, so namespaces which the sync user has no access to cannot be leaked. - Avoid possibly removing unrelated target namespaces during remove vanished by only removing sub-namespaces of the remote target namespace. - Unconditionally pass namespace parameter and early check and fail if remote does not support namespaces. - Fetch previous snapshots index to initialize known chunks correctly. - Adapt snapshot filter for excluding snapshots older than current last snapshot already present on target. - Use `BackupGroup`s `cmp::Ord` for sorting, for pull and push src/server/mod.rs | 1 + src/server/push.rs | 994 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 995 insertions(+) create mode 100644 src/server/push.rs diff --git a/src/server/mod.rs b/src/server/mod.rs index 2e40bde3c..7c14ed4b8 100644 --- a/src/server/mod.rs +++ b/src/server/mod.rs @@ -36,6 +36,7 @@ pub mod auth; pub mod metric_collection; pub(crate) mod pull; +pub(crate) mod push; pub(crate) mod sync; pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> { diff --git a/src/server/push.rs b/src/server/push.rs new file mode 100644 index 000000000..f6dd02fc9 --- /dev/null +++ b/src/server/push.rs @@ -0,0 +1,994 @@ +//! Sync datastore by pushing contents to remote server + +use std::collections::HashSet; +use std::sync::{Arc, Mutex}; + +use anyhow::{bail, format_err, Error}; +use futures::stream::{self, StreamExt, TryStreamExt}; +use tokio::sync::mpsc; +use tokio_stream::wrappers::ReceiverStream; +use tracing::{info, warn}; + +use pbs_api_types::{ + print_store_and_ns, ApiVersion, ApiVersionInfo, Authid, BackupDir, BackupGroup, + BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem, + Operation, RateLimitConfig, Remote, SnapshotListItem, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, + PRIV_REMOTE_DATASTORE_PRUNE, +}; +use pbs_client::{BackupRepository, BackupWriter, HttpClient, MergedChunkInfo, UploadOptions}; +use pbs_config::CachedUserInfo; +use pbs_datastore::data_blob::ChunkInfo; +use pbs_datastore::dynamic_index::DynamicIndexReader; +use pbs_datastore::fixed_index::FixedIndexReader; +use pbs_datastore::index::IndexFile; +use pbs_datastore::manifest::{ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::read_chunk::AsyncReadChunk; +use pbs_datastore::{DataStore, StoreProgress}; + +use super::sync::{ + check_namespace_depth_limit, LocalSource, RemovedVanishedStats, SkipInfo, SkipReason, + SyncSource, SyncStats, +}; +use crate::api2::config::remote; + +/// Target for backups to be pushed to +pub(crate) struct PushTarget { + // Remote as found in remote.cfg + remote: Remote, + // Target repository on remote + repo: BackupRepository, + // Target namespace on remote + ns: BackupNamespace, + // Http client to connect to remote + client: HttpClient, + // Remote target api supports prune delete stats + supports_prune_delete_stats: bool, +} + +impl PushTarget { + fn remote_user(&self) -> Authid { + self.remote.config.auth_id.clone() + } +} + +/// Parameters for a
push operation +pub(crate) struct PushParameters { + /// Source of backups to be pushed to remote + source: Arc, + /// Target for backups to be pushed to + target: PushTarget, + /// User used for permission checks on the source side, including potentially filtering visible + /// namespaces and backup groups. + local_user: Authid, + /// Whether to remove groups and namespaces which exist locally, but not on the remote end + remove_vanished: bool, + /// How many levels of sub-namespaces to push (0 == no recursion, None == maximum recursion) + max_depth: Option, + /// Filters for reducing the push scope + group_filter: Vec, + /// How many snapshots should be transferred at most (taking the newest N snapshots) + transfer_last: Option, +} + +impl PushParameters { + /// Creates a new instance of `PushParameters`. + #[allow(clippy::too_many_arguments)] + pub(crate) async fn new( + store: &str, + ns: BackupNamespace, + remote_id: &str, + remote_store: &str, + remote_ns: BackupNamespace, + local_user: Authid, + remove_vanished: Option, + max_depth: Option, + group_filter: Option>, + limit: RateLimitConfig, + transfer_last: Option, + ) -> Result { + if let Some(max_depth) = max_depth { + ns.check_max_depth(max_depth)?; + remote_ns.check_max_depth(max_depth)?; + }; + let remove_vanished = remove_vanished.unwrap_or(false); + + let source = Arc::new(LocalSource { + store: DataStore::lookup_datastore(store, Some(Operation::Read))?, + ns, + }); + + let (remote_config, _digest) = pbs_config::remote::config()?; + let remote: Remote = remote_config.lookup("remote", remote_id)?; + + let repo = BackupRepository::new( + Some(remote.config.auth_id.clone()), + Some(remote.config.host.clone()), + remote.config.port, + remote_store.to_string(), + ); + + let client = remote::remote_client_config(&remote, Some(limit))?; + + let mut result = client.get("api2/json/version", None).await?; + let data = result["data"].take(); + let version_info: ApiVersionInfo = serde_json::from_value(data)?; + let api_version = ApiVersion::try_from(version_info)?; + let supports_prune_delete_stats = api_version.supports_feature("prune-delete-stats"); + if api_version.major < 2 || (api_version.major == 2 && api_version.minor < 2) { + bail!("unsupported remote api version, minimum v2.2 required"); + } + + let target = PushTarget { + remote, + repo, + ns: remote_ns, + client, + supports_prune_delete_stats, + }; + let group_filter = group_filter.unwrap_or_default(); + + Ok(Self { + source, + target, + local_user, + remove_vanished, + max_depth, + group_filter, + transfer_last, + }) + } + + // Map the given namespace from source to target by adapting the prefix + fn map_to_target(&self, namespace: &BackupNamespace) -> Result { + namespace.map_prefix(&self.source.ns, &self.target.ns) + } +} + +// Check if the job user given in the push parameters has the provided privs on the remote +// datastore namespace +fn check_ns_remote_datastore_privs( + params: &PushParameters, + target_namespace: &BackupNamespace, + privs: u64, +) -> Result<(), Error> { + let user_info = CachedUserInfo::new()?; + let acl_path = + target_namespace.remote_acl_path(¶ms.target.remote.name, params.target.repo.store()); + + user_info.check_privs(¶ms.local_user, &acl_path, privs, false)?; + + Ok(()) +} + +// Fetch the list of namespaces found on target +async fn fetch_target_namespaces(params: &PushParameters) -> Result, Error> { + let api_path = format!( + "api2/json/admin/datastore/{store}/namespace", + store = params.target.repo.store(), + ); + let mut result = 
params.target.client.get(&api_path, None).await?; + let namespaces: Vec = serde_json::from_value(result["data"].take())?; + let mut namespaces: Vec = namespaces + .into_iter() + .map(|namespace| namespace.ns) + .collect(); + namespaces.sort_unstable_by_key(|a| a.name_len()); + + Ok(namespaces) +} + +// Remove the provided namespace from the target +async fn remove_target_namespace( + params: &PushParameters, + target_namespace: &BackupNamespace, +) -> Result { + if target_namespace.is_root() { + bail!("cannot remove root namespace from target"); + } + + check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) + .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; + + let api_path = format!( + "api2/json/admin/datastore/{store}/namespace", + store = params.target.repo.store(), + ); + + let mut args = serde_json::json!({ + "ns": target_namespace.name(), + "delete-groups": true, + }); + + if params.target.supports_prune_delete_stats { + args["error-on-protected"] = serde_json::to_value(false)?; + } + + let mut result = params.target.client.delete(&api_path, Some(args)).await?; + + if params.target.supports_prune_delete_stats { + let data = result["data"].take(); + serde_json::from_value(data).map_err(|err| { + format_err!("removing target namespace {target_namespace} failed - {err}") + }) + } else { + Ok(BackupGroupDeleteStats::default()) + } +} + +// Fetch the list of groups found on target in given namespace +// Returns sorted list of owned groups and a hashset containing not owned backup groups on target. +async fn fetch_target_groups( + params: &PushParameters, + target_namespace: &BackupNamespace, +) -> Result<(Vec, HashSet), Error> { + let api_path = format!( + "api2/json/admin/datastore/{store}/groups", + store = params.target.repo.store(), + ); + let args = Some(serde_json::json!({ "ns": target_namespace.name() })); + + let mut result = params.target.client.get(&api_path, args).await?; + let groups: Vec = serde_json::from_value(result["data"].take())?; + + let (mut owned, not_owned) = groups.iter().fold( + (Vec::new(), HashSet::new()), + |(mut owned, mut not_owned), group| { + if let Some(ref owner) = group.owner { + if params.target.remote_user() == *owner { + owned.push(group.backup.clone()); + return (owned, not_owned); + } + } + not_owned.insert(group.backup.clone()); + (owned, not_owned) + }, + ); + + owned.sort_unstable(); + + Ok((owned, not_owned)) +} + +// Remove the provided backup group in given namespace from the target +async fn remove_target_group( + params: &PushParameters, + target_namespace: &BackupNamespace, + backup_group: &BackupGroup, +) -> Result { + check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) + .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; + + let api_path = format!( + "api2/json/admin/datastore/{store}/groups", + store = params.target.repo.store(), + ); + + let mut args = serde_json::json!({ + "backup-id": backup_group.id, + "backup-type": backup_group.ty, + }); + + if params.target.supports_prune_delete_stats { + args["error-on-protected"] = serde_json::to_value(false)?; + } + + args["ns"] = serde_json::to_value(target_namespace.name())?; + + let mut result = params.target.client.delete(&api_path, Some(args)).await?; + + if params.target.supports_prune_delete_stats { + let data = result["data"].take(); + serde_json::from_value(data) + .map_err(|err| format_err!("removing target group {backup_group} failed - 
{err}")) + } else { + Ok(BackupGroupDeleteStats::default()) + } +} + +// Check if the namespace is already present on the target, create it otherwise +async fn check_or_create_target_namespace( + params: &PushParameters, + target_namespaces: &[BackupNamespace], + target_namespace: &BackupNamespace, +) -> Result<(), Error> { + if !target_namespace.is_root() && !target_namespaces.contains(target_namespace) { + // Namespace not present on target, create namespace. + // Sub-namespaces have to be created by creating parent components first. + + check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) + .map_err(|err| format_err!("Creating namespace not allowed - {err}"))?; + + let mut parent = BackupNamespace::root(); + for component in target_namespace.components() { + let current = BackupNamespace::from_parent_ns(&parent, component.to_string())?; + // Skip over pre-existing parent namespaces on target + if target_namespaces.contains(¤t) { + parent = current; + continue; + } + let api_path = format!( + "api2/json/admin/datastore/{store}/namespace", + store = params.target.repo.store(), + ); + let mut args = serde_json::json!({ "name": component.to_string() }); + if !parent.is_root() { + args["parent"] = serde_json::to_value(parent.clone())?; + } + let target_store_and_ns = print_store_and_ns(params.target.repo.store(), ¤t); + match params.target.client.post(&api_path, Some(args)).await { + Ok(_) => info!("Created new namespace on target: {target_store_and_ns}"), + Err(err) => bail!( + "Sync into {target_store_and_ns} failed - namespace creation failed: {err}" + ), + } + parent = current; + } + } + + Ok(()) +} + +/// Push contents of source datastore matched by given push parameters to target. +pub(crate) async fn push_store(mut params: PushParameters) -> Result { + let mut errors = false; + + let user_info = CachedUserInfo::new()?; + // Generate list of source namespaces to push to target, limited by max-depth and pre-filtered + // by local user access privs. 
+ let mut namespaces = params + .source + .list_namespaces( + &mut params.max_depth, + ¶ms.local_user, + &user_info, + Box::new(|(namespace, store, auth_id, user_info)| { + let acl_path = namespace.acl_path(&store); + let privs = PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP; + user_info + .check_privs(auth_id, &acl_path, privs, true) + .is_ok() + }), + ) + .await?; + + check_namespace_depth_limit(¶ms.source.get_ns(), ¶ms.target.ns, &namespaces)?; + + namespaces.sort_unstable_by_key(|a| a.name_len()); + + // Fetch all accessible namespaces already present on the target + let target_namespaces = fetch_target_namespaces(¶ms).await?; + // Remember synced namespaces, removing non-synced ones when remove vanished flag is set + let mut synced_namespaces = HashSet::with_capacity(namespaces.len()); + + let (mut groups, mut snapshots) = (0, 0); + let mut stats = SyncStats::default(); + for namespace in &namespaces { + let source_store_and_ns = print_store_and_ns(params.source.store.name(), namespace); + let target_namespace = params.map_to_target(&namespace)?; + let target_store_and_ns = print_store_and_ns(params.target.repo.store(), &target_namespace); + + info!("----"); + info!("Syncing {source_store_and_ns} into {target_store_and_ns}"); + + synced_namespaces.insert(target_namespace.clone()); + + if let Err(err) = + check_or_create_target_namespace(¶ms, &target_namespaces, &target_namespace).await + { + info!("Cannot sync {source_store_and_ns} into {target_store_and_ns} - {err}"); + errors = true; + continue; + } + + match push_namespace(namespace, ¶ms).await { + Ok((sync_progress, sync_stats, sync_errors)) => { + errors |= sync_errors; + stats.add(sync_stats); + + if params.max_depth != Some(0) { + groups += sync_progress.done_groups; + snapshots += sync_progress.done_snapshots; + + let ns = if namespace.is_root() { + "root namespace".into() + } else { + format!("namespace {namespace}") + }; + info!( + "Finished syncing {ns}, current progress: {groups} groups, {snapshots} snapshots" + ); + } + } + Err(err) => { + errors = true; + info!("Encountered errors while syncing namespace {namespace} - {err}"); + } + } + } + + if params.remove_vanished { + // Attention: Filter out all namespaces which are not sub-namespaces of the sync target + // namespace, or not included in the sync because of the depth limit. + // Without this pre-filtering, all namespaces unrelated to the sync would be removed! 
+ let mut target_sub_namespaces = Vec::new(); + for namespace in &namespaces { + let target_namespace = params.map_to_target(&namespace)?; + let mut sub_namespaces = target_namespaces + .iter() + .filter(|namespace| { + if let Some(depth) = target_namespace.contains(namespace) { + if let Some(max_depth) = params.max_depth { + return depth <= max_depth; + } + return true; + } + false + }) + .collect(); + target_sub_namespaces.append(&mut sub_namespaces); + } + + // Sort by namespace length and revert for sub-namespaces to be removed before parents + target_sub_namespaces.sort_unstable_by_key(|a| a.name_len()); + target_sub_namespaces.reverse(); + + for target_namespace in target_sub_namespaces { + if synced_namespaces.contains(&target_namespace) { + continue; + } + match remove_target_namespace(¶ms, &target_namespace).await { + Ok(delete_stats) => { + stats.add(SyncStats::from(RemovedVanishedStats { + snapshots: delete_stats.removed_snapshots(), + groups: delete_stats.removed_groups(), + namespaces: 1, + })); + if delete_stats.protected_snapshots() > 0 { + warn!( + "kept {protected_count} protected snapshots of namespace '{target_namespace}'", + protected_count = delete_stats.protected_snapshots(), + ); + continue; + } + } + Err(err) => { + warn!("failed to remove vanished namespace {target_namespace} - {err}"); + continue; + } + } + info!("removed vanished namespace {target_namespace}"); + } + + if !params.target.supports_prune_delete_stats { + info!("Older api version on remote detected, delete stats might be incomplete"); + } + } + + if errors { + bail!("sync failed with some errors."); + } + + Ok(stats) +} + +/// Push namespace including all backup groups to target +/// +/// Iterate over all backup groups in the namespace and push them to the target. 
+pub(crate) async fn push_namespace( + namespace: &BackupNamespace, + params: &PushParameters, +) -> Result<(StoreProgress, SyncStats, bool), Error> { + let target_namespace = params.map_to_target(namespace)?; + // Check if user is allowed to perform backups on remote datastore + check_ns_remote_datastore_privs(params, &target_namespace, PRIV_REMOTE_DATASTORE_BACKUP) + .map_err(|err| format_err!("Pushing to remote not allowed - {err}"))?; + + let mut list: Vec = params + .source + .list_groups(namespace, ¶ms.local_user) + .await?; + + list.sort_unstable(); + + let total = list.len(); + let list: Vec = list + .into_iter() + .filter(|group| group.apply_filters(¶ms.group_filter)) + .collect(); + + info!( + "found {filtered} groups to sync (out of {total} total)", + filtered = list.len() + ); + + let mut errors = false; + // Remember synced groups, remove others when the remove vanished flag is set + let mut synced_groups = HashSet::new(); + let mut progress = StoreProgress::new(list.len() as u64); + let mut stats = SyncStats::default(); + + let (owned_target_groups, not_owned_target_groups) = + fetch_target_groups(params, &target_namespace).await?; + + for (done, group) in list.into_iter().enumerate() { + progress.done_groups = done as u64; + progress.done_snapshots = 0; + progress.group_snapshots = 0; + + if not_owned_target_groups.contains(&group) { + warn!( + "group '{group}' not owned by remote user '{}' on target, skip", + params.target.remote_user(), + ); + continue; + } + synced_groups.insert(group.clone()); + + match push_group(params, namespace, &group, &mut progress).await { + Ok(sync_stats) => stats.add(sync_stats), + Err(err) => { + warn!("sync group '{group}' failed - {err}"); + errors = true; + } + } + } + + if params.remove_vanished { + // only ever allow to prune owned groups on target + for target_group in owned_target_groups { + if synced_groups.contains(&target_group) { + continue; + } + if !target_group.apply_filters(¶ms.group_filter) { + continue; + } + + info!("delete vanished group '{target_group}'"); + + match remove_target_group(params, &target_namespace, &target_group).await { + Ok(delete_stats) => { + if delete_stats.protected_snapshots() > 0 { + warn!( + "kept {protected_count} protected snapshots of group '{target_group}'", + protected_count = delete_stats.protected_snapshots(), + ); + } + stats.add(SyncStats::from(RemovedVanishedStats { + snapshots: delete_stats.removed_snapshots(), + groups: delete_stats.removed_groups(), + namespaces: 0, + })); + } + Err(err) => { + warn!("failed to delete vanished group - {err}"); + errors = true; + continue; + } + } + } + } + + Ok((progress, stats, errors)) +} + +async fn fetch_target_snapshots( + params: &PushParameters, + target_namespace: &BackupNamespace, + group: &BackupGroup, +) -> Result, Error> { + let api_path = format!( + "api2/json/admin/datastore/{store}/snapshots", + store = params.target.repo.store(), + ); + let mut args = serde_json::to_value(group)?; + if !target_namespace.is_root() { + args["ns"] = serde_json::to_value(target_namespace)?; + } + let mut result = params.target.client.get(&api_path, Some(args)).await?; + let snapshots: Vec = serde_json::from_value(result["data"].take())?; + + Ok(snapshots) +} + +async fn fetch_previous_backup_time( + params: &PushParameters, + target_namespace: &BackupNamespace, + group: &BackupGroup, +) -> Result, Error> { + let mut snapshots = fetch_target_snapshots(params, target_namespace, group).await?; + snapshots.sort_unstable_by(|a, b| a.backup.time.cmp(&b.backup.time)); 
+ Ok(snapshots.last().map(|snapshot| snapshot.backup.time)) +} + +async fn forget_target_snapshot( + params: &PushParameters, + target_namespace: &BackupNamespace, + snapshot: &BackupDir, +) -> Result<(), Error> { + check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) + .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; + + let api_path = format!( + "api2/json/admin/datastore/{store}/snapshots", + store = params.target.repo.store(), + ); + let mut args = serde_json::to_value(snapshot)?; + if !target_namespace.is_root() { + args["ns"] = serde_json::to_value(target_namespace)?; + } + params.target.client.delete(&api_path, Some(args)).await?; + + Ok(()) +} + +/// Push group including all snapshots to target +/// +/// Iterate over all snapshots in the group and push them to the target. +/// The group sync operation consists of the following steps: +/// - Query snapshots of given group from the source +/// - Sort snapshots by time +/// - Apply transfer last cutoff and filters to list +/// - Iterate the snapshot list and push each snapshot individually +/// - (Optional): Remove vanished groups if `remove_vanished` flag is set +pub(crate) async fn push_group( + params: &PushParameters, + namespace: &BackupNamespace, + group: &BackupGroup, + progress: &mut StoreProgress, +) -> Result<SyncStats, Error> { + let mut already_synced_skip_info = SkipInfo::new(SkipReason::AlreadySynced); + let mut transfer_last_skip_info = SkipInfo::new(SkipReason::TransferLast); + + let mut snapshots: Vec<BackupDir> = params.source.list_backup_dirs(namespace, group).await?; + snapshots.sort_unstable_by(|a, b| a.time.cmp(&b.time)); + + let total_snapshots = snapshots.len(); + let cutoff = params + .transfer_last + .map(|count| total_snapshots.saturating_sub(count)) + .unwrap_or_default(); + + let target_namespace = params.map_to_target(namespace)?; + let last_snapshot_time = fetch_previous_backup_time(params, &target_namespace, group) + .await?
+ .unwrap_or(i64::MIN); + + let mut source_snapshots = HashSet::new(); + let snapshots: Vec = snapshots + .into_iter() + .enumerate() + .filter(|&(pos, ref snapshot)| { + source_snapshots.insert(snapshot.time); + if last_snapshot_time >= snapshot.time { + already_synced_skip_info.update(snapshot.time); + return false; + } + if pos < cutoff { + transfer_last_skip_info.update(snapshot.time); + return false; + } + true + }) + .map(|(_, dir)| dir) + .collect(); + + if already_synced_skip_info.count > 0 { + info!("{already_synced_skip_info}"); + already_synced_skip_info.reset(); + } + if transfer_last_skip_info.count > 0 { + info!("{transfer_last_skip_info}"); + transfer_last_skip_info.reset(); + } + + progress.group_snapshots = snapshots.len() as u64; + + let target_snapshots = fetch_target_snapshots(params, &target_namespace, group).await?; + let target_snapshots: Vec = target_snapshots + .into_iter() + .map(|snapshot| snapshot.backup) + .collect(); + + let mut stats = SyncStats::default(); + let mut fetch_previous_manifest = !target_snapshots.is_empty(); + for (pos, source_snapshot) in snapshots.into_iter().enumerate() { + if target_snapshots.contains(&source_snapshot) { + progress.done_snapshots = pos as u64 + 1; + info!("percentage done: {progress}"); + continue; + } + let result = + push_snapshot(params, namespace, &source_snapshot, fetch_previous_manifest).await; + fetch_previous_manifest = true; + + progress.done_snapshots = pos as u64 + 1; + info!("percentage done: {progress}"); + + // stop on error + let sync_stats = result?; + stats.add(sync_stats); + } + + if params.remove_vanished { + let target_snapshots = fetch_target_snapshots(params, &target_namespace, group).await?; + for snapshot in target_snapshots { + if source_snapshots.contains(&snapshot.backup.time) { + continue; + } + if snapshot.protected { + info!( + "don't delete vanished snapshot {name} (protected)", + name = snapshot.backup + ); + continue; + } + if let Err(err) = + forget_target_snapshot(params, &target_namespace, &snapshot.backup).await + { + info!( + "could not delete vanished snapshot {name} - {err}", + name = snapshot.backup + ); + } + info!("delete vanished snapshot {name}", name = snapshot.backup); + stats.add(SyncStats::from(RemovedVanishedStats { + snapshots: 1, + groups: 0, + namespaces: 0, + })); + } + } + + Ok(stats) +} + +/// Push snapshot to target +/// +/// Creates a new snapshot on the target and pushes the content of the source snapshot to the +/// target by creating a new manifest file and connecting to the remote as backup writer client. +/// Chunks are written by recreating the index by uploading the chunk stream as read from the +/// source. Data blobs are uploaded as such. 
+pub(crate) async fn push_snapshot( + params: &PushParameters, + namespace: &BackupNamespace, + snapshot: &BackupDir, + fetch_previous_manifest: bool, +) -> Result { + let mut stats = SyncStats::default(); + let target_ns = params.map_to_target(namespace)?; + let backup_dir = params + .source + .store + .backup_dir(namespace.clone(), snapshot.clone())?; + + // Reader locks the snapshot + let reader = params.source.reader(namespace, snapshot).await?; + + // Does not lock the manifest, but the reader already assures a locked snapshot + let source_manifest = match backup_dir.load_manifest() { + Ok((manifest, _raw_size)) => manifest, + Err(err) => { + // No manifest in snapshot or failed to read, warn and skip + log::warn!("failed to load manifest - {err}"); + return Ok(stats); + } + }; + + // Writer instance locks the snapshot on the remote side + let backup_writer = BackupWriter::start( + ¶ms.target.client, + None, + params.target.repo.store(), + &target_ns, + snapshot, + false, + false, + ) + .await?; + + let mut previous_manifest = None; + // Use manifest of previous snapshots in group on target for chunk upload deduplication + if fetch_previous_manifest { + match backup_writer.download_previous_manifest().await { + Ok(manifest) => previous_manifest = Some(Arc::new(manifest)), + Err(err) => log::info!("Could not download previous manifest - {err}"), + } + }; + + // Dummy upload options: the actual compression and/or encryption already happened while + // the chunks were generated during creation of the backup snapshot, therefore pre-existing + // chunks (already compressed and/or encrypted) can be pushed to the target. + // Further, these steps are skipped in the backup writer upload stream. + // + // Therefore, these values do not need to fit the values given in the manifest. + // The original manifest is uploaded in the end anyways. + // + // Compression is set to true so that the uploaded manifest will be compressed. + // Encrypt is set to assure that above files are not encrypted. + let upload_options = UploadOptions { + compress: true, + encrypt: false, + previous_manifest, + ..UploadOptions::default() + }; + + // Avoid double upload penalty by remembering already seen chunks + let known_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 1024))); + + for entry in source_manifest.files() { + let mut path = backup_dir.full_path(); + path.push(&entry.filename); + if path.try_exists()? { + match ArchiveType::from_path(&entry.filename)? 
{ + ArchiveType::Blob => { + let file = std::fs::File::open(path.clone())?; + let backup_stats = backup_writer.upload_blob(file, &entry.filename).await?; + stats.add(SyncStats { + chunk_count: backup_stats.chunk_count as usize, + bytes: backup_stats.size as usize, + elapsed: backup_stats.duration, + removed: None, + }); + } + ArchiveType::DynamicIndex => { + if let Some(manifest) = upload_options.previous_manifest.as_ref() { + // Add known chunks, ignore errors since archive might not be present + let _res = backup_writer + .download_previous_dynamic_index( + &entry.filename, + manifest, + known_chunks.clone(), + ) + .await; + } + let index = DynamicIndexReader::open(&path)?; + let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode()); + let sync_stats = push_index( + &entry.filename, + index, + chunk_reader, + &backup_writer, + None, + known_chunks.clone(), + ) + .await?; + stats.add(sync_stats); + } + ArchiveType::FixedIndex => { + if let Some(manifest) = upload_options.previous_manifest.as_ref() { + // Add known chunks, ignore errors since archive might not be present + let _res = backup_writer + .download_previous_fixed_index( + &entry.filename, + manifest, + known_chunks.clone(), + ) + .await; + } + let index = FixedIndexReader::open(&path)?; + let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode()); + let size = index.index_bytes(); + let sync_stats = push_index( + &entry.filename, + index, + chunk_reader, + &backup_writer, + Some(size), + known_chunks.clone(), + ) + .await?; + stats.add(sync_stats); + } + } + } else { + warn!("{path:?} does not exist, skipped."); + } + } + + // Fetch client log from source and push to target + // this has to be handled individually since the log is never part of the manifest + let mut client_log_path = backup_dir.full_path(); + client_log_path.push(CLIENT_LOG_BLOB_NAME); + if client_log_path.is_file() { + backup_writer + .upload_blob_from_file( + &client_log_path, + CLIENT_LOG_BLOB_NAME, + upload_options.clone(), + ) + .await?; + } + + // Rewrite manifest for pushed snapshot, recreating manifest from source on target + let manifest_json = serde_json::to_value(source_manifest)?; + let manifest_string = serde_json::to_string_pretty(&manifest_json)?; + let backup_stats = backup_writer + .upload_blob_from_data( + manifest_string.into_bytes(), + MANIFEST_BLOB_NAME, + upload_options, + ) + .await?; + backup_writer.finish().await?; + + stats.add(SyncStats { + chunk_count: backup_stats.chunk_count as usize, + bytes: backup_stats.size as usize, + elapsed: backup_stats.duration, + removed: None, + }); + + Ok(stats) +} + +// Read fixed or dynamic index and push to target by uploading via the backup writer instance +// +// For fixed indexes, the size must be provided as given by the index reader. 
+#[allow(clippy::too_many_arguments)] +async fn push_index<'a>( + filename: &'a str, + index: impl IndexFile + Send + 'static, + chunk_reader: Arc, + backup_writer: &BackupWriter, + size: Option, + known_chunks: Arc>>, +) -> Result { + let (upload_channel_tx, upload_channel_rx) = mpsc::channel(20); + let mut chunk_infos = + stream::iter(0..index.index_count()).map(move |pos| index.chunk_info(pos).unwrap()); + + tokio::spawn(async move { + while let Some(chunk_info) = chunk_infos.next().await { + // Avoid reading known chunks, as they are not uploaded by the backup writer anyways + let needs_upload = { + // Need to limit the scope of the lock, otherwise the async block is not `Send` + let mut known_chunks = known_chunks.lock().unwrap(); + // Check if present and insert, chunk will be read and uploaded below if not present + known_chunks.insert(chunk_info.digest) + }; + + let merged_chunk_info = if needs_upload { + chunk_reader + .read_raw_chunk(&chunk_info.digest) + .await + .map(|chunk| { + MergedChunkInfo::New(ChunkInfo { + chunk, + digest: chunk_info.digest, + chunk_len: chunk_info.size(), + offset: chunk_info.range.start, + }) + }) + } else { + Ok(MergedChunkInfo::Known(vec![( + // Pass size instead of offset, will be replaced with offset by the backup + // writer + chunk_info.size(), + chunk_info.digest, + )])) + }; + let _ = upload_channel_tx.send(merged_chunk_info).await; + } + }); + + let merged_chunk_info_stream = ReceiverStream::new(upload_channel_rx).map_err(Error::from); + + let upload_options = UploadOptions { + compress: true, + encrypt: false, + fixed_size: size, + ..UploadOptions::default() + }; + + let upload_stats = backup_writer + .upload_index_chunk_info(filename, merged_chunk_info_stream, upload_options) + .await?; + + Ok(SyncStats { + chunk_count: upload_stats.chunk_count as usize, + bytes: upload_stats.size as usize, + elapsed: upload_stats.duration, + removed: None, + }) +} -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:46:23 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:46:23 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 00/29] fix #3044: push datastore to remote target In-Reply-To: <20241031121519.434337-1-c.ebner@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> Message-ID: <8b75448e-efd7-4561-8aa3-92b3c8afe15a@proxmox.com> superseded-by version 7: https://lore.proxmox.com/pbs-devel/20241111154353.482734-1-c.ebner at proxmox.com/T/ From c.ebner at proxmox.com Mon Nov 11 16:43:49 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:49 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 27/31] ui: sync job: adapt edit window to be used for pull and push In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-28-c.ebner@proxmox.com> Switch the subject and labels to be shown based on the direction of the sync job, and set the `sync-direction` parameter from the submit values in case of push direction. 
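To make the API contract concrete: only the create request carries the extra parameter, and an absent value keeps the backwards-compatible pull behaviour. A rough sketch of how such a request could be deserialized on the server side; the serde attributes and the pull default are illustrative assumptions, not code taken from this series:

```
// Rough sketch of interpreting an optional "sync-direction" request
// parameter; names and defaults are assumptions for illustration only.
use serde::Deserialize;

#[derive(Clone, Copy, Debug, Default, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum SyncDirection {
    #[default]
    Pull,
    Push,
}

#[derive(Deserialize)]
struct CreateSyncJob {
    id: String,
    #[serde(rename = "sync-direction", default)]
    sync_direction: SyncDirection,
}

fn main() -> Result<(), serde_json::Error> {
    // The UI sends the parameter only when creating a push job ...
    let push: CreateSyncJob =
        serde_json::from_str(r#"{ "id": "j1", "sync-direction": "push" }"#)?;
    assert_eq!(push.id, "j1");
    assert_eq!(push.sync_direction, SyncDirection::Push);

    // ... while omitting it falls back to the pull default.
    let pull: CreateSyncJob = serde_json::from_str(r#"{ "id": "j2" }"#)?;
    assert_eq!(pull.sync_direction, SyncDirection::Pull);
    Ok(())
}
```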
Signed-off-by: Christian Ebner --- changes since version 6: - conditionally set sync-direction request param www/window/SyncJobEdit.js | 41 ++++++++++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 5 deletions(-) diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js index 9ca79eaa9..0e648e7b3 100644 --- a/www/window/SyncJobEdit.js +++ b/www/window/SyncJobEdit.js @@ -9,7 +9,7 @@ Ext.define('PBS.window.SyncJobEdit', { isAdd: true, - subject: gettext('Sync Job'), + subject: gettext('Sync Job - Pull Direction'), bodyPadding: 0, @@ -29,6 +29,30 @@ Ext.define('PBS.window.SyncJobEdit', { me.scheduleValue = id ? null : 'hourly'; me.authid = id ? null : Proxmox.UserName; me.editDatastore = me.datastore === undefined && me.isCreate; + + if (me.syncDirection === 'push') { + me.subject = gettext('Sync Job - Push Direction'); + me.syncDirectionPush = true; + me.syncRemoteLabel = gettext('Target Remote'); + me.syncRemoteDatastore = gettext('Target Datastore'); + me.syncRemoteNamespace = gettext('Target Namespace'); + me.syncLocalOwner = gettext('Local User'); + // Sync direction request parameter is only required for creating new jobs, + // for edit and delete it is derived from the job config given by it's id. + if (me.isCreate) { + me.extraRequestParams = { + "sync-direction": 'push', + }; + } + } else { + me.subject = gettext('Sync Job - Pull Direction'); + me.syncDirectionPush = false; + me.syncRemoteLabel = gettext('Source Remote'); + me.syncRemoteDatastore = gettext('Source Datastore'); + me.syncRemoteNamespace = gettext('Source Namespace'); + me.syncLocalOwner = gettext('Local Owner'); + } + return { }; }, @@ -118,10 +142,10 @@ Ext.define('PBS.window.SyncJobEdit', { }, }, { - fieldLabel: gettext('Local Owner'), xtype: 'pbsAuthidSelector', name: 'owner', cbind: { + fieldLabel: '{syncLocalOwner}', value: '{authid}', deleteEmpty: '{!isCreate}', }, @@ -151,6 +175,9 @@ Ext.define('PBS.window.SyncJobEdit', { xtype: 'radiogroup', fieldLabel: gettext('Location'), defaultType: 'radiofield', + cbind: { + disabled: '{syncDirectionPush}', + }, items: [ { boxLabel: 'Local', @@ -201,7 +228,9 @@ Ext.define('PBS.window.SyncJobEdit', { }, }, { - fieldLabel: gettext('Source Remote'), + cbind: { + fieldLabel: '{syncRemoteLabel}', + }, xtype: 'pbsRemoteSelector', allowBlank: false, name: 'remote', @@ -222,13 +251,13 @@ Ext.define('PBS.window.SyncJobEdit', { }, }, { - fieldLabel: gettext('Source Datastore'), xtype: 'pbsRemoteStoreSelector', allowBlank: false, autoSelect: false, name: 'remote-store', cbind: { datastore: '{datastore}', + fieldLabel: '{syncRemoteDatastore}', }, listeners: { change: function(field, value) { @@ -249,7 +278,9 @@ Ext.define('PBS.window.SyncJobEdit', { }, }, { - fieldLabel: gettext('Source Namespace'), + cbind: { + fieldLabel: '{syncRemoteNamespace}', + }, xtype: 'pbsRemoteNamespaceSelector', allowBlank: true, autoSelect: false, -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:48 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:48 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 26/31] ui: add view with separate grids for pull and push sync jobs In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-27-c.ebner@proxmox.com> Show sync jobs in pull and in push direction in two separate grids, visually separating them to limit possible misconfiguration. 
Signed-off-by: Christian Ebner --- changes since version 6: - Fix owner header label for sync direction push www/Makefile | 1 + www/config/SyncPullPushView.js | 61 ++++++++++++++++++++++++++++++++++ www/config/SyncView.js | 21 ++++++++++-- www/datastore/DataStoreList.js | 2 +- www/datastore/Panel.js | 2 +- 5 files changed, 83 insertions(+), 4 deletions(-) create mode 100644 www/config/SyncPullPushView.js diff --git a/www/Makefile b/www/Makefile index 609a0ba67..d35e81283 100644 --- a/www/Makefile +++ b/www/Makefile @@ -61,6 +61,7 @@ JSSRC= \ config/TrafficControlView.js \ config/ACLView.js \ config/SyncView.js \ + config/SyncPullPushView.js \ config/VerifyView.js \ config/PruneView.js \ config/GCView.js \ diff --git a/www/config/SyncPullPushView.js b/www/config/SyncPullPushView.js new file mode 100644 index 000000000..3460bc662 --- /dev/null +++ b/www/config/SyncPullPushView.js @@ -0,0 +1,61 @@ +Ext.define('PBS.config.SyncPullPush', { + extend: 'Ext.panel.Panel', + alias: 'widget.pbsSyncJobPullPushView', + title: gettext('Sync Jobs'), + + mixins: ['Proxmox.Mixin.CBind'], + + layout: { + type: 'vbox', + align: 'stretch', + multi: true, + bodyPadding: 5, + }, + defaults: { + collapsible: false, + margin: 5, + }, + scrollable: true, + items: [ + { + xtype: 'pbsSyncJobView', + itemId: 'syncJobsPull', + syncDirection: 'pull', + cbind: { + datastore: '{datastore}', + }, + minHeight: 125, // shows at least one line of content + }, + { + xtype: 'splitter', + performCollapse: false, + }, + { + xtype: 'pbsSyncJobView', + itemId: 'syncJobsPush', + syncDirection: 'push', + cbind: { + datastore: '{datastore}', + }, + flex: 1, + minHeight: 125, // shows at least one line of content + }, + ], + initComponent: function() { + let me = this; + + let subPanelIds = me.items.map(el => el.itemId).filter(id => !!id); + + me.callParent(); + + for (const itemId of subPanelIds) { + let component = me.getComponent(itemId); + component.relayEvents(me, ['activate', 'deactivate', 'destroy']); + } + }, + + cbindData: function(initialConfig) { + let me = this; + me.datastore = initialConfig.datastore ? 
initialConfig.datastore : undefined; + }, +}); diff --git a/www/config/SyncView.js b/www/config/SyncView.js index 4669a23e2..c1b8fddc2 100644 --- a/www/config/SyncView.js +++ b/www/config/SyncView.js @@ -25,11 +25,21 @@ Ext.define('pbs-sync-jobs-status', { Ext.define('PBS.config.SyncJobView', { extend: 'Ext.grid.GridPanel', alias: 'widget.pbsSyncJobView', + mixins: ['Proxmox.Mixin.CBind'], stateful: true, stateId: 'grid-sync-jobs-v1', - title: gettext('Sync Jobs'), + title: gettext('Sync Jobs - Pull Direction'), + ownerHeader: gettext('Owner'), + + cbindData: function(initialConfig) { + let me = this; + if (me.syncDirection === 'push') { + me.title = gettext('Sync Jobs - Push Direction'); + me.ownerHeader = gettext('Local User'); + } + }, controller: { xclass: 'Ext.app.ViewController', @@ -39,6 +49,7 @@ Ext.define('PBS.config.SyncJobView', { let view = me.getView(); Ext.create('PBS.window.SyncJobEdit', { datastore: view.datastore, + syncDirection: view.syncDirection, listeners: { destroy: function() { me.reload(); @@ -56,6 +67,7 @@ Ext.define('PBS.config.SyncJobView', { Ext.create('PBS.window.SyncJobEdit', { datastore: view.datastore, id: selection[0].data.id, + syncDirection: view.syncDirection, listeners: { destroy: function() { me.reload(); @@ -117,6 +129,9 @@ Ext.define('PBS.config.SyncJobView', { if (view.datastore !== undefined) { params.store = view.datastore; } + if (view.syncDirection !== undefined) { + params["sync-direction"] = view.syncDirection; + } view.getStore().rstore.getProxy().setExtraParams(params); Proxmox.Utils.monStoreErrors(view, view.getStore().rstore); }, @@ -230,7 +245,9 @@ Ext.define('PBS.config.SyncJobView', { sortable: true, }, { - header: gettext('Owner'), + cbind: { + header: '{ownerHeader}', + }, dataIndex: 'owner', renderer: 'render_optional_owner', flex: 2, diff --git a/www/datastore/DataStoreList.js b/www/datastore/DataStoreList.js index fc68cfc10..22ef18540 100644 --- a/www/datastore/DataStoreList.js +++ b/www/datastore/DataStoreList.js @@ -239,7 +239,7 @@ Ext.define('PBS.datastore.DataStores', { { iconCls: 'fa fa-refresh', itemId: 'syncjobs', - xtype: 'pbsSyncJobView', + xtype: 'pbsSyncJobPullPushView', }, { iconCls: 'fa fa-check-circle', diff --git a/www/datastore/Panel.js b/www/datastore/Panel.js index ad9fc10fe..e1da7cfac 100644 --- a/www/datastore/Panel.js +++ b/www/datastore/Panel.js @@ -68,7 +68,7 @@ Ext.define('PBS.DataStorePanel', { { iconCls: 'fa fa-refresh', itemId: 'syncjobs', - xtype: 'pbsSyncJobView', + xtype: 'pbsSyncJobPullPushView', cbind: { datastore: '{datastore}', }, -- 2.39.5 From c.ebner at proxmox.com Mon Nov 11 16:43:52 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 11 Nov 2024 16:43:52 +0100 Subject: [pbs-devel] [PATCH v7 proxmox-backup 30/31] api: version: add 'prune-delete-stats' as supported feature In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <20241111154353.482734-31-c.ebner@proxmox.com> Expose the 'prune-delete-stats' as supported feature, in order for the sync job in pull direction to pass the optional `error-on-protected=false` flag to the api calls when pruning backup snapshots, groups or namespaces. 
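For illustration, a minimal sketch of how a client could gate the optional flag on the advertised feature list (the struct below is a stand-in for this example, not the actual `ApiVersionInfo` definition):

```rust
// Stand-in type for illustration only, not the real pbs-api-types struct.
struct VersionInfo {
    features: Vec<String>,
}

// A sync job in pull direction would only pass `error-on-protected=false`
// when the remote advertises the feature.
fn supports_prune_delete_stats(info: &VersionInfo) -> bool {
    info.features.iter().any(|f| f == "prune-delete-stats")
}

fn main() {
    let remote = VersionInfo {
        features: vec!["prune-delete-stats".to_string()],
    };
    assert!(supports_prune_delete_stats(&remote));
}
```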
--- changes since version 6: - no changes src/api2/version.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api2/version.rs b/src/api2/version.rs index a6cec5216..da2cb74b4 100644 --- a/src/api2/version.rs +++ b/src/api2/version.rs @@ -8,7 +8,7 @@ use proxmox_schema::api; use pbs_api_types::ApiVersionInfo; -const FEATURES: &'static [&'static str] = &[]; +const FEATURES: &[&str] = &["prune-delete-stats"]; #[api( returns: { -- 2.39.5 From t.lamprecht at proxmox.com Mon Nov 11 23:02:50 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 11 Nov 2024 23:02:50 +0100 Subject: [pbs-devel] partially-applied: [pve-devel] [PATCH many v3 00/14] notifications: add support for webhook endpoints In-Reply-To: <20241108144124.273550-1-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> Message-ID: Am 08.11.24 um 15:41 schrieb Lukas Wagner: > This series adds support for webhook notification targets to PVE > and PBS. > > A webhook is an HTTP API route provided by a third-party service that > can be used to inform the third-party about an event. In our case, > we can easily interact with various third-party notification/messaging > systems and send PVE/PBS notifications via this service. > The changes were tested against ntfy.sh, Discord and Slack. > > The configuration of webhook targets allows one to configure: > - The URL > - The HTTP method (GET/POST/PUT) > - HTTP Headers > - Body > > One can use handlebar templating to inject notification text and metadata > in the url, headers and body. > > One challenge is the handling of sensitive tokens and other secrets. > Since the endpoint is completely generic, we cannot know in advance > whether the body/header/url contains sensitive values. > Thus we add 'secrets' which are stored in the protected config only > accessible by root (e.g. /etc/pve/priv/notifications.cfg). These > secrets are accessible in URLs/headers/body via templating: > > Url: https://example.com/{{ secrets.token }} > > Secrets can only be set and updated, but never retrieved via the API. > In the UI, secrets are handled like other secret tokens/passwords. > > Bumps for PVE: > - libpve-rs-perl needs proxmox-notify bumped > - pve-manager needs proxmox-widget-toolkit and libpve-rs-perl bumped > - proxmox-mail-forward needs proxmox-notify bumped > > Bumps for PBS: > - proxmox-backup needs proxmox-notify bumped > - proxmox-mail-forward needs proxmox-notify bumped > > > Changes v1 -> v2: > - Rebase proxmox-notify changes > > Changes v2 -> v3: > - Fix utf8 -> base64 encoding bug (thx @ Stefan) > - Fix bug that allowed one to save a target with an empty header > value when updating the target > - Additional UI-side input validation (e.g.
target name, URL) > - Code documentation improvments > - Mask secrets in errors returned from the proxmox-notify crate, hopefully > preventing them to be shown in logs or error messages > - Rebased on the latest master branches > > proxmox: > > Lukas Wagner (3): > notify: renderer: adapt to changes in proxmox-time > notify: implement webhook targets > notify: add api for webhook targets > > proxmox-notify/Cargo.toml | 9 +- > proxmox-notify/src/api/mod.rs | 20 + > proxmox-notify/src/api/webhook.rs | 432 +++++++++++++++++++ > proxmox-notify/src/config.rs | 23 + > proxmox-notify/src/endpoints/mod.rs | 2 + > proxmox-notify/src/endpoints/webhook.rs | 550 ++++++++++++++++++++++++ > proxmox-notify/src/lib.rs | 17 + > proxmox-notify/src/renderer/mod.rs | 4 +- > 8 files changed, 1052 insertions(+), 5 deletions(-) > create mode 100644 proxmox-notify/src/api/webhook.rs > create mode 100644 proxmox-notify/src/endpoints/webhook.rs > > > proxmox-perl-rs: > > Lukas Wagner (2): > common: notify: add bindings for webhook API routes > common: notify: add bindings for get_targets > > common/src/notify.rs | 72 ++++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 72 insertions(+) > > > proxmox-widget-toolkit: > > Gabriel Goller (1): > utils: add base64 conversion helper > > Lukas Wagner (1): > notification: add UI for adding/updating webhook targets > > src/Makefile | 1 + > src/Schema.js | 5 + > src/Utils.js | 38 +++ > src/panel/WebhookEditPanel.js | 424 ++++++++++++++++++++++++++++++++++ > 4 files changed, 468 insertions(+) > create mode 100644 src/panel/WebhookEditPanel.js > > > pve-manager: > > Lukas Wagner (2): > api: notifications: use get_targets impl from proxmox-notify > api: add routes for webhook notification endpoints > > PVE/API2/Cluster/Notifications.pm | 297 ++++++++++++++++++++++++++---- > 1 file changed, 263 insertions(+), 34 deletions(-) > > > pve-docs: > > Lukas Wagner (1): > notification: add documentation for webhook target endpoints. > > notifications.adoc | 93 ++++++++++++++++++++++++++++++++++++++++++++++ > 1 file changed, 93 insertions(+) > > applied the common and PVE part of this series, thanks! From t.lamprecht at proxmox.com Mon Nov 11 23:09:29 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 11 Nov 2024 23:09:29 +0100 Subject: [pbs-devel] [pve-devel] [PATCH widget-toolkit v3 07/14] notification: add UI for adding/updating webhook targets In-Reply-To: <20241108144124.273550-8-l.wagner@proxmox.com> References: <20241108144124.273550-1-l.wagner@proxmox.com> <20241108144124.273550-8-l.wagner@proxmox.com> Message-ID: Am 08.11.24 um 15:41 schrieb Lukas Wagner: > The widgets for editing the headers/secrets were adapted from > the 'Tag Edit' dialog from PVE's datacenter options. > > Apart from that, the new dialog is rather standard. I've decided > to put the http method and url in a single row, mostly to > save space and also to make it analogous to how an actual http request > is structured (VERB URL, followed by headers, followed by the body). > > The secrets are a mechanism to store tokens/passwords in the > protected notification config. Secrets are accessible via > templating in the URL, headers and body via {{ secrets.NAME }}. > Secrets can only be set/updated, but not retrieved/displayed. > I re-checked this now with a bit more time and while it works fine I think there can be still some UX enhancements: - Move the "Add" buttons below the grids, like we do for most other such UIs (like e.g. in the "Match Rules" tab of the Matcher Add/Edit window). 
You probably need to change the layout a bit to keep the label on top; might want to ask Dominik for how to best achieve that. Maybe include what is added in the button text, like "Add Header" or "Add Secret" - always show an emptyText for the key/value fields to better convey what is what, alternatively show colum headers, but that takes more space. - improve validity checking for header/secret fields, without clicking in them no invalid/required border is shown, but the overall form state is invalid, thus disabling the window's Add button. This can be confusing if no field is marked as invalid. From t.lamprecht at proxmox.com Mon Nov 11 23:56:08 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 11 Nov 2024 23:56:08 +0100 Subject: [pbs-devel] applied-series: [PATCH proxmox-backup v6 1/2] fix #5439: allow to reuse existing datastore In-Reply-To: <20240829125844.290208-1-g.goller@proxmox.com> References: <20240829125844.290208-1-g.goller@proxmox.com> Message-ID: Am 29.08.24 um 14:58 schrieb Gabriel Goller: > Disallow creating datastores in non-empty directories. Allow adding > existing datastores via a 'reuse-datastore' checkmark. This only checks > if all the necessary directories (.chunks + subdirectories and .lock) > exist and have the correct permissions. Note that the reuse-datastore > path does not open the datastore, so that we don't drop the > ProcessLocker of an existing datastore. > > Signed-off-by: Gabriel Goller > --- > > v6, thanks @Wolfgang > - iterate over directories with loop > - make `chunk_dir_accessible` not pub > - remove unnecessary clone > > v5, thanks @Wolfgang: > - remove unnecessary call to `chunk_dir_accessible` > - match on exact permissions > - remove unused worker reference > > v4, thanks @Thomas: > - move reuse-datastore checkbox to "advanced options" > > v3, thanks @Fabian: > - don't open chunkstore on existing datastore, as this drops the > previous ProcessLocker > - factor out `ChunkStore::open` checks and call them in reuse-datastore > path as well > > v2, thanks @Fabian: > - also check on frontend for root > - forbid datastore creation if dir not empty > - add reuse-datastore option > - verify chunkstore directories permissions and owners > > pbs-datastore/src/chunk_store.rs | 73 ++++++++++++++++++++++++++++---- > src/api2/config/datastore.rs | 45 +++++++++++++++----- > src/api2/node/disks/directory.rs | 4 +- > src/api2/node/disks/zfs.rs | 4 +- > 4 files changed, 105 insertions(+), 21 deletions(-) > > applied series, thanks! From t.lamprecht at proxmox.com Tue Nov 12 00:01:10 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Tue, 12 Nov 2024 00:01:10 +0100 Subject: [pbs-devel] partially-applied: [PATCH proxmox-backup/pwt v4 00/16] fix #5379: introduce default auth realm option In-Reply-To: <20240823110737.831304-1-c.heiss@proxmox.com> References: <20240823110737.831304-1-c.heiss@proxmox.com> Message-ID: Am 23.08.24 um 13:07 schrieb Christoph Heiss: > Fixes #5379 [0]. > > First, it adds an updatable `default` field to all existing editable > realms. Then it converts the PAM and PBS built-in realms to proper > realms, instead of being hard-coded in-between somewhere. > In turns this enables editing of these realms, allowing setting whether > these realms should be the default for login or not. > > For proxmox-widget-toolkit, the first three patches could in principal > be applied on their own. The others depend on the API changes as > introduced in the proxmox-backup part. > > W.r.t. 
the inconsistency as discovered/discussed in [1], the (current) > behaviour is not changed in this series. Since both PVE and PBS use the > same realm login dialog from proxmox-widget-toolkit, I'd rather fix it > separately -- to avoid blocking this series on a completely separate > issue, which might still need some discussing. > > W.r.t. to applying, proxmox-backup will need a bump of > proxmox-widget-toolkit afterwards. > > [0] https://bugzilla.proxmox.com/show_bug.cgi?id=5379 > [1] https://lists.proxmox.com/pipermail/pbs-devel/2024-August/010429.html > > History > ======= > > v3: https://lists.proxmox.com/pipermail/pbs-devel/2024-August/010605.html > v2: https://lists.proxmox.com/pipermail/pbs-devel/2024-August/010521.html > v1: https://lists.proxmox.com/pipermail/pbs-devel/2024-July/010250.html > > Changes v3 -> v4: > * added proper PAM/PBS realm documentation > * reworked SimpleRealmInputPanel properties (thanks Gabriel & Hannes!) > > Changes v2 -> v3: > * rebased on latest master > * fixed unsetting the `default` property by making it deletable > * unset previous default realm when creating a new realm with > `default` set > > Changes v1 -> v2: > * rebased on latest master > * trivial fixes as suggested by Lukas > * add documentation to unset_default_realm() > > Diffstat > ======== > > proxmox-widget-toolkit: > > Christoph Heiss (7): > window: AuthEditBase: include more information in thrown errors > panel: AuthView: make `useTypeInUrl` property per-realm > panel: AuthView: use help link from schema if set > window: add panel for editing simple, built-in realms > schema: make PAM realm editable using new AuthSimple panel > fix #5379: panel: AuthView: add column displaying whether the realm is > default > fix #5379: window: AuthEdit{LDAP,OpenId}: add 'Default realm' checkbox > > src/Makefile | 1 + > src/Schema.js | 8 +++++++- > src/panel/AuthView.js | 16 ++++++++++++--- > src/window/AuthEditBase.js | 8 ++++---- > src/window/AuthEditLDAP.js | 14 ++++++++++++- > src/window/AuthEditOpenId.js | 13 ++++++++++++ > src/window/AuthEditSimple.js | 40 ++++++++++++++++++++++++++++++++++++ > 7 files changed, 91 insertions(+), 9 deletions(-) > create mode 100644 src/window/AuthEditSimple.js > applied the widget-toolkit patches for now, thanks! would be nice to get somebody else to test the rest once more From c.ebner at proxmox.com Tue Nov 12 11:43:13 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 12 Nov 2024 11:43:13 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 1/4] api-types: implement dedicated api type for match patterns In-Reply-To: <20241112104316.206282-1-c.ebner@proxmox.com> References: <20241112104316.206282-1-c.ebner@proxmox.com> Message-ID: <20241112104316.206282-2-c.ebner@proxmox.com> Introduces a dedicated api type `PathPattern` and the corresponding format and input validation schema. Further, add a `PathPatterns` type for collections of path patterns and implement required traits to be able to replace currently defined api parameters. In preparation for using this common api type for all api endpoints exposing a match pattern parameter. 
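For reference, a rough, self-contained approximation of what the new format accepts (mirroring `PATH_PATTERN_REGEX`, i.e. `^.+[^\\]$`: at least two characters and no trailing backslash; the real validation goes through the `ApiStringFormat` schema instead):

```rust
// Approximates the PATH_PATTERN_REGEX semantics without a regex crate:
// the pattern needs at least two characters and must not end in a backslash.
fn matches_path_pattern_format(pattern: &str) -> bool {
    pattern.chars().count() >= 2 && !pattern.ends_with('\\')
}

fn main() {
    assert!(matches_path_pattern_format("etc/**/*.conf"));
    assert!(!matches_path_pattern_format("trailing\\"));
}
```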
Signed-off-by: Christian Ebner --- changes since version 5: - no changes pbs-api-types/src/lib.rs | 3 ++ pbs-api-types/src/pathpatterns.rs | 55 +++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) create mode 100644 pbs-api-types/src/pathpatterns.rs diff --git a/pbs-api-types/src/lib.rs b/pbs-api-types/src/lib.rs index 460c7da7c..75dc42407 100644 --- a/pbs-api-types/src/lib.rs +++ b/pbs-api-types/src/lib.rs @@ -143,6 +143,9 @@ pub use ad::*; mod remote; pub use remote::*; +mod pathpatterns; +pub use pathpatterns::*; + mod tape; pub use tape::*; diff --git a/pbs-api-types/src/pathpatterns.rs b/pbs-api-types/src/pathpatterns.rs new file mode 100644 index 000000000..c40926a44 --- /dev/null +++ b/pbs-api-types/src/pathpatterns.rs @@ -0,0 +1,55 @@ +use proxmox_schema::{const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema}; + +use serde::{Deserialize, Serialize}; + +const_regex! { + pub PATH_PATTERN_REGEX = concat!(r"^.+[^\\]$"); +} + +pub const PATH_PATTERN_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&PATH_PATTERN_REGEX); + +pub const PATH_PATTERN_SCHEMA: Schema = + StringSchema::new("Path or match pattern for matching filenames.") + .format(&PATH_PATTERN_FORMAT) + .schema(); + +pub const PATH_PATTERN_LIST_SCHEMA: Schema = ArraySchema::new( + "List of paths or match patterns for matching filenames.", + &PATH_PATTERN_SCHEMA, +) +.schema(); + +#[derive(Default, Deserialize, Serialize)] +/// Path or path pattern for filename matching +pub struct PathPattern { + pattern: String, +} + +impl ApiType for PathPattern { + const API_SCHEMA: Schema = PATH_PATTERN_SCHEMA; +} + +impl AsRef<[u8]> for PathPattern { + fn as_ref(&self) -> &[u8] { + self.pattern.as_bytes() + } +} + +#[derive(Default, Deserialize, Serialize)] +/// Array of paths and/or path patterns for filename matching +pub struct PathPatterns { + patterns: Vec<PathPattern>, +} + +impl ApiType for PathPatterns { + const API_SCHEMA: Schema = PATH_PATTERN_LIST_SCHEMA; +} + +impl IntoIterator for PathPatterns { + type Item = PathPattern; + type IntoIter = std::vec::IntoIter<PathPattern>; + + fn into_iter(self) -> Self::IntoIter { + self.patterns.into_iter() + } +} -- 2.39.5 From c.ebner at proxmox.com Tue Nov 12 11:43:12 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 12 Nov 2024 11:43:12 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 0/4] fix #2996: client: allow optional match patterns for restore Message-ID: <20241112104316.206282-1-c.ebner@proxmox.com> These patches implement the api types to allow input validation for pathpatterns and reuse them in the pxar-bin, the catalog shell as well as the newly exposed optional restore patterns of the backup client's restore command. Patterns are parsed and passed along to the preexisting restore logic via the `PxarExtractOptions`. To correctly work also with split pxar archives, these patches depend on the following patch being applied to the pxar repo first: https://lore.proxmox.com/pbs-devel/20240918150047.485551-1-c.ebner at proxmox.com/ changes since version 5: - rebased onto current master changes since version 4: - rebased onto current master - fixed passing patterns via cli for pxar extract changes since version 3: - s/matches/patterns for bail message, thanks for testing and catching this Gabriel!
changes since version 2: - added API types as suggested - reuse same API types for proxmox-backup-client catalog shell and restore as well as the pxar extract - use simple reference instead of `as_slice()` when passing vector of patterns Link to bugtracker issue: https://bugzilla.proxmox.com/show_bug.cgi?id=2996 Christian Ebner (4): api-types: implement dedicated api type for match patterns pxar: bin: use dedicated api type for restore pattern client: catalog shell: use dedicated api type for patterns fix #2996: client: allow optional match patterns for restore pbs-api-types/src/lib.rs | 3 ++ pbs-api-types/src/pathpatterns.rs | 55 +++++++++++++++++++++++++++++++ pbs-client/src/catalog_shell.rs | 7 ++-- proxmox-backup-client/src/main.rs | 29 +++++++++++++--- pxar-bin/Cargo.toml | 1 + pxar-bin/src/main.rs | 26 +++++++-------- 6 files changed, 99 insertions(+), 22 deletions(-) create mode 100644 pbs-api-types/src/pathpatterns.rs -- 2.39.5 From c.ebner at proxmox.com Tue Nov 12 11:43:16 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 12 Nov 2024 11:43:16 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 4/4] fix #2996: client: allow optional match patterns for restore In-Reply-To: <20241112104316.206282-1-c.ebner@proxmox.com> References: <20241112104316.206282-1-c.ebner@proxmox.com> Message-ID: <20241112104316.206282-5-c.ebner@proxmox.com> When the user is only interested in a subset of the entries stored in a file-level backup, it is convenient to be able to provide a list of match patterns for the entries intended to be restored. The required restore logic is already in place. Therefore, expose it for the `proxmox-backup-client restore` command by adding the optional array of patterns as command line argument and parse these before passing them via the pxar restore options to the archive extractor. Link to bugtracker issue: https://bugzilla.proxmox.com/show_bug.cgi?id=2996 Signed-off-by: Christian Ebner --- changes since version 5: - no changes proxmox-backup-client/src/main.rs | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index e4034aa99..817235dbe 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -26,9 +26,9 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, ClientRateLimitConfig, - CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, - SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, - BACKUP_TYPE_SCHEMA, + CryptMode, Fingerprint, GroupListItem, PathPatterns, PruneJobOptions, PruneListItem, + RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, + BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, }; use pbs_client::catalog_shell::Shell; use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef}; @@ -1394,6 +1394,10 @@ We do not extract '.pxar' archives when writing to standard output. 
type: ClientRateLimitConfig, flatten: true, }, + pattern: { + type: PathPatterns, + optional: true, + }, "allow-existing-dirs": { type: Boolean, description: "Do not fail if directories already exists.", @@ -1503,6 +1507,21 @@ async fn restore( let target = json::required_string_param(&param, "target")?; let target = if target == "-" { None } else { Some(target) }; + let mut match_list = Vec::new(); + if let Some(pattern) = param["pattern"].as_array() { + if target.is_none() { + bail!("patterns not allowed when restoring to stdout"); + } + + for p in pattern { + if let Some(pattern) = p.as_str() { + let match_entry = + MatchEntry::parse_pattern(pattern, PatternFlag::PATH_NAME, MatchType::Include)?; + match_list.push(match_entry); + } + } + }; + let crypto = crypto_parameters(&param)?; let crypt_config = match crypto.enc_key { @@ -1622,8 +1641,8 @@ async fn restore( let prelude_path = param["prelude-target"].as_str().map(PathBuf::from); let options = pbs_client::pxar::PxarExtractOptions { - match_list: &[], - extract_match_default: true, + match_list: &match_list, + extract_match_default: match_list.is_empty(), allow_existing_dirs, overwrite_flags, on_error, -- 2.39.5 From c.ebner at proxmox.com Tue Nov 12 11:43:14 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 12 Nov 2024 11:43:14 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 2/4] pxar: bin: use dedicated api type for restore pattern In-Reply-To: <20241112104316.206282-1-c.ebner@proxmox.com> References: <20241112104316.206282-1-c.ebner@proxmox.com> Message-ID: <20241112104316.206282-3-c.ebner@proxmox.com> Instead of taking a plain string as input parameter, use the corresponding api type performing additional input validation. Signed-off-by: Christian Ebner --- changes since version 5: - rebased to current master pxar-bin/Cargo.toml | 1 + pxar-bin/src/main.rs | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/pxar-bin/Cargo.toml b/pxar-bin/Cargo.toml index d0d7ab24d..37c980e28 100644 --- a/pxar-bin/Cargo.toml +++ b/pxar-bin/Cargo.toml @@ -25,5 +25,6 @@ proxmox-router = { workspace = true, features = ["cli", "server"] } proxmox-schema = { workspace = true, features = [ "api-macro" ] } proxmox-sys.workspace = true +pbs-api-types.workspace = true pbs-client.workspace = true pbs-pxar-fuse.workspace = true diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs index 2fc0d1bb5..eb3580d92 100644 --- a/pxar-bin/src/main.rs +++ b/pxar-bin/src/main.rs @@ -9,9 +9,11 @@ use std::sync::Arc; use anyhow::{bail, format_err, Error}; use futures::future::FutureExt; use futures::select; +use serde_json::Value; use tokio::signal::unix::{signal, SignalKind}; use pathpatterns::{MatchEntry, MatchType, PatternFlag}; +use pbs_api_types::PathPatterns; use pbs_client::pxar::tools::format_single_line_entry; use pbs_client::pxar::{ Flags, OverwriteFlags, PxarExtractOptions, PxarWriters, ENCODER_MAX_ENTRIES, @@ -53,12 +55,7 @@ fn extract_archive_from_reader( description: "Archive name.", }, pattern: { - description: "List of paths or pattern matching files to restore", - type: Array, - items: { - type: String, - description: "Path or pattern matching files to restore.", - }, + type: PathPatterns, optional: true, }, target: { @@ -144,7 +141,6 @@ fn extract_archive_from_reader( #[allow(clippy::too_many_arguments)] fn extract_archive( archive: String, - pattern: Option<Vec<String>>, target: Option<String>, no_xattrs: bool, no_fcaps: bool, @@ -161,6 +157,7 @@ fn extract_archive( strict: bool, payload_input: Option<String>, prelude_target: Option<String>,
+ param: Value, ) -> Result<(), Error> { let mut feature_flags = Flags::DEFAULT; if no_xattrs { @@ -190,7 +187,6 @@ fn extract_archive( overwrite_flags.insert(OverwriteFlags::all()); } - let pattern = pattern.unwrap_or_default(); let target = target.as_ref().map_or_else(|| ".", String::as_str); let mut match_list = Vec::new(); @@ -204,11 +200,15 @@ fn extract_archive( } } - for entry in pattern { - match_list.push( - MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Include) - .map_err(|err| format_err!("error in pattern: {}", err))?, - ); + if let Some(pattern) = param["pattern"].as_array() { + for p in pattern { + if let Some(entry) = p.as_str() { + match_list.push( + MatchEntry::parse_pattern(entry, PatternFlag::PATH_NAME, MatchType::Include) + .map_err(|err| format_err!("error in pattern: {err}"))?, + ); + } + } } let extract_match_default = match_list.is_empty(); -- 2.39.5 From c.ebner at proxmox.com Tue Nov 12 11:43:15 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 12 Nov 2024 11:43:15 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 3/4] client: catalog shell: use dedicated api type for patterns In-Reply-To: <20241112104316.206282-1-c.ebner@proxmox.com> References: <20241112104316.206282-1-c.ebner@proxmox.com> Message-ID: <20241112104316.206282-4-c.ebner@proxmox.com> Use the common api type with schema based input validation for all match pattern parameters exposed via the api macro. Signed-off-by: Christian Ebner --- changes since version 5: - no changes pbs-client/src/catalog_shell.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/pbs-client/src/catalog_shell.rs b/pbs-client/src/catalog_shell.rs index 8c8e9a654..7e69970fe 100644 --- a/pbs-client/src/catalog_shell.rs +++ b/pbs-client/src/catalog_shell.rs @@ -14,6 +14,7 @@ use nix::fcntl::OFlag; use nix::sys::stat::Mode; use pathpatterns::{MatchEntry, MatchList, MatchPattern, MatchType, PatternFlag}; +use pbs_api_types::PathPattern; use proxmox_router::cli::{self, CliCommand, CliCommandMap, CliHelper, CommandLineInterface}; use proxmox_schema::api; use proxmox_sys::fs::{create_path, CreateOptions}; @@ -240,8 +241,7 @@ async fn list_selected_command(patterns: bool) -> Result<(), Error> { input: { properties: { pattern: { - type: String, - description: "Match pattern for matching files in the catalog." + type: PathPattern, }, select: { type: bool, @@ -282,9 +282,8 @@ async fn restore_selected_command(target: String) -> Result<(), Error> { description: "target path for restore on local filesystem." }, pattern: { - type: String, + type: PathPattern, optional: true, - description: "match pattern to limit files for restore."
} } } -- 2.39.5 From c.ebner at proxmox.com Tue Nov 12 11:45:14 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 12 Nov 2024 11:45:14 +0100 Subject: [pbs-devel] [PATCH v5 proxmox-backup 0/4] fix #2996: client: allow optional match patterns for restore In-Reply-To: <20240918152716.511337-1-c.ebner@proxmox.com> References: <20240918152716.511337-1-c.ebner@proxmox.com> Message-ID: <0012c1bb-3c65-424e-b42c-30bd7424c373@proxmox.com> superseded-by version 6: https://lore.proxmox.com/pbs-devel/20241112104316.206282-1-c.ebner at proxmox.com/T/ From t.lamprecht at proxmox.com Tue Nov 12 13:56:17 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Tue, 12 Nov 2024 13:56:17 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup] api: tape: add permission to move_tape endpoint In-Reply-To: <20241106104512.41479-1-h.laimer@proxmox.com> References: <20241106104512.41479-1-h.laimer@proxmox.com> Message-ID: Am 06.11.24 um 11:45 schrieb Hannes Laimer: > ... so it is usable by non-root users, this came up in support. > > Signed-off-by: Hannes Laimer > --- > This came up in enterprise support, but it also makes sense generally > > src/api2/tape/media.rs | 6 +++++- > 1 file changed, 5 insertions(+), 1 deletion(-) > > applied, thanks! From t.lamprecht at proxmox.com Tue Nov 12 21:07:47 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Tue, 12 Nov 2024 21:07:47 +0100 Subject: [pbs-devel] applied: [PATCH proxmox] systemd: debcargo: add libsystemd-dev to dependencies In-Reply-To: <20240823121132.300298-1-m.sandoval@proxmox.com> References: <20240823121132.300298-1-m.sandoval@proxmox.com> Message-ID: Am 23.08.24 um 14:11 schrieb Maximiliano Sandoval: > `Build-Depends` and `Depends` in d/control are missing `libsystemd-dev`, > resulting in mk-build-deps not being able to install all dependencies > needed by `make deb`. > > After running `make deb` the control file looks: > > ```diff > modified proxmox-systemd/debian/control > @@ -6,7 +6,8 @@ Build-Depends: debhelper (>= 12), > cargo:native , > rustc:native , > libstd-rust-dev , > - librust-libc-0.2+default-dev (>= 0.2.107-~~) > + librust-libc-0.2+default-dev (>= 0.2.107-~~) , > + libsystemd-dev > Maintainer: Proxmox Support Team > Standards-Version: 4.6.2 > Vcs-Git: git://git.proxmox.com/git/proxmox.git > @@ -19,7 +20,8 @@ Architecture: any > Multi-Arch: same > Depends: > ${misc:Depends}, > - librust-libc-0.2+default-dev (>= 0.2.107-~~) > + librust-libc-0.2+default-dev (>= 0.2.107-~~), > + libsystemd-dev > Provides: > librust-proxmox-systemd+default-dev (= ${binary:Version}), > librust-proxmox-systemd-0-dev (= ${binary:Version}), > ``` > > Suggested-by: Wolfgang Bumiller > Signed-off-by: Maximiliano Sandoval > --- > proxmox-systemd/debian/control | 6 ++++-- > proxmox-systemd/debian/debcargo.toml | 3 +++ > 2 files changed, 7 insertions(+), 2 deletions(-) > > this was applied by wolfgang a while ago, thanks! From t.lamprecht at proxmox.com Tue Nov 12 21:09:09 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Tue, 12 Nov 2024 21:09:09 +0100 Subject: [pbs-devel] applied: [PATCH v3 proxmox-backup] client: catalog shell: avoid navigating below archive root In-Reply-To: <20240903123946.332720-1-c.ebner@proxmox.com> References: <20240903123946.332720-1-c.ebner@proxmox.com> Message-ID: Am 03.09.24 um 14:39 schrieb Christian Ebner: > Avoid to underflow the catalogs shell position stack by navigating > below the archives root directory into the catalog root. 
Otherwise > the shell will panic, as the root entry is always expected to be > present. > > This treats the archive root directory as being its own parent > directory, mimicking the behaviour of most common shells. > > Signed-off-by: Christian Ebner > --- > Encountered while implementing the catalog shell for the split pxar > archive case. > > Without this additional check, underflowing the pxar archive root of > the catalog shell will panic. > > changes since version 2, thanks @Wolfgang for catching this: > - also handle cases where the current working directory is not the root > directory > > changes since version 1: > - use `is_empty` to check if vector is empty > - extend commit message and comment to clarify that archive root acts as > its own parent directory > > pbs-client/src/catalog_shell.rs | 8 ++++++++ > 1 file changed, 8 insertions(+) > > applied, thanks! From t.lamprecht at proxmox.com Tue Nov 12 21:15:12 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Tue, 12 Nov 2024 21:15:12 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] fix #5233: don't require root for some tape operations In-Reply-To: <20240910070818.268267-1-d.csapak@proxmox.com> References: <20240910070818.268267-1-d.csapak@proxmox.com> Message-ID: <68d6639e-a27d-4198-be2e-6a423a78581f@proxmox.com> Am 10.09.24 um 09:08 schrieb Dominik Csapak: > instead, require 'Tape.Write' on '/tape' path. > This makes it possible for a TapeAdmin or TapeOperator to > format/remove/vault tapes, instead of just root at pam. > > I opted for the path '/tape' since we don't have a dedicated acl > structure for single tapes, just '/tape/pool' (which does not apply > since not all tapes have to have a pool), '/tape/device' (which is > intended for drives/changers) and '/tape/jobs' (which is for jobs only).
From t.lamprecht at proxmox.com Tue Nov 12 21:22:45 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Tue, 12 Nov 2024 21:22:45 +0100 Subject: [pbs-devel] applied-series: [PATCH proxmox-backup v4 1/2] pbs2to3: add test for kernel version compatibility In-Reply-To: <20240918130100.193090-1-d.kral@proxmox.com> References: <20240918130100.193090-1-d.kral@proxmox.com> Message-ID: Am 18.09.24 um 15:00 schrieb Daniel Kral: > Factors the kernel version compatibility check into its own method and > adds test cases for a set of expected and unexpected kernel versions. > > Signed-off-by: Daniel Kral > --- > Changes to v1/v2/v3: > - Moved refactoring and test in first commit and changes afterwards > > src/bin/pbs2to3.rs | 62 ++++++++++++++++++++++++++++++++++++++++------ > 1 file changed, 55 insertions(+), 7 deletions(-) > > applied both patches, thanks! albeit, it might be even nicer to just parse out the major and minor parts of the version and compare them explicitly to a simple const u16 tuple, that way bumping the minimum kernel might be less headache than coming up with a new regex. From t.lamprecht at proxmox.com Tue Nov 12 21:25:31 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Tue, 12 Nov 2024 21:25:31 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup 1/2] client: pxar: perform match pattern check only once In-Reply-To: <20240926123437.192228-1-c.ebner@proxmox.com> References: <20240926123437.192228-1-c.ebner@proxmox.com> Message-ID: <2f517888-16a8-4006-8417-ccc124bd04cb@proxmox.com> Am 26.09.24 um 14:34 schrieb Christian Ebner: > While traversing the filesystem tree, `generate_directory_file_list` > generates the list of entries to include for each directory level, > already matching the entry against the given list of match patterns. > > Since this already excludes entries which should not be included in > the archive, the same check in the `add_entry` call is redundant, > as it is executed for each entry which is included in the list > generated by `generate_directory_file_list`. > > Signed-off-by: Christian Ebner > --- > Seems to be present since commit: > c443f58b09 ("switch to external pxar and fuse crates") feel free to add commit references to the commit message itself. > > Noticed while looking at the code because an user reported an issue in > the community forum, the issue turned out to be unrelated: > https://forum.proxmox.com/threads/154995/ > > pbs-client/src/pxar/create.rs | 9 --------- > 1 file changed, 9 deletions(-) > > applied series, thanks! From d.csapak at proxmox.com Wed Nov 13 08:42:22 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Wed, 13 Nov 2024 08:42:22 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] fix #5233: don't require root for some tape operations In-Reply-To: <68d6639e-a27d-4198-be2e-6a423a78581f@proxmox.com> References: <20240910070818.268267-1-d.csapak@proxmox.com> <68d6639e-a27d-4198-be2e-6a423a78581f@proxmox.com> Message-ID: On 11/12/24 21:15, Thomas Lamprecht wrote: > Am 10.09.24 um 09:08 schrieb Dominik Csapak: >> instead, require 'Tape.Write' on '/tape' path. >> This makes it possible for a TapeAdmin or TapeOperator to >> format/remove/vault tapes, instead of just root at pam. >> >> I opted for the path '/tape' since we don't have a dedicated acl >> structure for single tapes, just '/tape/pool' (which does not apply >> since not all tapes have to have a pool), '/tape/device' (which is >> intended for drives/changers) and '/tape/jobs' (which is for jobs only). 
>> >> Alternatively we could invent a new scheme for tape media, e.g. >> '/tape/media' for this. > > the path is fine, but why Tape.Write over Tape.Modify? > >> Tape.Modify >> Tape.Modify allows a user to modify the configuration of tape drives, changers and backups. > > vs > >> Tape.Write >> Tape.Write allows a user to write to a tape media. > > The former might be a better fit here as these calls alter not only the tape > content, or? > > Noticed because Hannes' recent patch already switched the move-tape one to > Tape.Modify, > mhmm... not sure why i chose Tape.Write exactly, but IMHO looking at it again, Modify would probably fit better for the 'update_media_status' (also fits better for the move-tape as Hannes rightly noticed) for destroy I'd be inclined to still use Write. While it does update the inventory (it removes it from there), It's basically the inverse of 'format_media' (as in, that inserts it into the inventory) which also uses Write. I'd send a rebased version for using Write for destroy, and Modify for update status if that's fine with you. From c.ebner at proxmox.com Wed Nov 13 09:23:07 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 09:23:07 +0100 (CET) Subject: [pbs-devel] applied: [PATCH proxmox-backup 1/2] client: pxar: perform match pattern check only once In-Reply-To: <2f517888-16a8-4006-8417-ccc124bd04cb@proxmox.com> References: <20240926123437.192228-1-c.ebner@proxmox.com> <2f517888-16a8-4006-8417-ccc124bd04cb@proxmox.com> Message-ID: <494560983.2860.1731486187400@webmail.proxmox.com> > On 12.11.2024 21:25 CET Thomas Lamprecht wrote: > > > Am 26.09.24 um 14:34 schrieb Christian Ebner: > > While traversing the filesystem tree, `generate_directory_file_list` > > generates the list of entries to include for each directory level, > > already matching the entry against the given list of match patterns. > > > > Since this already excludes entries which should not be included in > > the archive, the same check in the `add_entry` call is redundant, > > as it is executed for each entry which is included in the list > > generated by `generate_directory_file_list`. > > > > Signed-off-by: Christian Ebner > > --- > > Seems to be present since commit: > > c443f58b09 ("switch to external pxar and fuse crates") > > feel free to add commit references to the commit message itself. Acked, will do next time. Did not include it there directly as it does not really fix anything, just drops redundant code and I investigated why it was introduced like this. > > > > > Noticed while looking at the code because an user reported an issue in > > the community forum, the issue turned out to be unrelated: > > https://forum.proxmox.com/threads/154995/ > > > > pbs-client/src/pxar/create.rs | 9 --------- > > 1 file changed, 9 deletions(-) > > > > > > applied series, thanks! From t.lamprecht at proxmox.com Wed Nov 13 09:25:48 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Wed, 13 Nov 2024 09:25:48 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] fix #5233: don't require root for some tape operations In-Reply-To: References: <20240910070818.268267-1-d.csapak@proxmox.com> <68d6639e-a27d-4198-be2e-6a423a78581f@proxmox.com> Message-ID: <8d1c237b-b271-467c-a200-e7ffd3b13b78@proxmox.com> Am 13.11.24 um 08:42 schrieb Dominik Csapak: > for destroy I'd be inclined to still use Write. While it does update the inventory > (it removes it from there), It's basically the inverse of 'format_media' (as > in, that inserts it into the inventory) which also uses Write. 
We could also require both, but no hard feelings here.. > > I'd send a rebased version for using Write for destroy, and Modify for update status if that's > fine with you. Yeah, that's fine by me. From d.csapak at proxmox.com Wed Nov 13 11:35:19 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Wed, 13 Nov 2024 11:35:19 +0100 Subject: [pbs-devel] [PATCH proxmox] rest-server: connection: fix busy waiting on closed connections pre tls Message-ID: <20241113103519.1498601-1-d.csapak@proxmox.com> when a connection is closed before we have enough data to determine if it's tls or not, the socket stays in a readable state. Sadly, the tokio timeout we use here gets starved by the async_io callback. To fix this, save the amount of bytes peek returned and if they did not change between invocations of the callback, we assume that the connection was closed and exit with an error. Signed-off-by: Dominik Csapak --- proxmox-rest-server/src/connection.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/proxmox-rest-server/src/connection.rs b/proxmox-rest-server/src/connection.rs index 3815a8f4..4fed84b1 100644 --- a/proxmox-rest-server/src/connection.rs +++ b/proxmox-rest-server/src/connection.rs @@ -477,6 +477,7 @@ impl AcceptBuilder { const HANDSHAKE_BYTES_LEN: usize = 5; let future = async { + let mut old_peek_len = 0; incoming_stream .async_io(tokio::io::Interest::READABLE, || { let mut buf = [0; HANDSHAKE_BYTES_LEN]; @@ -491,6 +492,8 @@ impl AcceptBuilder { let peek_res = std_stream.peek(&mut buf); + std_stream.read(buf) + match peek_res { // If we didn't get enough bytes, raise an EAGAIN / EWOULDBLOCK which tells // tokio to await the readiness of the socket again. This should normally @@ -500,7 +503,14 @@ impl AcceptBuilder { // This means we will peek into the stream's queue until we got // HANDSHAKE_BYTE_LEN bytes or an error. Ok(peek_len) if peek_len < HANDSHAKE_BYTES_LEN => { - Err(io::ErrorKind::WouldBlock.into()) + // if we detect the same peek len again but still got a readable + // stream, the connection was probably closed, so abort here + if peek_len == old_peek_len { + Err(io::ErrorKind::UnexpectedEof.into()) + } else { + old_peek_len = peek_len; + Err(io::ErrorKind::WouldBlock.into()) + } } // Either we got Ok(HANDSHAKE_BYTES_LEN) or some error. res => res.map(|_| contains_tls_handshake_fragment(&buf)), -- 2.39.5 From d.csapak at proxmox.com Wed Nov 13 11:39:37 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Wed, 13 Nov 2024 11:39:37 +0100 Subject: [pbs-devel] [PATCH proxmox v2] fix #5868: rest-server: connection: fix busy waiting on closed connections pre tls Message-ID: <20241113103937.1554474-1-d.csapak@proxmox.com> when a connection is closed before we have enough data to determine if it's tls or not, the socket stays in a readable state. Sadly, the tokio timeout we use here gets starved by the async_io callback. To fix this, save the amount of bytes peek returned and if they did not change between invocations of the callback, we assume that the connection was closed and exit with an error. 
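The core of the detection can be sketched independently of the tokio plumbing (the constant and error kinds mirror the patch below; this standalone function is not the applied code):

```rust
use std::io;

const HANDSHAKE_BYTES_LEN: usize = 5;

// Called on every readiness wakeup with the currently peeked length;
// `old_peek_len` keeps the length seen on the previous wakeup.
fn check_peek_progress(peek_len: usize, old_peek_len: &mut usize) -> io::Result<()> {
    if peek_len >= HANDSHAKE_BYTES_LEN {
        return Ok(()); // enough bytes buffered to sniff TLS vs. plaintext
    }
    if peek_len == *old_peek_len {
        // readable, but no new bytes since the last wakeup: peer hung up
        return Err(io::ErrorKind::UnexpectedEof.into());
    }
    *old_peek_len = peek_len;
    Err(io::ErrorKind::WouldBlock.into()) // keep waiting for more data
}

fn main() {
    let mut old = 0;
    // first wakeup: 2 bytes buffered, progress was made, keep polling
    assert_eq!(
        check_peek_progress(2, &mut old).unwrap_err().kind(),
        io::ErrorKind::WouldBlock
    );
    // second wakeup: still 2 bytes, so the connection is treated as closed
    assert_eq!(
        check_peek_progress(2, &mut old).unwrap_err().kind(),
        io::ErrorKind::UnexpectedEof
    );
}
```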
Signed-off-by: Dominik Csapak --- changes from v1: * removed leftover unrelated test code * fixed up the commit message with the bug # proxmox-rest-server/src/connection.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/proxmox-rest-server/src/connection.rs b/proxmox-rest-server/src/connection.rs index 3815a8f4..11f29ce4 100644 --- a/proxmox-rest-server/src/connection.rs +++ b/proxmox-rest-server/src/connection.rs @@ -477,6 +477,7 @@ impl AcceptBuilder { const HANDSHAKE_BYTES_LEN: usize = 5; let future = async { + let mut old_peek_len = 0; incoming_stream .async_io(tokio::io::Interest::READABLE, || { let mut buf = [0; HANDSHAKE_BYTES_LEN]; @@ -500,7 +501,14 @@ impl AcceptBuilder { // This means we will peek into the stream's queue until we got // HANDSHAKE_BYTE_LEN bytes or an error. Ok(peek_len) if peek_len < HANDSHAKE_BYTES_LEN => { - Err(io::ErrorKind::WouldBlock.into()) + // if we detect the same peek len again but still got a readable + // stream, the connection was probably closed, so abort here + if peek_len == old_peek_len { + Err(io::ErrorKind::UnexpectedEof.into()) + } else { + old_peek_len = peek_len; + Err(io::ErrorKind::WouldBlock.into()) + } } // Either we got Ok(HANDSHAKE_BYTES_LEN) or some error. res => res.map(|_| contains_tls_handshake_fragment(&buf)), -- 2.39.5 From d.csapak at proxmox.com Wed Nov 13 11:40:06 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Wed, 13 Nov 2024 11:40:06 +0100 Subject: [pbs-devel] [PATCH proxmox] rest-server: connection: fix busy waiting on closed connections pre tls In-Reply-To: <20241113103519.1498601-1-d.csapak@proxmox.com> References: <20241113103519.1498601-1-d.csapak@proxmox.com> Message-ID: disregard this, sent a v2: https://lore.proxmox.com/pbs-devel/20241113103937.1554474-1-d.csapak at proxmox.com/ From c.ebner at proxmox.com Wed Nov 13 11:50:06 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 11:50:06 +0100 Subject: [pbs-devel] [PATCH v4 proxmox-backup 4/5] client: drop unused parse_archive_type helper In-Reply-To: <20241113105007.151258-1-c.ebner@proxmox.com> References: <20241113105007.151258-1-c.ebner@proxmox.com> Message-ID: <20241113105007.151258-5-c.ebner@proxmox.com> Parsing of the type based on the archive name extension is now handled by `BackupArchiveName`. 
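For reference, a small usage sketch of the replacement (assuming this series is applied; the `try_into` mapping mirrors what the removed helper used to return for names without a type extension):

```rust
use pbs_api_types::BackupArchiveName;

fn main() -> Result<(), anyhow::Error> {
    // no server archive type extension given, so the name is mapped to a
    // dynamic index archive, just like parse_archive_type() used to do
    let name: BackupArchiveName = "root.pxar".try_into()?;
    let canonical: &str = name.as_ref();
    assert_eq!(canonical, "root.pxar.didx");
    Ok(())
}
```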
Signed-off-by: Christian Ebner --- changes since version 3: - no changes proxmox-backup-client/src/main.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index a155f56f0..581bc245b 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -1380,18 +1380,6 @@ async fn dump_image( Ok(()) } -fn parse_archive_type(name: &str) -> (String, ArchiveType) { - if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") { - (name.into(), ArchiveType::from_path(name).unwrap()) - } else if has_pxar_filename_extension(name, false) { - (format!("{}.didx", name), ArchiveType::DynamicIndex) - } else if name.ends_with(".img") { - (format!("{}.fidx", name), ArchiveType::FixedIndex) - } else { - (format!("{}.blob", name), ArchiveType::Blob) - } -} - #[api( input: { properties: { -- 2.39.5 From c.ebner at proxmox.com Wed Nov 13 11:50:04 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 11:50:04 +0100 Subject: [pbs-devel] [PATCH v4 proxmox-backup 2/5] api types: introduce `BackupArchiveName` type In-Reply-To: <20241113105007.151258-1-c.ebner@proxmox.com> References: <20241113105007.151258-1-c.ebner@proxmox.com> Message-ID: <20241113105007.151258-3-c.ebner@proxmox.com> Introduces a dedicated wrapper type to be used for backup archive names instead of plain strings and associated helper methods for archive type checks and archive name mappings. Signed-off-by: Christian Ebner --- changes since version 3: - reworked archive type parsing, removed catch-all blob mapping and made the mapping more expressive by using a match statement - added helpers for common archive names such as catalog, manifest, key files, etc.
to use them over the const definitions for comparisons - introduce extension helper for `ArchiveType`, to be used for archive name string generation pbs-api-types/src/datastore.rs | 153 ++++++++++++++++++++++++++++++++- 1 file changed, 152 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index dfa6bb259..00ac63255 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,5 +1,7 @@ +use std::convert::{AsRef, TryFrom}; use std::fmt; use std::path::{Path, PathBuf}; +use std::str::FromStr; use anyhow::{bail, format_err, Error}; use const_format::concatcp; @@ -1570,7 +1572,7 @@ pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String { } } -#[derive(PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] /// Allowed variants of backup archives to be contained in a snapshot's manifest pub enum ArchiveType { FixedIndex, @@ -1589,4 +1591,153 @@ impl ArchiveType { }; Ok(archive_type) } + + pub fn extension(&self) -> &'static str { + match self { + ArchiveType::DynamicIndex => "didx", + ArchiveType::FixedIndex => "fidx", + ArchiveType::Blob => "blob", + } + } +} + +#[derive(Clone, PartialEq, Eq)] +/// Name of archive files contained in snapshot's manifest +pub struct BackupArchiveName { + // archive name including the `.fidx`, `.didx` or `.blob` archive type extension + name: String, + // archive type parsed based on given extension + ty: ArchiveType, +} + +impl fmt::Display for BackupArchiveName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{name}", name = self.name) + } +} + +serde_plain::derive_deserialize_from_fromstr!(BackupArchiveName, "archive name"); + +impl FromStr for BackupArchiveName { + type Err = Error; + + fn from_str(name: &str) -> Result<Self, Self::Err> { + Self::try_from(name) + } } + +serde_plain::derive_serialize_from_display!(BackupArchiveName); + +impl TryFrom<&str> for BackupArchiveName { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result<Self, Self::Error> { + let (name, ty) = Self::parse_archive_type(value)?; + Ok(Self { name, ty }) + } +} + +impl AsRef<str> for BackupArchiveName { + fn as_ref(&self) -> &str { + &self.name + } +} + +impl BackupArchiveName { + pub fn from_path(path: impl AsRef<Path>) -> Result<Self, Error> { + let path = path.as_ref(); + if path.as_os_str().as_encoded_bytes().last() == Some(&b'/') { + bail!("invalid archive name, got directory"); + } + let file_name = path + .file_name() + .ok_or_else(|| format_err!("invalid archive name"))?; + let name = file_name + .to_str() + .ok_or_else(|| format_err!("archive name not valid UTF-8"))?; + + Self::try_from(name) + } + + pub fn catalog() -> Self { + // Note: .pcat1 => Proxmox Catalog Format version 1 + Self { + name: "catalog.pcat1.didx".to_string(), + ty: ArchiveType::DynamicIndex, + } + } + + pub fn manifest() -> Self { + Self { + name: "index.json.blob".to_string(), + ty: ArchiveType::Blob, + } + } + + pub fn client_log() -> Self { + Self { + name: "client.log.blob".to_string(), + ty: ArchiveType::Blob, + } + } + + pub fn encrypted_key() -> Self { + Self { + name: "rsa-encrypted.key.blob".to_string(), + ty: ArchiveType::Blob, + } + } + + pub fn archive_type(&self) -> ArchiveType { + self.ty.clone() + } + + pub fn ends_with(&self, postfix: &str) -> bool { + self.name.ends_with(postfix) + } + + pub fn has_pxar_filename_extension(&self) -> bool { + self.name.ends_with(".pxar.didx") + || self.name.ends_with(".mpxar.didx") + || self.name.ends_with(".ppxar.didx") + } + + pub fn without_type_extension(&self) -> String {
self.name + .strip_suffix(&format!(".{ext}", ext = self.ty.extension())) + .unwrap() + .into() + } + + fn parse_archive_type(archive_name: &str) -> Result<(String, ArchiveType), Error> { + // Detect archive type via given server archive name type extension, if present + if let Ok(archive_type) = ArchiveType::from_path(archive_name) { + return Ok((archive_name.into(), archive_type)); + } + + // No server archive name type extension in archive name, map based on extension + let archive_type = match Path::new(archive_name) + .extension() + .and_then(|ext| ext.to_str()) + { + Some("pxar") => ArchiveType::DynamicIndex, + Some("mpxar") => ArchiveType::DynamicIndex, + Some("ppxar") => ArchiveType::DynamicIndex, + Some("pcat1") => ArchiveType::DynamicIndex, + Some("img") => ArchiveType::FixedIndex, + Some("json") => ArchiveType::Blob, + Some("key") => ArchiveType::Blob, + Some("log") => ArchiveType::Blob, + _ => bail!("failed to parse archive type for '{archive_name}'"), + }; + + Ok(( + format!("{archive_name}.{ext}", ext = archive_type.extension()), + archive_type, + )) + } +} + +impl ApiType for BackupArchiveName { + const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; } -- 2.39.5 From c.ebner at proxmox.com Wed Nov 13 11:50:07 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 11:50:07 +0100 Subject: [pbs-devel] [PATCH v4 proxmox-backup 5/5] api types: add unit tests for backup archive name parsing In-Reply-To: <20241113105007.151258-1-c.ebner@proxmox.com> References: <20241113105007.151258-1-c.ebner@proxmox.com> Message-ID: <20241113105007.151258-6-c.ebner@proxmox.com> Signed-off-by: Christian Ebner --- changes since version 3: - extend tests to cover currently used blob files since the catchall blob mapping was removed pbs-api-types/src/datastore.rs | 64 ++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 00ac63255..f2bd26d85 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1741,3 +1741,67 @@ impl BackupArchiveName { impl ApiType for BackupArchiveName { const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_invalid_backup_archive_names() { + let invalid_archive_names = ["/invalid/", "/invalid/..", "/invalid/archive-name.invalid"]; + + for archive_name in invalid_archive_names { + assert!(BackupArchiveName::from_path(archive_name).is_err()); + } + } + + #[test] + fn test_valid_didx_backup_archive_names() { + let valid_archive_names = [ + "/valid/archive-name.pxar", + "/valid/archive-name.pxar.didx", + "/valid/archive-name.mpxar", + "/valid/archive-name.mpxar.didx", + "/valid/archive-name.ppxar", + "/valid/archive-name.ppxar.didx", + "/valid/archive-name.pcat1", + "/valid/archive-name.pcat1.didx", + ]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref().ends_with(".didx")); + assert!(archive.archive_type() == ArchiveType::DynamicIndex); + } + } + + #[test] + fn test_valid_fidx_backup_archive_names() { + let valid_archive_names = ["/valid/archive-name.img", "/valid/archive-name.img.fidx"]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref() == "archive-name.img.fidx"); + assert!(archive.without_type_extension() == "archive-name.img"); + assert!(archive.archive_type() == 
ArchiveType::FixedIndex); + } + } + + #[test] + fn test_valid_blob_backup_archive_names() { + let valid_archive_names = [ + "/valid/index.json", + "/valid/index.json.blob", + "/valid/rsa-encrypted.key", + "/valid/rsa-encrypted.key.blob", + "/valid/archive-name.log", + "/valid/archive-name.log.blob", + ]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref().ends_with(".blob")); + assert!(archive.archive_type() == ArchiveType::Blob); + } + } +} -- 2.39.5 From c.ebner at proxmox.com Wed Nov 13 11:50:05 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 11:50:05 +0100 Subject: [pbs-devel] [PATCH v4 proxmox-backup 3/5] client/server: use dedicated api type for all archive names In-Reply-To: <20241113105007.151258-1-c.ebner@proxmox.com> References: <20241113105007.151258-1-c.ebner@proxmox.com> Message-ID: <20241113105007.151258-4-c.ebner@proxmox.com> Instead of using the plain String or slices of it for archive names, use the dedicated api type and its methods to parse and check for archive type based on archive filename extension. Thereby, keeping the checks and mappings in the api type and restricting function parameters by the narrower wrapper type to reduce potential misuse. Further, instead of declaring and using the archive name constants throughout the codebase, use the `BackupArchiveName` helpers to generate the archive names for manifest, client logs and encryption keys. This allows for easy archive name comparisons using the same `BackupArchiveName` type, at the cost of some extra allocations and avoids the currently present double constant declaration of `CATALOG_NAME`. A positive ergonomic side effect of this is that commands now also accept the archive type extension optionally, when passing the archive name. E.g. ``` proxmox-backup-client restore <archive-name>.pxar.didx ``` is equal to ``` proxmox-backup-client restore <archive-name>.pxar ``` The previously default mapping of any archive name extension to a blob has been dropped in favor of consistent mapping by the api type helpers. Signed-off-by: Christian Ebner --- changes since version 3: - use api type helpers and drop consts for archive names - adapt to rework of mapping from previous patch in this series, especially use the `ArchiveType::extension()` helper for pxar archive name generation (when mapping regular -> split pxar archives).
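As a rough usage sketch of that behavior (not taken from the patches themselves; it assumes the `BackupArchiveName` API introduced in patch 2/5 of this series, and `root.pxar` is a made-up example name), both spellings parse to the same canonical archive name:

```rust
use pbs_api_types::{ArchiveType, BackupArchiveName};

fn main() -> Result<(), anyhow::Error> {
    // Name given with the server-side type extension ...
    let explicit: BackupArchiveName = "root.pxar.didx".try_into()?;
    // ... or with the client-side name only; parse_archive_type() maps
    // the ".pxar" extension to a dynamic index and appends ".didx".
    let implicit: BackupArchiveName = "root.pxar".try_into()?;

    // Both spellings yield the same canonical archive name and type.
    assert!(explicit == implicit);
    let canonical: &str = explicit.as_ref();
    assert!(canonical == "root.pxar.didx");
    assert!(explicit.archive_type() == ArchiveType::DynamicIndex);
    Ok(())
}
```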
pbs-client/src/backup_reader.rs | 18 ++-- pbs-client/src/backup_writer.rs | 45 +++++----- pbs-client/src/pxar/tools.rs | 3 +- pbs-client/src/tools/mod.rs | 28 +++--- pbs-datastore/src/backup_info.rs | 21 ++--- pbs-datastore/src/lib.rs | 3 - pbs-datastore/src/manifest.rs | 33 ++++--- pbs-datastore/src/snapshot_reader.rs | 11 +-- proxmox-backup-client/src/catalog.rs | 35 ++++---- proxmox-backup-client/src/helper.rs | 7 +- proxmox-backup-client/src/main.rs | 124 +++++++++++++++++---------- proxmox-backup-client/src/mount.rs | 33 +++---- proxmox-file-restore/src/main.rs | 13 +-- src/api2/admin/datastore.rs | 70 +++++++-------- src/api2/tape/restore.rs | 17 ++-- src/backup/mod.rs | 3 - src/bin/proxmox_backup_debug/diff.rs | 16 ++-- src/server/pull.rs | 23 ++--- src/server/sync.rs | 10 +-- tests/prune.rs | 5 +- 20 files changed, 274 insertions(+), 244 deletions(-) diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs index 4706abc78..24c2edbba 100644 --- a/pbs-client/src/backup_reader.rs +++ b/pbs-client/src/backup_reader.rs @@ -6,13 +6,12 @@ use std::sync::Arc; use futures::future::AbortHandle; use serde_json::{json, Value}; -use pbs_api_types::{BackupDir, BackupNamespace}; +use pbs_api_types::{BackupArchiveName, BackupDir, BackupNamespace}; use pbs_datastore::data_blob::DataBlob; use pbs_datastore::data_blob_reader::DataBlobReader; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::MANIFEST_BLOB_NAME; use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1}; use pbs_tools::crypt_config::CryptConfig; use pbs_tools::sha::sha256; @@ -127,7 +126,8 @@ impl BackupReader { /// The manifest signature is verified if we have a crypt_config. 
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec<u8>), Error> { let mut raw_data = Vec::with_capacity(64 * 1024); - self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?; + self.download(BackupArchiveName::manifest().as_ref(), &mut raw_data) + .await?; let blob = DataBlob::load_from_reader(&mut &raw_data[..])?; // no expected digest available let data = blob.decode(None, None)?; @@ -145,11 +145,11 @@ impl BackupReader { pub async fn download_blob( &self, manifest: &BackupManifest, - name: &str, + name: &BackupArchiveName, ) -> Result<DataBlobReader<'_, File>, Error> { let mut tmpfile = crate::tools::create_tmp_file()?; - self.download(name, &mut tmpfile).await?; + self.download(name.as_ref(), &mut tmpfile).await?; tmpfile.seek(SeekFrom::Start(0))?; let (csum, size) = sha256(&mut tmpfile)?; @@ -167,11 +167,11 @@ impl BackupReader { pub async fn download_dynamic_index( &self, manifest: &BackupManifest, - name: &str, + name: &BackupArchiveName, ) -> Result<DynamicIndexReader, Error> { let mut tmpfile = crate::tools::create_tmp_file()?; - self.download(name, &mut tmpfile).await?; + self.download(name.as_ref(), &mut tmpfile).await?; let index = DynamicIndexReader::new(tmpfile) .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?; @@ -190,11 +190,11 @@ impl BackupReader { pub async fn download_fixed_index( &self, manifest: &BackupManifest, - name: &str, + name: &BackupArchiveName, ) -> Result<FixedIndexReader, Error> { let mut tmpfile = crate::tools::create_tmp_file()?; - self.download(name, &mut tmpfile).await?; + self.download(name.as_ref(), &mut tmpfile).await?; let index = FixedIndexReader::new(tmpfile) .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?; diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index 8adaf9ef2..cd142cff4 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -11,13 +11,13 @@ use tokio::io::AsyncReadExt; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use pbs_api_types::{ArchiveType, BackupDir, BackupNamespace}; +use pbs_api_types::{ArchiveType, BackupArchiveName, BackupDir, BackupNamespace}; use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder}; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME}; -use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1}; +use pbs_datastore::manifest::BackupManifest; +use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1; use pbs_tools::crypt_config::CryptConfig; use proxmox_human_byte::HumanByte; @@ -270,7 +270,7 @@ impl BackupWriter { pub async fn upload_stream( &self, - archive_name: &str, + archive_name: &BackupArchiveName, stream: impl Stream<Item = Result<bytes::BytesMut, Error>>, options: UploadOptions, injections: Option<std::sync::mpsc::Receiver<InjectChunks>>, @@ -296,13 +296,13 @@ impl BackupWriter { if !manifest .files() .iter() - .any(|file| file.filename == archive_name) + .any(|file| file.filename == archive_name.as_ref()) { log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download.."); } else { // try, but ignore errors - match ArchiveType::from_path(archive_name) { - Ok(ArchiveType::FixedIndex) => { + match archive_name.archive_type() { + ArchiveType::FixedIndex => { if let Err(err) = self .download_previous_fixed_index( archive_name, &manifest, known_chunks.clone(), ) .await { log::warn!("Error downloading .fidx from previous manifest: {}", err); } } - Ok(ArchiveType::DynamicIndex) => { +
ArchiveType::DynamicIndex => { if let Err(err) = self .download_previous_dynamic_index( archive_name, &manifest, known_chunks.clone(), ) .await { log::warn!("Error downloading .didx from previous manifest: {}", err); } } @@ -338,12 +338,6 @@ impl BackupWriter { .as_u64() .unwrap(); - let archive = if log::log_enabled!(log::Level::Debug) { - archive_name - } else { - pbs_tools::format::strip_server_file_extension(archive_name) - }; - let upload_stats = Self::upload_chunk_info_stream( self.h2.clone(), wid, @@ -357,12 +351,17 @@ impl BackupWriter { }, options.compress, injections, - archive, + archive_name, ) .await?; let size_dirty = upload_stats.size - upload_stats.size_reused; let size: HumanByte = upload_stats.size.into(); + let archive = if log::log_enabled!(log::Level::Debug) { + archive_name.to_string() + } else { + archive_name.without_type_extension() + }; if upload_stats.chunk_injected > 0 { log::info!( @@ -372,7 +371,7 @@ impl BackupWriter { ); } - if archive_name != CATALOG_NAME { + if *archive_name != BackupArchiveName::catalog() { let speed: HumanByte = ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into(); let size_dirty: HumanByte = size_dirty.into(); @@ -541,7 +540,7 @@ impl BackupWriter { pub async fn download_previous_fixed_index( &self, - archive_name: &str, + archive_name: &BackupArchiveName, manifest: &BackupManifest, known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, ) -> Result<FixedIndexReader, Error> { @@ -576,7 +575,7 @@ impl BackupWriter { pub async fn download_previous_dynamic_index( &self, - archive_name: &str, + archive_name: &BackupArchiveName, manifest: &BackupManifest, known_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, ) -> Result<DynamicIndexReader, Error> { @@ -623,7 +622,7 @@ impl BackupWriter { pub async fn download_previous_manifest(&self) -> Result<BackupManifest, Error> { let mut raw_data = Vec::with_capacity(64 * 1024); - let param = json!({ "archive-name": MANIFEST_BLOB_NAME }); + let param = json!({ "archive-name": BackupArchiveName::manifest().to_string() }); self.h2 .download("previous", Some(param), &mut raw_data) .await?; @@ -651,7 +650,7 @@ impl BackupWriter { crypt_config: Option<Arc<CryptConfig>>, compress: bool, injections: Option<std::sync::mpsc::Receiver<InjectChunks>>, - archive: &str, + archive: &BackupArchiveName, ) -> impl Future<Output = Result<UploadStats, Error>> { let total_chunks = Arc::new(AtomicUsize::new(0)); let total_chunks2 = total_chunks.clone(); @@ -683,9 +682,9 @@ impl BackupWriter { let index_csum = Arc::new(Mutex::new(Some(openssl::sha::Sha256::new()))); let index_csum_2 = index_csum.clone(); - let progress_handle = if archive.ends_with(".img") - || archive.ends_with(".pxar") - || archive.ends_with(".ppxar") + let progress_handle = if archive.ends_with(".img.fidx") + || archive.ends_with(".pxar.didx") + || archive.ends_with(".ppxar.didx") { Some(tokio::spawn(async move { loop { diff --git a/pbs-client/src/pxar/tools.rs b/pbs-client/src/pxar/tools.rs index b076daf6b..483ef19b8 100644 --- a/pbs-client/src/pxar/tools.rs +++ b/pbs-client/src/pxar/tools.rs @@ -14,6 +14,7 @@ use pxar::accessor::ReadAt; use pxar::format::StatxTimestamp; use pxar::{mode, Entry, EntryKind, Metadata}; +use pbs_api_types::BackupArchiveName; use pbs_datastore::catalog::{ArchiveEntry, CatalogEntryType, DirEntryAttribute}; use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt}; @@ -330,7 +331,7 @@ pub fn handle_root_with_optional_format_version_prelude<R: pxar::decoder::SeqRead>( pub async fn get_remote_pxar_reader( - archive_name: &str, + archive_name: &BackupArchiveName, client: Arc<BackupReader>, manifest: &BackupManifest, crypt_config: Option<Arc<CryptConfig>>, diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs index 28db6f348..8068dc004 100644 --- a/pbs-client/src/tools/mod.rs +++ b/pbs-client/src/tools/mod.rs @@ -17,7 +17,9 @@ use proxmox_router::cli::{complete_file_name, shellword_split}; use proxmox_schema::*; use proxmox_sys::fs::file_get_json; -use pbs_api_types::{Authid,
BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL}; +use pbs_api_types::{ + Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL, +}; use pbs_datastore::BackupManifest; use crate::{BackupRepository, HttpClient, HttpClientOptions}; @@ -548,19 +550,18 @@ pub fn place_xdg_file( } pub fn get_pxar_archive_names( - archive_name: &str, + archive_name: &BackupArchiveName, manifest: &BackupManifest, -) -> Result<(String, Option<String>), Error> { - let (filename, ext) = match archive_name.strip_suffix(".didx") { - Some(filename) => (filename, ".didx"), - None => (archive_name, ""), - }; +) -> Result<(BackupArchiveName, Option<BackupArchiveName>), Error> { + let filename = archive_name.without_type_extension(); + let ext = archive_name.archive_type().extension(); - // Check if archive with given extension is present + // Check if archive is given as split archive or regular archive and is present in manifest, + // otherwise goto fallback below if manifest .files() .iter() - .any(|fileinfo| fileinfo.filename == format!("{filename}.didx")) + .any(|fileinfo| fileinfo.filename == archive_name.as_ref()) { // check if already given as one of split archive name variants if let Some(base) = filename @@ -568,8 +569,8 @@ pub fn get_pxar_archive_names( .or_else(|| filename.strip_suffix(".ppxar")) { return Ok(( - format!("{base}.mpxar{ext}"), - Some(format!("{base}.ppxar{ext}")), + format!("{base}.mpxar.{ext}").as_str().try_into()?, + Some(format!("{base}.ppxar.{ext}").as_str().try_into()?), )); } return Ok((archive_name.to_owned(), None)); @@ -577,7 +578,10 @@ pub fn get_pxar_archive_names( // if not, try fallback from regular to split archive if let Some(base) = filename.strip_suffix(".pxar") { - return get_pxar_archive_names(&format!("{base}.mpxar{ext}"), manifest); + return get_pxar_archive_names( + &format!("{base}.mpxar.{ext}").as_str().try_into()?, + manifest, + ); } bail!("archive not found in manifest"); diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 414ec878d..972931000 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -8,13 +8,12 @@ use anyhow::{bail, format_err, Error}; use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, + Authid, BackupArchiveName, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, + BACKUP_FILE_REGEX, }; use pbs_config::{open_backup_lockfile, BackupLockGuard}; -use crate::manifest::{ - BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, -}; +use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME}; use crate::{DataBlob, DataStore}; #[derive(Default)] @@ -168,7 +167,7 @@ impl BackupGroup { } let mut manifest_path = PathBuf::from(backup_time); - manifest_path.push(MANIFEST_BLOB_NAME); + manifest_path.push(BackupArchiveName::manifest().as_ref()); use nix::fcntl::{openat, OFlag}; match openat( @@ -520,7 +519,7 @@ impl BackupDir { /// Load the manifest without a lock. Must not be written back.
pub fn load_manifest(&self) -> Result<(BackupManifest, u64), Error> { - let blob = self.load_blob(MANIFEST_BLOB_NAME)?; + let blob = self.load_blob(BackupArchiveName::manifest().as_ref())?; let raw_size = blob.raw_size(); let manifest = BackupManifest::try_from(blob)?; Ok((manifest, raw_size)) @@ -543,7 +542,7 @@ impl BackupDir { let raw_data = blob.raw_data(); let mut path = self.full_path(); - path.push(MANIFEST_BLOB_NAME); + path.push(BackupArchiveName::manifest().as_ref()); // atomic replace invalidates flock - no other writes past this point! replace_file(&path, raw_data, CreateOptions::new(), false)?; @@ -555,8 +554,8 @@ impl BackupDir { let full_path = self.full_path(); let mut wanted_files = std::collections::HashSet::new(); - wanted_files.insert(MANIFEST_BLOB_NAME.to_string()); - wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string()); + wanted_files.insert(BackupArchiveName::manifest().to_string()); + wanted_files.insert(BackupArchiveName::client_log().to_string()); manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); }); @@ -664,7 +663,9 @@ impl BackupInfo { pub fn is_finished(&self) -> bool { // backup is considered unfinished if there is no manifest - self.files.iter().any(|name| name == MANIFEST_BLOB_NAME) + self.files + .iter() + .any(|name| name == BackupArchiveName::manifest().as_ref()) } } diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs index 202b09558..8050cf4d0 100644 --- a/pbs-datastore/src/lib.rs +++ b/pbs-datastore/src/lib.rs @@ -144,9 +144,6 @@ #![deny(unsafe_op_in_unsafe_fn)] -// Note: .pcat1 => Proxmox Catalog Format version 1 -pub const CATALOG_NAME: &str = "catalog.pcat1.didx"; - /// Directory path where active operations counters are saved. pub const ACTIVE_OPERATIONS_DIR: &str = concat!( pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(), diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index 823c85003..51ec117ea 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -3,13 +3,10 @@ use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; -use pbs_api_types::{ArchiveType, BackupType, CryptMode, Fingerprint}; +use pbs_api_types::{BackupArchiveName, BackupType, CryptMode, Fingerprint}; use pbs_tools::crypt_config::CryptConfig; -pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck"; -pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob"; -pub const ENCRYPTED_KEY_BLOB_NAME: &str = "rsa-encrypted.key.blob"; fn crypt_mode_none() -> CryptMode { CryptMode::None @@ -68,14 +65,13 @@ impl BackupManifest { pub fn add_file( &mut self, - filename: String, + filename: &BackupArchiveName, size: u64, csum: [u8; 32], crypt_mode: CryptMode, ) -> Result<(), Error> { - let _archive_type = ArchiveType::from_path(&filename)?; // check type self.files.push(FileInfo { - filename, + filename: filename.to_string(), size, csum, crypt_mode, @@ -87,8 +83,11 @@ impl BackupManifest { &self.files[..] 
} - pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> { - let info = self.files.iter().find(|item| item.filename == name); + pub fn lookup_file_info(&self, name: &BackupArchiveName) -> Result<&FileInfo, Error> { + let info = self + .files + .iter() + .find(|item| item.filename == name.as_ref()); match info { None => bail!("manifest does not contain file '{}'", name), @@ -96,7 +95,12 @@ impl BackupManifest { } } - pub fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> { + pub fn verify_file( + &self, + name: &BackupArchiveName, + csum: &[u8; 32], + size: u64, + ) -> Result<(), Error> { let info = self.lookup_file_info(name)?; if size != info.size { @@ -256,8 +260,13 @@ fn test_manifest_signature() -> Result<(), Error> { let mut manifest = BackupManifest::new("host/elsa/2020-06-26T13:56:05Z".parse()?); - manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?; - manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?; + manifest.add_file( + &"test1.img.fidx".try_into()?, + 200, + [1u8; 32], + CryptMode::Encrypt, + )?; + manifest.add_file(&"abc.blob".try_into()?, 200, [2u8; 32], CryptMode::None)?; manifest.unprotected["note"] = "This is not protected by the signature.".into(); diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs index 432701ea0..ef70c7013 100644 --- a/pbs-datastore/src/snapshot_reader.rs +++ b/pbs-datastore/src/snapshot_reader.rs @@ -8,13 +8,14 @@ use nix::dir::Dir; use proxmox_sys::fs::lock_dir_noblock_shared; -use pbs_api_types::{print_store_and_ns, ArchiveType, BackupNamespace, Operation}; +use pbs_api_types::{ + print_store_and_ns, ArchiveType, BackupArchiveName, BackupNamespace, Operation, +}; use crate::backup_info::BackupDir; use crate::dynamic_index::DynamicIndexReader; use crate::fixed_index::FixedIndexReader; use crate::index::IndexFile; -use crate::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use crate::DataStore; /// Helper to access the contents of a datastore backup snapshot @@ -62,14 +63,14 @@ impl SnapshotReader { }; let mut client_log_path = snapshot_path; - client_log_path.push(CLIENT_LOG_BLOB_NAME); + client_log_path.push(BackupArchiveName::client_log().as_ref()); - let mut file_list = vec![MANIFEST_BLOB_NAME.to_string()]; + let mut file_list = vec![BackupArchiveName::manifest().to_string()]; for item in manifest.files() { file_list.push(item.filename.clone()); } if client_log_path.exists() { - file_list.push(CLIENT_LOG_BLOB_NAME.to_string()); + file_list.push(BackupArchiveName::client_log().to_string()); } Ok(Self { diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs index a55c9effe..39416d1d4 100644 --- a/proxmox-backup-client/src/catalog.rs +++ b/proxmox-backup-client/src/catalog.rs @@ -7,9 +7,8 @@ use serde_json::Value; use proxmox_router::cli::*; use proxmox_schema::api; -use pbs_api_types::BackupNamespace; +use pbs_api_types::{BackupArchiveName, BackupNamespace}; use pbs_client::pxar::tools::get_remote_pxar_reader; -use pbs_client::tools::has_pxar_filename_extension; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_tools::crypt_config::CryptConfig; @@ -22,7 +21,7 @@ use crate::{ complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group, extract_repository_from_value, format_key_source, optional_ns_param, record_repository, BackupDir, 
BufferedDynamicReader, CatalogReader, DynamicIndexReader, - IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA, + IndexFile, Shell, KEYFD_SCHEMA, REPO_URL_SCHEMA, }; #[api( @@ -90,7 +89,8 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> { let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; - let file_info = match manifest.lookup_file_info(CATALOG_NAME) { + let catalog_name = BackupArchiveName::catalog(); + let file_info = match manifest.lookup_file_info(&catalog_name) { Ok(file_info) => file_info, Err(err) => { let mut metadata_archives = Vec::new(); @@ -104,7 +104,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> { for archive in &metadata_archives { let (reader, archive_size) = get_remote_pxar_reader( - &archive, + &archive.as_str().try_into()?, client.clone(), &manifest, crypt_config.clone(), @@ -128,7 +128,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> { }; let index = client - .download_dynamic_index(&manifest, CATALOG_NAME) + .download_dynamic_index(&manifest, &catalog_name) .await?; let most_used = index.find_most_used_chunks(8); @@ -170,8 +170,7 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> { description: "Group/Snapshot path.", }, "archive-name": { - type: String, - description: "Backup archive name.", + type: BackupArchiveName, }, "repository": { optional: true, @@ -195,7 +194,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { let client = connect(&repo)?; let backup_ns = optional_ns_param(&param)?; let path = required_string_param(&param, "snapshot")?; - let archive_name = required_string_param(&param, "archive-name")?; + let server_archive_name: BackupArchiveName = + required_string_param(&param, "archive-name")?.try_into()?; let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?; @@ -214,9 +214,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { } }; - let server_archive_name = if has_pxar_filename_extension(archive_name, false) { - format!("{}.didx", archive_name) - } else { + if !server_archive_name.has_pxar_filename_extension() { bail!("Can only mount pxar archives."); }; @@ -233,7 +231,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; - if let Err(_err) = manifest.lookup_file_info(CATALOG_NAME) { + let catalog_name = BackupArchiveName::catalog(); + if let Err(_err) = manifest.lookup_file_info(&catalog_name) { // No catalog, fallback to pxar archive accessor if present let accessor = helper::get_pxar_fuse_accessor( &server_archive_name, @@ -243,7 +242,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { ) .await?; - let state = Shell::new(None, &server_archive_name, accessor).await?; + let state = Shell::new(None, &server_archive_name.as_ref(), accessor).await?; log::info!("Starting interactive shell"); state.shell().await?; record_repository(&repo); @@ -261,17 +260,17 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { ) .await?; - client.download(CATALOG_NAME, &mut tmpfile).await?; + client.download(catalog_name.as_ref(), &mut tmpfile).await?; let index = DynamicIndexReader::new(tmpfile) .map_err(|err| format_err!("unable to read catalog index - {}", err))?; // Note: do not use values stored in index (not trusted) - instead, computed them again let (csum, size) = index.compute_csum(); - manifest.verify_file(CATALOG_NAME, &csum, size)?; + manifest.verify_file(&catalog_name, &csum, size)?;
let most_used = index.find_most_used_chunks(8); - let file_info = manifest.lookup_file_info(CATALOG_NAME)?; + let file_info = manifest.lookup_file_info(&catalog_name)?; let chunk_reader = RemoteChunkReader::new( client.clone(), crypt_config, @@ -286,7 +285,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { catalogfile.seek(SeekFrom::Start(0))?; let catalog_reader = CatalogReader::new(catalogfile); - let state = Shell::new(Some(catalog_reader), &server_archive_name, decoder).await?; + let state = Shell::new(Some(catalog_reader), &server_archive_name.as_ref(), decoder).await?; log::info!("Starting interactive shell"); state.shell().await?; diff --git a/proxmox-backup-client/src/helper.rs b/proxmox-backup-client/src/helper.rs index 60355d7d0..642d66a7b 100644 --- a/proxmox-backup-client/src/helper.rs +++ b/proxmox-backup-client/src/helper.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use anyhow::Error; +use pbs_api_types::BackupArchiveName; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_datastore::BackupManifest; use pbs_tools::crypt_config::CryptConfig; @@ -8,7 +9,7 @@ use pbs_tools::crypt_config::CryptConfig; use crate::{BufferedDynamicReadAt, BufferedDynamicReader, IndexFile}; pub(crate) async fn get_pxar_fuse_accessor( - archive_name: &str, + archive_name: &BackupArchiveName, client: Arc<BackupReader>, manifest: &BackupManifest, crypt_config: Option<Arc<CryptConfig>>, @@ -44,7 +45,7 @@ pub(crate) async fn get_pxar_fuse_accessor( } pub(crate) async fn get_pxar_fuse_reader( - archive_name: &str, + archive_name: &BackupArchiveName, client: Arc<BackupReader>, manifest: &BackupManifest, crypt_config: Option<Arc<CryptConfig>>, @@ -57,7 +58,7 @@ pub(crate) async fn get_pxar_fuse_reader( } pub(crate) async fn get_buffered_pxar_reader( - archive_name: &str, + archive_name: &BackupArchiveName, client: Arc<BackupReader>, manifest: &BackupManifest, crypt_config: Option<Arc<CryptConfig>>, diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index f6fb3555e..a155f56f0 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -25,10 +25,10 @@ use pxar::accessor::aio::Accessor; use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ - ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, - ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, - RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, + ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart, + BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions, + PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, }; use pbs_client::catalog_shell::Shell; use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef}; @@ -36,7 +36,7 @@ use pbs_client::tools::{ complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot, complete_backup_source, complete_chunk_size, complete_group_or_snapshot, complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository, - connect, connect_rate_limited, extract_repository_from_value, has_pxar_filename_extension, + connect, connect_rate_limited, extract_repository_from_value,
MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA, @@ -54,9 +54,8 @@ use pbs_datastore::chunk_store::verify_chunk_size; use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::BackupManifest; use pbs_datastore::read_chunk::AsyncReadChunk; -use pbs_datastore::CATALOG_NAME; use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig}; use pbs_tools::crypt_config::CryptConfig; use pbs_tools::json; @@ -196,8 +195,8 @@ pub async fn dir_or_last_from_group( async fn backup_directory>( client: &BackupWriter, dir_path: P, - archive_name: &str, - payload_target: Option<&str>, + archive_name: &BackupArchiveName, + payload_target: Option<&BackupArchiveName>, chunk_size: Option, catalog: Option>>>>>, pxar_create_options: pbs_client::pxar::PxarCreateOptions, @@ -276,7 +275,7 @@ async fn backup_directory>( async fn backup_image>( client: &BackupWriter, image_path: P, - archive_name: &str, + archive_name: &BackupArchiveName, chunk_size: Option, upload_options: UploadOptions, ) -> Result { @@ -606,7 +605,12 @@ fn spawn_catalog_upload( tokio::spawn(async move { let catalog_upload_result = client - .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options, None) + .upload_stream( + &BackupArchiveName::catalog(), + catalog_chunk_stream, + upload_options, + None, + ) .await; if let Err(ref err) = catalog_upload_result { @@ -1005,13 +1009,21 @@ async fn create_backup( }; for (backup_type, filename, target_base, extension, size) in upload_list { - let target = format!("{target_base}.{extension}"); + let target: BackupArchiveName = format!("{target_base}.{extension}").as_str().try_into()?; match (backup_type, dry_run) { // dry-run - (BackupSpecificationType::CONFIG, true) => log_file("config file", &filename, &target), - (BackupSpecificationType::LOGFILE, true) => log_file("log file", &filename, &target), - (BackupSpecificationType::PXAR, true) => log_file("directory", &filename, &target), - (BackupSpecificationType::IMAGE, true) => log_file("image", &filename, &target), + (BackupSpecificationType::CONFIG, true) => { + log_file("config file", &filename, target.as_ref()) + } + (BackupSpecificationType::LOGFILE, true) => { + log_file("log file", &filename, target.as_ref()) + } + (BackupSpecificationType::PXAR, true) => { + log_file("directory", &filename, target.as_ref()) + } + (BackupSpecificationType::IMAGE, true) => { + log_file("image", &filename, &target.as_ref()) + } // no dry-run (BackupSpecificationType::CONFIG, false) => { let upload_options = UploadOptions { @@ -1020,11 +1032,11 @@ async fn create_backup( ..UploadOptions::default() }; - log_file("config file", &filename, &target); + log_file("config file", &filename, target.as_ref()); let stats = client - .upload_blob_from_file(&filename, &target, upload_options) + .upload_blob_from_file(&filename, target.as_ref(), upload_options) .await?; - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } (BackupSpecificationType::LOGFILE, false) => { // fixme: remove - not needed anymore ? 
@@ -1034,11 +1046,11 @@ async fn create_backup( ..UploadOptions::default() }; - log_file("log file", &filename, &target); + log_file("log file", &filename, target.as_ref()); let stats = client - .upload_blob_from_file(&filename, &target, upload_options) + .upload_blob_from_file(&filename, target.as_ref(), upload_options) .await?; - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } (BackupSpecificationType::PXAR, false) => { let target_base = if let Some(base) = target_base.strip_suffix(".pxar") { @@ -1050,8 +1062,14 @@ async fn create_backup( let (target, payload_target) = if detection_mode.is_metadata() || detection_mode.is_data() { ( - format!("{target_base}.mpxar.{extension}"), - Some(format!("{target_base}.ppxar.{extension}")), + format!("{target_base}.mpxar.{extension}") + .as_str() + .try_into()?, + Some( + format!("{target_base}.ppxar.{extension}") + .as_str() + .try_into()?, + ), ) } else { (target, None) @@ -1065,12 +1083,12 @@ async fn create_backup( catalog_result_rx = Some(catalog_upload_res.result); } - log_file("directory", &filename, &target); + log_file("directory", &filename, target.as_ref()); if let Some(catalog) = catalog.as_ref() { catalog .lock() .unwrap() - .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?; + .start_directory(std::ffi::CString::new(target.as_ref())?.as_c_str())?; } let mut previous_ref = None; @@ -1137,7 +1155,7 @@ async fn create_backup( &client, &filename, &target, - payload_target.as_deref(), + payload_target.as_ref().as_deref(), chunk_size_opt, catalog.as_ref().cloned(), pxar_options, @@ -1147,20 +1165,20 @@ async fn create_backup( if let Some(payload_stats) = payload_stats { manifest.add_file( - payload_target + &payload_target .ok_or_else(|| format_err!("missing payload target archive"))?, payload_stats.size, payload_stats.csum, crypto.mode, )?; } - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; if let Some(catalog) = catalog.as_ref() { catalog.lock().unwrap().end_directory()?; } } (BackupSpecificationType::IMAGE, false) => { - log_file("image", &filename, &target); + log_file("image", &filename, target.as_ref()); let upload_options = UploadOptions { previous_manifest: previous_manifest.clone(), @@ -1172,7 +1190,7 @@ async fn create_backup( let stats = backup_image(&client, &filename, &target, chunk_size_opt, upload_options) .await?; - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } } } @@ -1194,12 +1212,17 @@ async fn create_backup( if let Some(catalog_result_rx) = catalog_result_rx { let stats = catalog_result_rx.await??; - manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?; + manifest.add_file( + &BackupArchiveName::catalog(), + stats.size, + stats.csum, + crypto.mode, + )?; } } if let Some(rsa_encrypted_key) = rsa_encrypted_key { - let target = ENCRYPTED_KEY_BLOB_NAME; + let target = BackupArchiveName::encrypted_key(); log::info!("Upload RSA encoded key to '{}' as {}", repo, target); let options = UploadOptions { compress: false, @@ -1207,9 +1230,9 @@ async fn create_backup( ..UploadOptions::default() }; let stats = client - .upload_blob_from_data(rsa_encrypted_key, target, options) + .upload_blob_from_data(rsa_encrypted_key, target.as_ref(), options) .await?; - manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?; 
+ manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } // create manifest (index.json) // manifests are never encrypted, but include a signature @@ -1225,7 +1248,11 @@ async fn create_backup( ..UploadOptions::default() }; client - .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options) + .upload_blob_from_data( + manifest.into_bytes(), + BackupArchiveName::manifest().as_ref(), + options, + ) .await?; client.finish().await?; @@ -1238,7 +1265,7 @@ async fn create_backup( } async fn prepare_reference( - target: &str, + target: &BackupArchiveName, manifest: Arc<BackupManifest>, backup_writer: &BackupWriter, backup_reader: Arc<BackupReader>, @@ -1250,7 +1277,11 @@ async fn prepare_reference( Ok((target, payload_target)) => (target, payload_target), Err(_) => return Ok(None), }; - let payload_target = payload_target.unwrap_or_default(); + let payload_target = if let Some(payload_target) = payload_target { + payload_target + } else { + return Ok(None); + }; let metadata_ref_index = if let Ok(index) = backup_reader .download_dynamic_index(&manifest, &target) .await @@ -1299,7 +1330,7 @@ async fn prepare_reference( Ok(Some(pbs_client::pxar::PxarPrevRef { accessor, payload_index: payload_ref_index, - archive_name: target, + archive_name: target.to_string(), })) } @@ -1486,7 +1517,8 @@ async fn restore( ) -> Result<Value, Error> { let repo = extract_repository_from_value(&param)?; - let archive_name = json::required_string_param(&param, "archive-name")?; + let archive_name: BackupArchiveName = + json::required_string_param(&param, "archive-name")?.try_into()?; let rate_limit = RateLimitConfig::from_client_config(limit); @@ -1525,11 +1557,9 @@ async fn restore( ) .await?; - let (archive_name, archive_type) = parse_archive_type(archive_name); - let (manifest, backup_index_data) = client.download_manifest().await?; - if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() { + if archive_name == BackupArchiveName::encrypted_key() && crypt_config.is_none() { log::info!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!") } else { if manifest.signature.is_some() { @@ -1543,7 +1573,7 @@ async fn restore( manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; } - if archive_name == MANIFEST_BLOB_NAME { + if archive_name == BackupArchiveName::manifest() { if let Some(target) = target { replace_file(target, &backup_index_data, CreateOptions::new(), false)?; } else { @@ -1557,7 +1587,7 @@ async fn restore( return Ok(Value::Null); } - if archive_type == ArchiveType::Blob { + if archive_name.archive_type() == ArchiveType::Blob { let mut reader = client.download_blob(&manifest, &archive_name).await?; if let Some(target) = target { @@ -1576,7 +1606,7 @@ async fn restore( std::io::copy(&mut reader, &mut writer) .map_err(|err| format_err!("unable to pipe data - {}", err))?; } - } else if archive_type == ArchiveType::DynamicIndex { + } else if archive_name.archive_type() == ArchiveType::DynamicIndex { let (archive_name, payload_archive_name) = pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?; @@ -1680,7 +1710,7 @@ async fn restore( std::io::copy(&mut reader, &mut writer) .map_err(|err| format_err!("unable to pipe data - {}", err))?; } - } else if archive_type == ArchiveType::FixedIndex { + } else if archive_name.archive_type() == ArchiveType::FixedIndex { let file_info = manifest.lookup_file_info(&archive_name)?; let index = client .download_fixed_index(&manifest, &archive_name) diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs
index c15e030f5..0048a8ad4 100644 --- a/proxmox-backup-client/src/mount.rs +++ b/proxmox-backup-client/src/mount.rs @@ -18,8 +18,7 @@ use proxmox_schema::*; use proxmox_sortable_macro::sortable; use proxmox_systemd; -use pbs_api_types::BackupNamespace; -use pbs_client::tools::has_pxar_filename_extension; +use pbs_api_types::{ArchiveType, BackupArchiveName, BackupNamespace}; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_datastore::cached_chunk_reader::CachedChunkReader; @@ -47,11 +46,7 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new( false, &StringSchema::new("Group/Snapshot path.").schema() ), - ( - "archive-name", - false, - &StringSchema::new("Backup archive name.").schema() - ), + ("archive-name", false, &BackupArchiveName::API_SCHEMA), ( "target", false, @@ -87,11 +82,7 @@ WARNING: Only do this with *trusted* backups!", false, &StringSchema::new("Group/Snapshot path.").schema() ), - ( - "archive-name", - false, - &StringSchema::new("Backup archive name.").schema() - ), + ("archive-name", false, &BackupArchiveName::API_SCHEMA), ("repository", true, &REPO_URL_SCHEMA), ( "keyfile", @@ -208,7 +199,8 @@ fn mount( async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> { let repo = extract_repository_from_value(&param)?; - let archive_name = required_string_param(&param, "archive-name")?; + let server_archive_name: BackupArchiveName = + required_string_param(&param, "archive-name")?.try_into()?; let client = connect(&repo)?; let target = param["target"].as_str(); @@ -230,16 +222,14 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> { } }; - let server_archive_name = if has_pxar_filename_extension(archive_name, false) { + if server_archive_name.has_pxar_filename_extension() { if target.is_none() { bail!("use the 'mount' command to mount pxar archives"); } - format!("{}.didx", archive_name) - } else if archive_name.ends_with(".img") { + } else if server_archive_name.ends_with(".img.fidx") { if target.is_some() { bail!("use the 'map' command to map drive images"); } - format!("{}.fidx", archive_name) } else { bail!("Can only mount/map pxar archives and drive images."); }; @@ -291,7 +281,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> { let mut interrupt = futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed()); - if server_archive_name.ends_with(".didx") { + if server_archive_name.archive_type() == ArchiveType::DynamicIndex { let decoder = helper::get_pxar_fuse_accessor( &server_archive_name, client.clone(), @@ -312,7 +302,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> { // exit on interrupted } } - } else if server_archive_name.ends_with(".fidx") { + } else if server_archive_name.archive_type() == ArchiveType::FixedIndex { let file_info = manifest.lookup_file_info(&server_archive_name)?; let index = client .download_fixed_index(&manifest, &server_archive_name) @@ -326,7 +316,7 @@ async fn mount_do(param: Value, pipe: Option<OwnedFd>) -> Result<Value, Error> { ); let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable(); - let name = &format!("{}:{}/{}", repo, path, archive_name); + let name = &format!( + "{repo}:{path}/{}", + server_archive_name.without_type_extension(), + ); let name_escaped = proxmox_systemd::escape_unit(name, false); let mut session = diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs index 08354b454..5434a1351 100644 --- a/proxmox-file-restore/src/main.rs +++ b/proxmox-file-restore/src/main.rs @@ -5,6 +5,7 @@ use
std::sync::Arc; use anyhow::{bail, format_err, Error}; use futures::StreamExt; +use pbs_api_types::BackupArchiveName; use serde_json::{json, Value}; use tokio::io::AsyncWriteExt; @@ -37,7 +38,6 @@ use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader}; use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute}; use pbs_datastore::dynamic_index::BufferedDynamicReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::CATALOG_NAME; use pbs_key_config::decrypt_key; use pbs_tools::crypt_config::CryptConfig; @@ -149,9 +149,10 @@ async fn list_files( Ok(entries) } ExtractPath::Pxar(file, mut path) => { - if let Ok(file_info) = manifest.lookup_file_info(CATALOG_NAME) { + let catalog_name = BackupArchiveName::catalog(); + if let Ok(file_info) = manifest.lookup_file_info(&catalog_name) { let index = client - .download_dynamic_index(&manifest, CATALOG_NAME) + .download_dynamic_index(&manifest, &catalog_name) .await?; let most_used = index.find_most_used_chunks(8); let chunk_reader = RemoteChunkReader::new( @@ -172,6 +173,7 @@ async fn list_files( path = vec![b'/']; } + let file: BackupArchiveName = file.as_str().try_into()?; let (archive_name, _payload_archive_name) = pbs_client::tools::get_pxar_archive_names(&file, &manifest)?; @@ -191,7 +193,7 @@ async fn list_files( pbs_client::pxar::tools::pxar_metadata_catalog_lookup( accessor, path, - Some(&archive_name), + Some(archive_name.as_ref()), ) .await } @@ -476,10 +478,11 @@ async fn extract( match path { ExtractPath::Pxar(archive_name, path) => { + let archive_name: BackupArchiveName = archive_name.as_str().try_into()?; let (archive_name, payload_archive_name) = pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?; let (reader, archive_size) = get_remote_pxar_reader( - &archive_name, + &archive_name.try_into()?, client.clone(), &manifest, crypt_config.clone(), diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index b73ad0ff0..3ca9d35ae 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -34,15 +34,15 @@ use pxar::accessor::aio::Accessor; use pxar::EntryKind; use pbs_api_types::{ - print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType, - Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, - GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation, - PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, - BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, - DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, - VERIFICATION_OUTDATED_AFTER_SCHEMA, + print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName, + BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, + DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, + JobScheduleStatus, KeepOptions, Operation, PruneJobOptions, SnapshotListItem, + SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, + BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, + MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, 
PRIV_DATASTORE_VERIFY, UPID, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; use pbs_config::CachedUserInfo; @@ -54,11 +54,11 @@ use pbs_datastore::data_blob_reader::DataBlobReader; use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::BackupManifest; use pbs_datastore::prune::compute_prune_info; use pbs_datastore::{ check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, - StoreProgress, CATALOG_NAME, + StoreProgress, }; use pbs_tools::json::required_string_param; use proxmox_rest_server::{formatter, WorkerTask}; @@ -124,7 +124,7 @@ fn read_backup_index( } result.push(BackupContent { - filename: MANIFEST_BLOB_NAME.to_string(), + filename: BackupArchiveName::manifest().to_string(), crypt_mode: match manifest.signature { Some(_) => Some(CryptMode::SignOnly), None => Some(CryptMode::None), @@ -1468,12 +1468,13 @@ pub fn download_file_decoded( &backup_dir_api.group, )?; - let file_name = required_string_param(&param, "file-name")?.to_owned(); + let file_name: BackupArchiveName = + required_string_param(&param, "file-name")?.try_into()?; let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?; let (manifest, files) = read_backup_index(&backup_dir)?; for file in files { - if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { + if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) { bail!("cannot decode '{}' - is encrypted", file_name); } } @@ -1488,12 +1489,10 @@ pub fn download_file_decoded( let mut path = datastore.base_path(); path.push(backup_dir.relative_path()); - path.push(&file_name); + path.push(file_name.as_ref()); - let (_, extension) = file_name.rsplit_once('.').unwrap(); - - let body = match extension { - "didx" => { + let body = match file_name.archive_type() { + ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path).map_err(|err| { format_err!("unable to read dynamic index '{:?}' - {}", &path, err) })?; @@ -1507,7 +1506,7 @@ pub fn download_file_decoded( err })) } - "fidx" => { + ArchiveType::FixedIndex => { let index = FixedIndexReader::open(&path).map_err(|err| { format_err!("unable to read fixed index '{:?}' - {}", &path, err) })?; @@ -1526,7 +1525,7 @@ pub fn download_file_decoded( ), ) } - "blob" => { + ArchiveType::Blob => { let file = std::fs::File::open(&path) .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?; @@ -1541,9 +1540,6 @@ pub fn download_file_decoded( ), ) } - extension => { - bail!("cannot download '{}' files", extension); - } }; // fixme: set other headers ?
@@ -1600,10 +1596,10 @@ pub fn upload_backup_log( )?; let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?; - let file_name = CLIENT_LOG_BLOB_NAME; + let file_name = BackupArchiveName::client_log(); let mut path = backup_dir.full_path(); - path.push(file_name); + path.push(file_name.as_ref()); if path.exists() { bail!("backup already contains a log."); @@ -1658,7 +1654,7 @@ fn decode_path(path: &str) -> Result<Vec<u8>, Error> { type: String, }, "archive-name": { - schema: BACKUP_ARCHIVE_NAME_SCHEMA, + type: BackupArchiveName, optional: true, }, }, @@ -1675,12 +1671,12 @@ pub async fn catalog( ns: Option<BackupNamespace>, backup_dir: pbs_api_types::BackupDir, filepath: String, - archive_name: Option<String>, + archive_name: Option<BackupArchiveName>, rpcenv: &mut dyn RpcEnvironment, ) -> Result<Vec<ArchiveEntry>, Error> { let file_name = archive_name .clone() - .unwrap_or_else(|| CATALOG_NAME.to_string()); + .unwrap_or_else(|| BackupArchiveName::catalog()); let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -1700,7 +1696,7 @@ pub async fn catalog( let (manifest, files) = read_backup_index(&backup_dir)?; for file in files { - if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { + if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) { bail!("cannot decode '{file_name}' - is encrypted"); } } @@ -1709,7 +1705,7 @@ pub async fn catalog( tokio::task::spawn_blocking(move || { let mut path = datastore.base_path(); path.push(backup_dir.relative_path()); - path.push(&file_name); + path.push(file_name.as_ref()); let index = DynamicIndexReader::open(&path) .map_err(|err| format_err!("unable to read dynamic index '{path:?}' - {err}"))?; @@ -1759,7 +1755,7 @@ pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new( ("backup-time", false, &BACKUP_TIME_SCHEMA), ("filepath", false, &StringSchema::new("Base64 encoded path").schema()), ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()), - ("archive-name", true, &BACKUP_ARCHIVE_NAME_SCHEMA), + ("archive-name", true, &BackupArchiveName::API_SCHEMA), ]), ) ).access( @@ -1774,11 +1770,11 @@ fn get_local_pxar_reader( datastore: Arc<DataStore>, manifest: &BackupManifest, backup_dir: &BackupDir, - pxar_name: &str, + pxar_name: &BackupArchiveName, ) -> Result<(LocalDynamicReadAt<LocalChunkReader>, u64), Error> { let mut path = datastore.base_path(); path.push(backup_dir.relative_path()); - path.push(pxar_name); + path.push(pxar_name.as_ref()); let index = DynamicIndexReader::open(&path) .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; @@ -1836,16 +1832,16 @@ pub fn pxar_file_download( let file_path = split.next().unwrap_or(b"/"); (pxar_name.to_owned(), file_path.to_owned()) }; - let pxar_name = std::str::from_utf8(&pxar_name)?; + let pxar_name: BackupArchiveName = std::str::from_utf8(&pxar_name)?.try_into()?; let (manifest, files) = read_backup_index(&backup_dir)?; for file in files { - if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) { + if file.filename == pxar_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) { bail!("cannot decode '{}' - is encrypted", pxar_name); } } let (pxar_name, payload_archive_name) = - pbs_client::tools::get_pxar_archive_names(pxar_name, &manifest)?; + pbs_client::tools::get_pxar_archive_names(&pxar_name, &manifest)?; let (reader, archive_size) = get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &pxar_name)?; diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index a180a4b02..65eda56dd 100644 ---
a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -19,18 +19,18 @@ use proxmox_uuid::Uuid; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupDir, BackupNamespace, - CryptMode, NotificationMode, Operation, TapeRestoreNamespace, Userid, - DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, - TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, + parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupArchiveName, + BackupDir, BackupNamespace, CryptMode, NotificationMode, Operation, TapeRestoreNamespace, + Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, + MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, + TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, }; use pbs_client::pxar::tools::handle_root_with_optional_format_version_prelude; use pbs_config::CachedUserInfo; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::BackupManifest; use pbs_datastore::{DataBlob, DataStore}; use pbs_tape::{ BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, @@ -1652,7 +1652,8 @@ fn try_restore_snapshot_archive( } let root_path = Path::new("/"); - let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME); + let manifest_archive_name = BackupArchiveName::manifest(); + let manifest_file_name = OsStr::new(manifest_archive_name.as_ref()); let mut manifest = None; @@ -1732,7 +1733,7 @@ fn try_restore_snapshot_archive( // commit manifest let mut manifest_path = snapshot_path.to_owned(); - manifest_path.push(MANIFEST_BLOB_NAME); + manifest_path.push(BackupArchiveName::manifest().as_ref()); let mut tmp_manifest_path = manifest_path.clone(); tmp_manifest_path.set_extension("tmp"); diff --git a/src/backup/mod.rs b/src/backup/mod.rs index 8c84b8ce8..c5dae69a6 100644 --- a/src/backup/mod.rs +++ b/src/backup/mod.rs @@ -1,8 +1,5 @@ //! Server/client-specific parts for what's otherwise in pbs-datastore. 
-// Note: .pcat1 => Proxmox Catalog Format version 1 -pub const CATALOG_NAME: &str = "catalog.pcat1.didx"; - mod verify; pub use verify::*; diff --git a/src/bin/proxmox_backup_debug/diff.rs b/src/bin/proxmox_backup_debug/diff.rs index b0436d048..dcd351d93 100644 --- a/src/bin/proxmox_backup_debug/diff.rs +++ b/src/bin/proxmox_backup_debug/diff.rs @@ -13,7 +13,7 @@ use proxmox_human_byte::HumanByte; use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface}; use proxmox_schema::api; -use pbs_api_types::{BackupNamespace, BackupPart}; +use pbs_api_types::{BackupArchiveName, BackupNamespace, BackupPart}; use pbs_client::tools::key_source::{ crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA, }; @@ -70,8 +70,7 @@ pub fn diff_commands() -> CommandLineInterface { type: String, }, "archive-name": { - description: "Name of the .pxar archive", - type: String, + type: BackupArchiveName, }, "repository": { optional: true, @@ -106,7 +105,7 @@ pub fn diff_commands() -> CommandLineInterface { async fn diff_archive_cmd( prev_snapshot: String, snapshot: String, - archive_name: String, + archive_name: BackupArchiveName, compare_content: bool, color: Option, ns: Option, @@ -140,12 +139,11 @@ async fn diff_archive_cmd( let output_params = OutputParams { color }; - if archive_name.ends_with(".pxar") { - let file_name = format!("{}.didx", archive_name); + if archive_name.ends_with(".pxar.didx") { diff_archive( &prev_snapshot, &snapshot, - &file_name, + &archive_name, &repo_params, compare_content, &output_params, @@ -161,7 +159,7 @@ async fn diff_archive_cmd( async fn diff_archive( snapshot_a: &str, snapshot_b: &str, - file_name: &str, + file_name: &BackupArchiveName, repo_params: &RepoParams, compare_contents: bool, output_params: &OutputParams, @@ -249,7 +247,7 @@ struct OutputParams { async fn open_dynamic_index( snapshot: &str, - archive_name: &str, + archive_name: &BackupArchiveName, params: &RepoParams, ) -> Result<(DynamicIndexReader, Accessor), Error> { let backup_reader = create_backup_reader(snapshot, params).await?; diff --git a/src/server/pull.rs b/src/server/pull.rs index 8d53ccd6e..7a72f6615 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -11,9 +11,9 @@ use proxmox_human_byte::HumanByte; use tracing::info; use pbs_api_types::{ - print_store_and_ns, ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, - Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, - PRIV_DATASTORE_BACKUP, + print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, + BackupNamespace, GroupFilter, Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, + PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -21,7 +21,7 @@ use pbs_datastore::data_blob::DataBlob; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::{BackupManifest, FileInfo}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{check_backup_owner, DataStore, StoreProgress}; use pbs_tools::sha::sha256; @@ -334,16 +334,16 @@ async fn pull_snapshot<'a>( ) -> Result { let mut sync_stats = SyncStats::default(); let mut manifest_name = snapshot.full_path(); - manifest_name.push(MANIFEST_BLOB_NAME); + 
manifest_name.push(BackupArchiveName::manifest().as_ref()); let mut client_log_name = snapshot.full_path(); - client_log_name.push(CLIENT_LOG_BLOB_NAME); + client_log_name.push(BackupArchiveName::client_log().as_ref()); let mut tmp_manifest_name = manifest_name.clone(); tmp_manifest_name.set_extension("tmp"); let tmp_manifest_blob; if let Some(data) = reader - .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name) + .load_file_into(BackupArchiveName::manifest().as_ref(), &tmp_manifest_name) .await? { tmp_manifest_blob = data; @@ -381,11 +381,12 @@ async fn pull_snapshot<'a>( path.push(&item.filename); if path.exists() { - match ArchiveType::from_path(&item.filename)? { + let filename: BackupArchiveName = item.filename.as_str().try_into()?; + match filename.archive_type() { ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path)?; let (csum, size) = index.compute_csum(); - match manifest.verify_file(&item.filename, &csum, size) { + match manifest.verify_file(&filename, &csum, size) { Ok(_) => continue, Err(err) => { info!("detected changed file {path:?} - {err}"); @@ -395,7 +396,7 @@ async fn pull_snapshot<'a>( ArchiveType::FixedIndex => { let index = FixedIndexReader::open(&path)?; let (csum, size) = index.compute_csum(); - match manifest.verify_file(&item.filename, &csum, size) { + match manifest.verify_file(&filename, &csum, size) { Ok(_) => continue, Err(err) => { info!("detected changed file {path:?} - {err}"); @@ -405,7 +406,7 @@ async fn pull_snapshot<'a>( ArchiveType::Blob => { let mut tmpfile = std::fs::File::open(&path)?; let (csum, size) = sha256(&mut tmpfile)?; - match manifest.verify_file(&item.filename, &csum, size) { + match manifest.verify_file(&filename, &csum, size) { Ok(_) => continue, Err(err) => { info!("detected changed file {path:?} - {err}"); diff --git a/src/server/sync.rs b/src/server/sync.rs index bd68dda46..1f11ee5b4 100644 --- a/src/server/sync.rs +++ b/src/server/sync.rs @@ -14,12 +14,11 @@ use tracing::{info, warn}; use proxmox_router::HttpError; use pbs_api_types::{ - Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem, - MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, + Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, + SnapshotListItem, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, }; use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader}; use pbs_datastore::data_blob::DataBlob; -use pbs_datastore::manifest::CLIENT_LOG_BLOB_NAME; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader}; @@ -156,15 +155,16 @@ impl SyncSourceReader for RemoteSourceReader { .open(&tmp_path)?; // Note: be silent if there is no log - only log successful download + let client_log_name = BackupArchiveName::client_log(); if let Ok(()) = self .backup_reader - .download(CLIENT_LOG_BLOB_NAME, tmpfile) + .download(client_log_name.as_ref(), tmpfile) .await { if let Err(err) = std::fs::rename(&tmp_path, to_path) { bail!("Atomic rename file {to_path:?} failed - {err}"); } - info!("got backup log file {CLIENT_LOG_BLOB_NAME:?}"); + info!("got backup log file {client_log_name}"); } Ok(()) diff --git a/tests/prune.rs b/tests/prune.rs index 3b3209698..edc614821 100644 --- a/tests/prune.rs +++ b/tests/prune.rs @@ -2,8 +2,7 @@ use std::path::PathBuf; use anyhow::Error; -use pbs_api_types::PruneJobOptions; -use pbs_datastore::manifest::MANIFEST_BLOB_NAME; +use 
pbs_api_types::{BackupArchiveName, PruneJobOptions};
use pbs_datastore::prune::compute_prune_info;
use pbs_datastore::{BackupDir, BackupInfo};

@@ -34,7 +33,7 @@ fn create_info(snapshot: &str, partial: bool) -> BackupInfo {
 let mut files = Vec::new();

 if !partial {
- files.push(String::from(MANIFEST_BLOB_NAME));
+ files.push(BackupArchiveName::manifest().to_string());
 }

 BackupInfo {
--
2.39.5

From c.ebner at proxmox.com Wed Nov 13 11:50:02 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 13 Nov 2024 11:50:02 +0100
Subject: [pbs-devel] [PATCH v4 proxmox-backup 0/5] introduce dedicated archive name api type
Message-ID: <20241113105007.151258-1-c.ebner@proxmox.com>

There is currently no dedicated api type for the archive names, given as input parameters to several api methods. These patches introduce a dedicated type for archive names, in order to collect the code for checks and eventual mappings into one location and to reduce possible unintentional misuse by passing incorrect argument values to the functions and methods consuming the archive names.

Further, drop all archive name constants in favor of helper methods on the api type to generate `BackupArchiveName` instances for them. This allows for direct comparison with other `BackupArchiveName` instances.

As a positive side effect, the mapping now also allows the server archive type extensions to be optionally passed as input to several commands, e.g.

```
proxmox-backup-client restore <snapshot> <archive-name>.pxar.didx <target>
```

is now valid, being equal to

```
proxmox-backup-client restore <snapshot> <archive-name>.pxar <target>
```

Changes since version 3:
- Removed catchall fallback to blob type, reworked type parsing logic
- Removed archive name constants in favor of helper methods to generate archive names for them
- Extended tests

Changes since version 2:
- Rebased onto current master
- Amended commit messages

Changes since version 1 (thanks @Gabriel):
- Rebased onto current master
- Added unit tests for archive name parsing
- Added missing check for invalid archive names ending with '/'
- Inlined variable names for format strings
- Import implemented traits at top

Christian Ebner (5):
 datastore: move `ArchiveType` to api types
 api types: introduce `BackupArchiveName` type
 client/server: use dedicated api type for all archive names
 client: drop unused parse_archive_type helper
 api types: add unit tests for backup archive name parsing

 pbs-api-types/src/datastore.rs | 238 ++++++++++++++++++++++++++-
 pbs-client/src/backup_reader.rs | 18 +-
 pbs-client/src/backup_writer.rs | 45 +++--
 pbs-client/src/pxar/tools.rs | 3 +-
 pbs-client/src/tools/mod.rs | 28 ++--
 pbs-datastore/src/backup_info.rs | 21 +--
 pbs-datastore/src/datastore.rs | 6 +-
 pbs-datastore/src/lib.rs | 3 -
 pbs-datastore/src/manifest.rs | 55 +++----
 pbs-datastore/src/snapshot_reader.rs | 11 +-
 proxmox-backup-client/src/catalog.rs | 35 ++--
 proxmox-backup-client/src/helper.rs | 7 +-
 proxmox-backup-client/src/main.rs | 138 +++++++++-------
 proxmox-backup-client/src/mount.rs | 33 ++--
 proxmox-file-restore/src/main.rs | 13 +-
 src/api2/admin/datastore.rs | 70 ++++----
 src/api2/backup/mod.rs | 3 +-
 src/api2/reader/mod.rs | 7 +-
 src/api2/tape/restore.rs | 17 +-
 src/backup/mod.rs | 3 -
 src/backup/verify.rs | 7 +-
 src/bin/proxmox_backup_debug/diff.rs | 16 +-
 src/server/pull.rs | 24 +--
 src/server/sync.rs | 10 +-
 tests/prune.rs | 5 +-
 25 files changed, 522 insertions(+), 294 deletions(-)

--
2.39.5

From c.ebner at proxmox.com Wed Nov 13 11:50:03 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 13 Nov 2024 11:50:03 +0100
Subject:
[pbs-devel] [PATCH v4 proxmox-backup 1/5] datastore: move `ArchiveType` to api types
In-Reply-To: <20241113105007.151258-1-c.ebner@proxmox.com>
References: <20241113105007.151258-1-c.ebner@proxmox.com>
Message-ID: <20241113105007.151258-2-c.ebner@proxmox.com>

Moving the `ArchiveType` to avoid crate dependencies on `pbs-datastore`.

In preparation for introducing a dedicated `BackupArchiveName` api type, allowing the corresponding archive type variant to be set when parsing the archive name based on its filename.

Signed-off-by: Christian Ebner
---
changes since version 3:
- no changes

 pbs-api-types/src/datastore.rs | 23 ++++++++++++++++++++++-
 pbs-client/src/backup_writer.rs | 4 ++--
 pbs-datastore/src/datastore.rs | 6 +++---
 pbs-datastore/src/manifest.rs | 24 +-----------------------
 pbs-datastore/src/snapshot_reader.rs | 4 ++--
 proxmox-backup-client/src/main.rs | 12 +++++-------
 src/api2/backup/mod.rs | 3 +--
 src/api2/reader/mod.rs | 7 +++----
 src/api2/tape/restore.rs | 10 +++++-----
 src/backup/verify.rs | 7 ++++---
 src/server/pull.rs | 9 ++++-----
 11 files changed, 52 insertions(+), 57 deletions(-)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 31767417a..dfa6bb259 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -1,5 +1,5 @@
 use std::fmt;
-use std::path::PathBuf;
+use std::path::{Path, PathBuf};

 use anyhow::{bail, format_err, Error};
 use const_format::concatcp;
@@ -1569,3 +1569,24 @@ pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
 format!("datastore '{}', namespace '{}'", store, ns)
 }
 }
+
+#[derive(PartialEq, Eq)]
+/// Allowed variants of backup archives to be contained in a snapshot's manifest
+pub enum ArchiveType {
+ FixedIndex,
+ DynamicIndex,
+ Blob,
+}
+
+impl ArchiveType {
+ pub fn from_path(archive_name: impl AsRef<Path>) -> Result<Self, Error> {
+ let archive_name = archive_name.as_ref();
+ let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) {
+ Some("didx") => ArchiveType::DynamicIndex,
+ Some("fidx") => ArchiveType::FixedIndex,
+ Some("blob") => ArchiveType::Blob,
+ _ => bail!("unknown archive type: {archive_name:?}"),
+ };
+ Ok(archive_type)
+ }
+}
diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index 4d2e8a801..8adaf9ef2 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -11,12 +11,12 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;

-use pbs_api_types::{BackupDir, BackupNamespace};
+use pbs_api_types::{ArchiveType, BackupDir, BackupNamespace};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME};
+use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME};
 use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1};
 use pbs_tools::crypt_config::CryptConfig;

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index d0f3c53ac..d5419f881 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -18,8 +18,9 @@ use proxmox_sys::process_locker::ProcessLockSharedGuard;
 use proxmox_worker_task::WorkerTaskContext;

 use pbs_api_types::{
- Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreFSyncLevel,
- DatastoreTuning, GarbageCollectionStatus, MaintenanceMode, MaintenanceType, Operation, UPID,
+
GarbageCollectionStatus, MaintenanceMode, MaintenanceType, Operation, UPID, + ArchiveType, Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, + DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus, MaintenanceMode, + MaintenanceType, Operation, UPID, }; use crate::backup_info::{BackupDir, BackupGroup, BackupGroupDeleteStats}; @@ -28,7 +29,6 @@ use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter}; use crate::fixed_index::{FixedIndexReader, FixedIndexWriter}; use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive}; use crate::index::IndexFile; -use crate::manifest::ArchiveType; use crate::task_tracking::{self, update_active_operations}; use crate::DataBlob; diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index c3df01427..823c85003 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -1,11 +1,9 @@ -use std::path::Path; - use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; -use pbs_api_types::{BackupType, CryptMode, Fingerprint}; +use pbs_api_types::{ArchiveType, BackupType, CryptMode, Fingerprint}; use pbs_tools::crypt_config::CryptConfig; pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; @@ -56,26 +54,6 @@ pub struct BackupManifest { pub signature: Option, } -#[derive(PartialEq, Eq)] -pub enum ArchiveType { - FixedIndex, - DynamicIndex, - Blob, -} - -impl ArchiveType { - pub fn from_path(archive_name: impl AsRef) -> Result { - let archive_name = archive_name.as_ref(); - let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) { - Some("didx") => ArchiveType::DynamicIndex, - Some("fidx") => ArchiveType::FixedIndex, - Some("blob") => ArchiveType::Blob, - _ => bail!("unknown archive type: {:?}", archive_name), - }; - Ok(archive_type) - } -} - impl BackupManifest { pub fn new(snapshot: pbs_api_types::BackupDir) -> Self { Self { diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs index f9c772079..432701ea0 100644 --- a/pbs-datastore/src/snapshot_reader.rs +++ b/pbs-datastore/src/snapshot_reader.rs @@ -8,13 +8,13 @@ use nix::dir::Dir; use proxmox_sys::fs::lock_dir_noblock_shared; -use pbs_api_types::{print_store_and_ns, BackupNamespace, Operation}; +use pbs_api_types::{print_store_and_ns, ArchiveType, BackupNamespace, Operation}; use crate::backup_info::BackupDir; use crate::dynamic_index::DynamicIndexReader; use crate::fixed_index::FixedIndexReader; use crate::index::IndexFile; -use crate::manifest::{ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use crate::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use crate::DataStore; /// Helper to access the contents of a datastore backup snapshot diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index e4034aa99..f6fb3555e 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -25,10 +25,10 @@ use pxar::accessor::aio::Accessor; use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ - Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, ClientRateLimitConfig, - CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, - SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, - BACKUP_TYPE_SCHEMA, + ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, + ClientRateLimitConfig, 
CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, + RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, + BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, }; use pbs_client::catalog_shell::Shell; use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef}; @@ -54,9 +54,7 @@ use pbs_datastore::chunk_store::verify_chunk_size; use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ - ArchiveType, BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, -}; +use pbs_datastore::manifest::{BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::CATALOG_NAME; use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig}; diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index ea0d0292e..92e79a267 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -19,13 +19,12 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, + ArchiveType, Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::ArchiveType; use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1}; use pbs_tools::json::{required_array_param, required_integer_param, required_string_param}; diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs index 23051653e..50f80de43 100644 --- a/src/api2/reader/mod.rs +++ b/src/api2/reader/mod.rs @@ -19,13 +19,12 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ - Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, + ArchiveType, Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, + DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::ArchiveType; use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1}; use pbs_tools::json::required_string_param; diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index f7481bacc..a180a4b02 100644 --- a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -19,10 +19,10 @@ use proxmox_uuid::Uuid; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode, - NotificationMode, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, - DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, - PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, + parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupDir, BackupNamespace, + CryptMode, 
NotificationMode, Operation, TapeRestoreNamespace, Userid, + DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, + PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, }; use pbs_client::pxar::tools::handle_root_with_optional_format_version_prelude; @@ -30,7 +30,7 @@ use pbs_config::CachedUserInfo; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME}; use pbs_datastore::{DataBlob, DataStore}; use pbs_tape::{ BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, diff --git a/src/backup/verify.rs b/src/backup/verify.rs index 6ef7e8eb3..fee6ecf5f 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -11,12 +11,13 @@ use proxmox_sys::fs::lock_dir_noblock_shared; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode, - SnapshotVerifyState, VerifyState, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY, UPID, + print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupNamespace, BackupType, + CryptMode, SnapshotVerifyState, VerifyState, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY, + UPID, }; use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo}; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ArchiveType, BackupManifest, FileInfo}; +use pbs_datastore::manifest::{BackupManifest, FileInfo}; use pbs_datastore::{DataBlob, DataStore, StoreProgress}; use crate::tools::parallel_handler::ParallelHandler; diff --git a/src/server/pull.rs b/src/server/pull.rs index d9584776e..8d53ccd6e 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -11,8 +11,9 @@ use proxmox_human_byte::HumanByte; use tracing::info; use pbs_api_types::{ - print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + print_store_and_ns, ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, + Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, + PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -20,9 +21,7 @@ use pbs_datastore::data_blob::DataBlob; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ - ArchiveType, BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, -}; +use pbs_datastore::manifest::{BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{check_backup_owner, DataStore, StoreProgress}; use pbs_tools::sha::sha256; -- 2.39.5 From d.csapak at proxmox.com Wed Nov 13 12:37:42 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Wed, 13 Nov 2024 12:37:42 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2] fix #5233: don't require root for some tape operations Message-ID: <20241113113742.2278769-1-d.csapak@proxmox.com> instead, require 'Tape.Write' or 'Tape.Modify' on '/tape' path. 
This makes it possible for a TapeOperator to destroy tapes and for a TapeAdmin to update the tape status, instead of just root@pam.

I opted for the path '/tape' since we don't have a dedicated acl structure for single tapes, just '/tape/pool' (which does not apply since not all tapes have to have a pool), '/tape/device' (which is intended for drives/changers) and '/tape/jobs' (which is for jobs only).

Also we use that path for e.g. move_tape already.

Signed-off-by: Dominik Csapak
---
changes from v1:
* rebase on master
* change permission required for update status to TAPE_MODIFY

 src/api2/tape/media.rs | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/api2/tape/media.rs b/src/api2/tape/media.rs
index 2ed3e961..9d22ca1a 100644
--- a/src/api2/tape/media.rs
+++ b/src/api2/tape/media.rs
@@ -9,7 +9,7 @@ use proxmox_uuid::Uuid;
 use pbs_api_types::{
 Authid, MediaContentEntry, MediaContentListFilter, MediaListEntry, MediaPoolConfig,
 MediaSetListEntry, MediaStatus, CHANGER_NAME_SCHEMA, MEDIA_LABEL_SCHEMA,
- MEDIA_POOL_NAME_SCHEMA, MEDIA_UUID_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
+ MEDIA_POOL_NAME_SCHEMA, MEDIA_UUID_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY, PRIV_TAPE_WRITE,
 VAULT_NAME_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
@@ -366,6 +366,9 @@ pub fn move_tape(
 },
 },
 },
+ access: {
+ permission: &Permission::Privilege(&["tape"], PRIV_TAPE_WRITE, false),
+ },
)]
/// Destroy media (completely remove from database)
pub fn destroy_media(
@@ -557,6 +560,9 @@ pub fn get_media_status(uuid: Uuid) -> Result<MediaStatus, Error> {
 },
 },
 },
+ access: {
+ permission: &Permission::Privilege(&["tape"], PRIV_TAPE_MODIFY, false),
+ },
)]
/// Update media status (None, 'full', 'damaged' or 'retired')
///
--
2.39.5

From d.csapak at proxmox.com Wed Nov 13 12:38:28 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Wed, 13 Nov 2024 12:38:28 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] fix #5233: don't require root for some tape operations
In-Reply-To: <20240910070818.268267-1-d.csapak@proxmox.com>
References: <20240910070818.268267-1-d.csapak@proxmox.com>
Message-ID: <7a1b236c-27c6-48bd-8c3e-fd7cc9ef237b@proxmox.com>

sent a v2:
https://lore.proxmox.com/pbs-devel/20241113113742.2278769-1-d.csapak@proxmox.com/

From s.sterz at proxmox.com Wed Nov 13 12:41:12 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Wed, 13 Nov 2024 12:41:12 +0100
Subject: [pbs-devel] [PATCH vma-to-pbs v5 1/4] add support for bulk import of a dump directory
In-Reply-To: <20241111130822.124584-2-f.schauer@proxmox.com>
References: <20241111130822.124584-1-f.schauer@proxmox.com> <20241111130822.124584-2-f.schauer@proxmox.com>
Message-ID:

comments in-line:

On Mon Nov 11, 2024 at 2:08 PM CET, Filip Schauer wrote:
> When a path to a directory is provided in the vma_file argument, try to
> upload all VMA backups in the directory. This also handles compressed
> VMA files, notes and logs. If a vmid is specified with --vmid, only the
> backups of that particular vmid are uploaded.
> > This is intended for use on a dump directory: > > PBS_FINGERPRINT='PBS_FINGERPRINT' vma-to-pbs \ > --repository 'user at realm!token at server:port:datastore' \ > /var/lib/vz/dump > > Signed-off-by: Filip Schauer > --- > Cargo.toml | 2 + > src/main.rs | 167 +++++++++++++++++++++++++++++++++++++++++++++---- > src/vma2pbs.rs | 64 ++++++++++++++++--- > 3 files changed, 214 insertions(+), 19 deletions(-) > > diff --git a/Cargo.toml b/Cargo.toml > index cd13426..ad80304 100644 > --- a/Cargo.toml > +++ b/Cargo.toml > @@ -7,9 +7,11 @@ edition = "2021" > [dependencies] > anyhow = "1.0" > bincode = "1.3" > +chrono = "0.4" > hyper = "0.14.5" > pico-args = "0.5" > md5 = "0.7.0" > +regex = "1.7" > scopeguard = "1.1.0" > serde = "1.0" > serde_json = "1.0" > diff --git a/src/main.rs b/src/main.rs > index 3e25591..a394078 100644 > --- a/src/main.rs > +++ b/src/main.rs > @@ -1,26 +1,35 @@ > +use std::collections::HashMap; > use std::ffi::OsString; > +use std::fs::read_dir; > +use std::io::{BufRead, BufReader, Write}; > +use std::path::PathBuf; > > use anyhow::{bail, Context, Error}; > +use chrono::NaiveDateTime; > use proxmox_sys::linux::tty; > use proxmox_time::epoch_i64; > +use regex::Regex; > > mod vma; > mod vma2pbs; > -use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, PbsArgs, VmaBackupArgs}; > +use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, Compression, PbsArgs, VmaBackupArgs}; > > const CMD_HELP: &str = "\ > Usage: vma-to-pbs [OPTIONS] --repository --vmid [vma_file] > > Arguments: > - [vma_file] > + [vma_file | dump_directory] > > Options: > --repository > Repository URL > [--ns ] > Namespace > - --vmid > + [--vmid ] nit: this is marked as optional here (and in the code), but the usage line above still make it look like it's required. > Backup ID > + This is required if a single VMA file is provided. > + If not specified, bulk import all VMA backups in the provided directory. > + If specified with a dump directory, only import backups of the specified vmid. > [--backup-time ] > Backup timestamp > --fingerprint > @@ -41,6 +50,8 @@ Options: > File containing a comment/notes > [--log-file ] > Log file > + -y, --yes > + Automatic yes to prompts > -h, --help > Print help > -V, --version > @@ -52,7 +63,16 @@ fn parse_args() -> Result { > args.remove(0); // remove the executable path. 
> > let mut first_later_args_index = 0; > - let options = ["-h", "--help", "-c", "--compress", "-e", "--encrypt"]; > + let options = [ > + "-h", > + "--help", > + "-c", > + "--compress", > + "-e", > + "--encrypt", > + "-y", > + "--yes", > + ]; > > for (i, arg) in args.iter().enumerate() { > if let Some(arg) = arg.to_str() { > @@ -87,7 +107,7 @@ fn parse_args() -> Result { > > let pbs_repository = args.value_from_str("--repository")?; > let namespace = args.opt_value_from_str("--ns")?; > - let vmid = args.value_from_str("--vmid")?; > + let vmid: Option = args.opt_value_from_str("--vmid")?; > let backup_time: Option = args.opt_value_from_str("--backup-time")?; > let backup_time = backup_time.unwrap_or_else(epoch_i64); > let fingerprint = args.opt_value_from_str("--fingerprint")?; > @@ -99,6 +119,7 @@ fn parse_args() -> Result { > let key_password_file: Option = args.opt_value_from_str("--key-password-file")?; > let notes_file: Option = args.opt_value_from_str("--notes-file")?; > let log_file_path: Option = args.opt_value_from_str("--log-file")?; > + let yes = args.contains(["-y", "--yes"]); > > match (encrypt, keyfile.is_some()) { > (true, false) => bail!("--encrypt requires a --keyfile!"), > @@ -196,15 +217,137 @@ fn parse_args() -> Result { > encrypt, > }; > > - let vma_args = VmaBackupArgs { > - vma_file_path: vma_file_path.cloned(), > - backup_id: vmid, > - backup_time, > - notes, > - log_file_path, > + let bulk = > + vma_file_path > + .map(PathBuf::from) > + .and_then(|path| if path.is_dir() { Some(path) } else { None }); > + > + let grouped_vmas = if let Some(dump_dir_path) = bulk { > + let re = Regex::new( > + r"vzdump-qemu-(\d+)-(\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2}).vma(|.zst|.lzo|.gz)$", > + )?; > + > + let mut vmas = Vec::new(); > + > + for entry in read_dir(dump_dir_path)? { > + let entry = entry?; > + let path = entry.path(); > + > + if !path.is_file() { > + continue; > + } > + > + if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) { > + let Some((_, [backup_id, timestr, ext])) = > + re.captures(file_name).map(|c| c.extract()) > + else { > + // Skip the file, since it is not a VMA backup > + continue; > + }; > + > + if let Some(ref vmid) = vmid { > + if backup_id != vmid { > + // Skip the backup, since it does not match the specified vmid > + continue; > + } > + } > + > + let compression = match ext { > + "" => None, > + ".zst" => Some(Compression::Zstd), > + ".lzo" => Some(Compression::Lzo), > + ".gz" => Some(Compression::GZip), > + _ => bail!("Unexpected file extension: {ext}"), > + }; > + > + let backup_time = NaiveDateTime::parse_from_str(timestr, "%Y_%m_%d-%H_%M_%S")? 
> + .and_utc() > + .timestamp(); > + > + let notes_path = path.with_file_name(format!("{}.notes", file_name)); > + let notes = proxmox_sys::fs::file_read_optional_string(notes_path)?; > + > + let log_path = path.with_file_name(format!("{}.log", file_name)); > + let log_file_path = if log_path.exists() { > + Some(log_path.to_path_buf().into_os_string()) > + } else { > + None > + }; > + > + let backup_args = VmaBackupArgs { > + vma_file_path: Some(path.clone().into()), > + compression, > + backup_id: backup_id.to_string(), > + backup_time, > + notes, > + log_file_path, > + }; > + vmas.push(backup_args); > + } > + } > + > + vmas.sort_by_key(|d| d.backup_time); > + let total_vma_count = vmas.len(); > + let grouped_vmas = vmas.into_iter().fold( > + HashMap::new(), > + |mut grouped: HashMap>, vma_args| { > + grouped > + .entry(vma_args.backup_id.clone()) > + .or_default() > + .push(vma_args); > + grouped > + }, > + ); > + > + if grouped_vmas.is_empty() { > + bail!("Did not find any backup archives"); > + } > + > + println!( > + "Found {} backup archive(s) of {} different VMID(s):", > + total_vma_count, > + grouped_vmas.len() > + ); > + > + for (backup_id, vma_group) in &grouped_vmas { > + println!("- VMID {}: {} backups", backup_id, vma_group.len()); nit: this should be: ```rs println!("- VMID {backup_id}: {} backups", vma_group.len()); ``` > + } > + > + if !yes { > + eprint!("Proceed with the bulk import? (Y/n): "); > + std::io::stdout().flush()?; > + let mut line = String::new(); > + > + BufReader::new(std::io::stdin()).read_line(&mut line)?; > + let trimmed = line.trim(); > + match trimmed { > + "y" | "Y" | "" => {} > + "n" | "N" => bail!("Bulk import was not confirmed."), > + _ => bail!("Unexpected choice '{trimmed}'!"), > + } > + } > + > + grouped_vmas > + } else if let Some(vmid) = vmid { > + HashMap::from([( > + vmid.clone(), > + vec![VmaBackupArgs { > + vma_file_path: vma_file_path.cloned(), > + compression: None, > + backup_id: vmid, > + backup_time, > + notes, > + log_file_path, > + }], > + )]) > + } else { > + bail!("No vmid specified for single backup file"); > }; > > - let options = BackupVmaToPbsArgs { pbs_args, vma_args }; > + let options = BackupVmaToPbsArgs { > + pbs_args, > + grouped_vmas, > + }; > > Ok(options) > } > diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs > index a888a7b..95ede9b 100644 > --- a/src/vma2pbs.rs > +++ b/src/vma2pbs.rs > @@ -4,6 +4,7 @@ use std::collections::HashMap; > use std::ffi::{c_char, CStr, CString, OsString}; > use std::fs::File; > use std::io::{stdin, BufRead, BufReader, Read}; > +use std::process::{Command, Stdio}; > use std::ptr; > use std::time::SystemTime; > > @@ -30,7 +31,7 @@ const VMA_CLUSTER_SIZE: usize = 65536; > > pub struct BackupVmaToPbsArgs { > pub pbs_args: PbsArgs, > - pub vma_args: VmaBackupArgs, > + pub grouped_vmas: HashMap>, > } > > pub struct PbsArgs { > @@ -45,8 +46,15 @@ pub struct PbsArgs { > pub encrypt: bool, > } > > +pub enum Compression { > + Zstd, > + Lzo, > + GZip, > +} > + > pub struct VmaBackupArgs { > pub vma_file_path: Option, > + pub compression: Option, > pub backup_id: String, > pub backup_time: i64, > pub notes: Option, > @@ -467,7 +475,19 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { > > let start_transfer_time = SystemTime::now(); > > - upload_vma_file(pbs_args, &args.vma_args)?; > + for (_, vma_group) in args.grouped_vmas { > + for backup_args in vma_group { > + if let Err(e) = upload_vma_file(pbs_args, &backup_args) { > + eprintln!( > + "Failed to upload vma file at {:?} - {}", > + 
backup_args.vma_file_path.unwrap_or("(stdin)".into()), > + e nit: same as above, move `e` into the format string > + ); > + println!("Skipping VMID {}", backup_args.backup_id); > + break; > + } > + } > + } > > let transfer_duration = SystemTime::now().duration_since(start_transfer_time)?; > let total_seconds = transfer_duration.as_secs(); > @@ -480,13 +500,43 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { > } > > fn upload_vma_file(pbs_args: &PbsArgs, backup_args: &VmaBackupArgs) -> Result<(), Error> { > - let vma_file: Box = match &backup_args.vma_file_path { > - Some(vma_file_path) => match File::open(vma_file_path) { > - Err(why) => return Err(anyhow!("Couldn't open file: {}", why)), > - Ok(file) => Box::new(BufReader::new(file)), > + match &backup_args.vma_file_path { > + Some(vma_file_path) => println!("Uploading VMA backup from {:?}", vma_file_path), nit: this could be ```rs Some(vma_file_path) => println!("Uploading VMA backup from {vma_file_path:?}"), ``` > + None => println!("Uploading VMA backup from (stdin)"), > + }; > + > + let vma_file: Box = match &backup_args.compression { > + Some(compression) => { > + let vma_file_path = backup_args > + .vma_file_path > + .as_ref() > + .expect("No VMA file path provided"); > + let mut cmd = match compression { > + Compression::Zstd => { > + let mut cmd = Command::new("zstd"); > + cmd.args(["-q", "-d", "-c"]); > + cmd i think the following would be more elegant here: ```rs Compression::Zstd => Command::new("zstd") .args(["-q", "-d", "-c"]), ``` it's a bit more concise imo > + } > + Compression::Lzo => { > + let mut cmd = Command::new("lzop"); > + cmd.args(["-d", "-c"]); > + cmd same as above > + } > + Compression::GZip => Command::new("zcat"), > + }; > + let process = cmd.arg(vma_file_path).stdout(Stdio::piped()).spawn()?; > + let stdout = process.stdout.expect("Failed to capture stdout"); > + Box::new(BufReader::new(stdout)) > + } > + None => match &backup_args.vma_file_path { > + Some(vma_file_path) => match File::open(vma_file_path) { > + Err(why) => return Err(anyhow!("Couldn't open file: {}", why)), nit: `why` can be moved into the format string here > + Ok(file) => Box::new(BufReader::new(file)), > + }, > + None => Box::new(BufReader::new(stdin())), > }, > - None => Box::new(BufReader::new(stdin())), > }; > + > let vma_reader = VmaReader::new(vma_file)?; > > let pbs = create_pbs_backup_task(pbs_args, backup_args)?; From s.sterz at proxmox.com Wed Nov 13 12:41:26 2024 From: s.sterz at proxmox.com (Shannon Sterz) Date: Wed, 13 Nov 2024 12:41:26 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 4/4] log device upload progress as a percentage In-Reply-To: <20241111130822.124584-5-f.schauer@proxmox.com> References: <20241111130822.124584-1-f.schauer@proxmox.com> <20241111130822.124584-5-f.schauer@proxmox.com> Message-ID: comments in-line: On Mon Nov 11, 2024 at 2:08 PM CET, Filip Schauer wrote: > Log the upload progress of a device as a percentage with log level info > every 1000 chunks. 
> > Signed-off-by: Filip Schauer > --- > src/vma2pbs.rs | 9 +++++++++ > 1 file changed, 9 insertions(+) > > diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs > index 0517251..f469053 100644 > --- a/src/vma2pbs.rs > +++ b/src/vma2pbs.rs > @@ -6,6 +6,8 @@ use std::fs::File; > use std::io::{stdin, BufRead, BufReader, Read}; > use std::process::{Command, Stdio}; > use std::ptr; > +use std::sync::atomic::{AtomicU64, Ordering}; > +use std::sync::Arc; > use std::time::SystemTime; > > use anyhow::{anyhow, bail, Error}; > @@ -234,6 +236,8 @@ where > non_zero_mask: u64, > } > > + let chunk_stats = Arc::new([const { AtomicU64::new(0) }; VMA_MAX_DEVICES]); > + > let images_chunks: RefCell>> = > RefCell::new(HashMap::new()); > > @@ -284,6 +288,11 @@ where > pbs_chunk_offset, > pbs_chunk_offset + pbs_chunk_size, > ); > + let chunk_stat = chunk_stats[dev_id as usize].fetch_add(1, Ordering::SeqCst); > + if (chunk_stat % 1000) == 0 { > + let percentage = 100 * PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE * chunk_stat / device_size; > + log::info!("\tUploading dev_id: {} ({}%)", dev_id, percentage); nit: format string > + } > > let mut pbs_err: *mut c_char = ptr::null_mut(); > Other than the nits across the four patches, consider this series: Reviewed-by: Shannon Sterz From s.sterz at proxmox.com Wed Nov 13 12:41:15 2024 From: s.sterz at proxmox.com (Shannon Sterz) Date: Wed, 13 Nov 2024 12:41:15 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 2/4] add option to skip vmids whose backups failed to upload In-Reply-To: <20241111130822.124584-3-f.schauer@proxmox.com> References: <20241111130822.124584-1-f.schauer@proxmox.com> <20241111130822.124584-3-f.schauer@proxmox.com> Message-ID: comments in-line: On Mon Nov 11, 2024 at 2:08 PM CET, Filip Schauer wrote: > Signed-off-by: Filip Schauer > --- > src/main.rs | 6 ++++++ > src/vma2pbs.rs | 13 ++++++++++--- > 2 files changed, 16 insertions(+), 3 deletions(-) > > diff --git a/src/main.rs b/src/main.rs > index a394078..d4b36fa 100644 > --- a/src/main.rs > +++ b/src/main.rs > @@ -50,6 +50,9 @@ Options: > File containing a comment/notes > [--log-file ] > Log file > + --skip-failed > + Skip VMIDs that failed to be uploaded and continue onto the next VMID if a dump directory > + is specified. 
> -y, --yes > Automatic yes to prompts > -h, --help > @@ -70,6 +73,7 @@ fn parse_args() -> Result { > "--compress", > "-e", > "--encrypt", > + "--skip-failed", > "-y", > "--yes", > ]; > @@ -119,6 +123,7 @@ fn parse_args() -> Result { > let key_password_file: Option = args.opt_value_from_str("--key-password-file")?; > let notes_file: Option = args.opt_value_from_str("--notes-file")?; > let log_file_path: Option = args.opt_value_from_str("--log-file")?; > + let skip_failed = args.contains("--skip-failed"); > let yes = args.contains(["-y", "--yes"]); > > match (encrypt, keyfile.is_some()) { > @@ -347,6 +352,7 @@ fn parse_args() -> Result { > let options = BackupVmaToPbsArgs { > pbs_args, > grouped_vmas, > + skip_failed, > }; > > Ok(options) > diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs > index 95ede9b..a5b4027 100644 > --- a/src/vma2pbs.rs > +++ b/src/vma2pbs.rs > @@ -32,6 +32,7 @@ const VMA_CLUSTER_SIZE: usize = 65536; > pub struct BackupVmaToPbsArgs { > pub pbs_args: PbsArgs, > pub grouped_vmas: HashMap>, > + pub skip_failed: bool, > } > > pub struct PbsArgs { > @@ -478,13 +479,19 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { > for (_, vma_group) in args.grouped_vmas { > for backup_args in vma_group { > if let Err(e) = upload_vma_file(pbs_args, &backup_args) { > - eprintln!( > + let err_msg = format!( > "Failed to upload vma file at {:?} - {}", > backup_args.vma_file_path.unwrap_or("(stdin)".into()), > e nit: i'd move `e` into the format string, since you are basically already touching these lines :) > ); > - println!("Skipping VMID {}", backup_args.backup_id); > - break; > + > + if args.skip_failed { > + eprintln!("{}", err_msg); > + println!("Skipping VMID {}", backup_args.backup_id); > + break; > + } else { > + bail!(err_msg); > + } > } > } > } From s.sterz at proxmox.com Wed Nov 13 12:41:21 2024 From: s.sterz at proxmox.com (Shannon Sterz) Date: Wed, 13 Nov 2024 12:41:21 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 3/4] use level-based logging instead of println In-Reply-To: <20241111130822.124584-4-f.schauer@proxmox.com> References: <20241111130822.124584-1-f.schauer@proxmox.com> <20241111130822.124584-4-f.schauer@proxmox.com> Message-ID: comments in-line: On Mon Nov 11, 2024 at 2:08 PM CET, Filip Schauer wrote: > Use log level "info" by default and prevent spamming messages for every > single chunk uploaded. To re-enable these messages, set the RUST_LOG > environment variable to "debug". 
> > Signed-off-by: Filip Schauer > --- > Cargo.toml | 2 ++ > src/main.rs | 28 ++++++++++++++++++++++------ > src/vma2pbs.rs | 38 ++++++++++++++++++++------------------ > 3 files changed, 44 insertions(+), 24 deletions(-) > > diff --git a/Cargo.toml b/Cargo.toml > index ad80304..7951bbc 100644 > --- a/Cargo.toml > +++ b/Cargo.toml > @@ -8,7 +8,9 @@ edition = "2021" > anyhow = "1.0" > bincode = "1.3" > chrono = "0.4" > +env_logger = "0.10" > hyper = "0.14.5" > +log = "0.4" > pico-args = "0.5" > md5 = "0.7.0" > regex = "1.7" > diff --git a/src/main.rs b/src/main.rs > index d4b36fa..203196b 100644 > --- a/src/main.rs > +++ b/src/main.rs > @@ -6,6 +6,7 @@ use std::path::PathBuf; > > use anyhow::{bail, Context, Error}; > use chrono::NaiveDateTime; > +use env_logger::Target; > use proxmox_sys::linux::tty; > use proxmox_time::epoch_i64; > use regex::Regex; > @@ -128,7 +129,7 @@ fn parse_args() -> Result { > > match (encrypt, keyfile.is_some()) { > (true, false) => bail!("--encrypt requires a --keyfile!"), > - (false, true) => println!( > + (false, true) => log::info!( > "--keyfile given, but --encrypt not set -> backup will be signed, but not encrypted!" > ), > _ => {} > @@ -190,7 +191,7 @@ fn parse_args() -> Result { > > Some(key_password) > } else if vma_file_path.is_none() { > - println!( > + log::info!( > "Please use --key-password-file to provide the password when passing the VMA file \ > to stdin, if required." > ); > @@ -246,13 +247,17 @@ fn parse_args() -> Result { > let Some((_, [backup_id, timestr, ext])) = > re.captures(file_name).map(|c| c.extract()) > else { > - // Skip the file, since it is not a VMA backup > + log::debug!("Skip \"{file_name}\", since it is not a VMA backup"); > continue; > }; > > if let Some(ref vmid) = vmid { > if backup_id != vmid { > - // Skip the backup, since it does not match the specified vmid > + log::debug!( > + "Skip backup with VMID {}, since it does not match specified VMID {}", > + backup_id, > + vmid nit: you can use format strings here > + ); > continue; > } > } > @@ -308,14 +313,14 @@ fn parse_args() -> Result { > bail!("Did not find any backup archives"); > } > > - println!( > + log::info!( > "Found {} backup archive(s) of {} different VMID(s):", > total_vma_count, > grouped_vmas.len() > ); > > for (backup_id, vma_group) in &grouped_vmas { > - println!("- VMID {}: {} backups", backup_id, vma_group.len()); > + log::info!("- VMID {}: {} backups", backup_id, vma_group.len()); > } nit: if you are already touching this, move this over to format strings as well > > if !yes { > @@ -358,7 +363,18 @@ fn parse_args() -> Result { > Ok(options) > } > > +fn init_cli_logger() { > + env_logger::Builder::from_env(env_logger::Env::new().filter_or("RUST_LOG", "info")) > + .format_level(false) > + .format_target(false) > + .format_timestamp(None) > + .target(Target::Stdout) > + .init(); > +} > + > fn main() -> Result<(), Error> { > + init_cli_logger(); > + > let args = parse_args()?; > vma2pbs(args)?; > > diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs > index a5b4027..0517251 100644 > --- a/src/vma2pbs.rs > +++ b/src/vma2pbs.rs > @@ -82,8 +82,8 @@ fn create_pbs_backup_task( > pbs_args: &PbsArgs, > backup_args: &VmaBackupArgs, > ) -> Result<*mut ProxmoxBackupHandle, Error> { > - println!( > - "backup time: {}", > + log::info!( > + "\tbackup time: {}", > epoch_to_rfc3339(backup_args.backup_time)? 
> ); > > @@ -152,7 +152,7 @@ where > let config_name = config.name; > let config_data = config.content; > > - println!("CFG: size: {} name: {}", config_data.len(), config_name); > + log::info!("\tCFG: size: {} name: {}", config_data.len(), config_name); nit: move `config_name` into the format string > > let config_name_cstr = CString::new(config_name)?; > > @@ -190,9 +190,11 @@ where > let device_name = vma_reader.get_device_name(device_id.try_into()?)?; > let device_size = vma_reader.get_device_size(device_id.try_into()?)?; > > - println!( > - "DEV: dev_id={} size: {} devname: {}", > - device_id, device_size, device_name > + log::info!( > + "\tDEV: dev_id={} size: {} devname: {}", nit: format string > + device_id, > + device_size, > + device_name > ); > > let device_name_cstr = CString::new(device_name)?; > @@ -276,8 +278,8 @@ where > }; > > let pbs_upload_chunk = |pbs_chunk_buffer: Option<&[u8]>| { > - println!( > - "Uploading dev_id: {} offset: {:#0X} - {:#0X}", > + log::debug!( > + "\tUploading dev_id: {} offset: {:#0X} - {:#0X}", nit: format string, for example: `\tUploading dev_id: {dev_id} offset: {pbs_chunk_offset:#0X} - {:#0X}` > dev_id, > pbs_chunk_offset, > pbs_chunk_offset + pbs_chunk_size, > @@ -466,13 +468,13 @@ fn set_notes( > > pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { > let pbs_args = &args.pbs_args; > - println!("PBS repository: {}", pbs_args.pbs_repository); > + log::info!("PBS repository: {}", pbs_args.pbs_repository); > if let Some(ns) = &pbs_args.namespace { > - println!("PBS namespace: {}", ns); > + log::info!("PBS namespace: {}", ns); nit: format string > } > - println!("PBS fingerprint: {}", pbs_args.fingerprint); > - println!("compress: {}", pbs_args.compress); > - println!("encrypt: {}", pbs_args.encrypt); > + log::info!("PBS fingerprint: {}", pbs_args.fingerprint); > + log::info!("compress: {}", pbs_args.compress); > + log::info!("encrypt: {}", pbs_args.encrypt); > > let start_transfer_time = SystemTime::now(); > > @@ -486,8 +488,8 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { > ); > > if args.skip_failed { > - eprintln!("{}", err_msg); > - println!("Skipping VMID {}", backup_args.backup_id); > + log::warn!("{}", err_msg); > + log::info!("Skipping VMID {}", backup_args.backup_id); > break; > } else { > bail!(err_msg); > @@ -501,15 +503,15 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { > let minutes = total_seconds / 60; > let seconds = total_seconds % 60; > let milliseconds = transfer_duration.as_millis() % 1000; > - println!("Backup finished within {minutes} minutes, {seconds} seconds and {milliseconds} ms"); > + log::info!("Backup finished within {minutes} minutes, {seconds} seconds and {milliseconds} ms"); > > Ok(()) > } > > fn upload_vma_file(pbs_args: &PbsArgs, backup_args: &VmaBackupArgs) -> Result<(), Error> { > match &backup_args.vma_file_path { > - Some(vma_file_path) => println!("Uploading VMA backup from {:?}", vma_file_path), > - None => println!("Uploading VMA backup from (stdin)"), > + Some(vma_file_path) => log::info!("Uploading VMA backup from {:?}", vma_file_path), nit: format string > + None => log::info!("Uploading VMA backup from (stdin)"), > }; > > let vma_file: Box = match &backup_args.compression { From h.laimer at proxmox.com Wed Nov 13 13:40:47 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 13:40:47 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] chunk_store: fix problem with permission checking Message-ID: 
<20241113124047.97456-1-h.laimer@proxmox.com>

Permissions are stored in the lower 9 bits (rwxrwxrwx), so we have to mask `st_mode` with 0o777. The datastore root dir is created with 755, the `.chunks` dir and its contents with 750, and the `.lock` file with 644; this changes the expected permissions accordingly.

Signed-off-by: Hannes Laimer
---
 pbs-datastore/src/chunk_store.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index 38a88584..29d5874a 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -576,7 +576,7 @@ impl ChunkStore {
 Ok(stat) => {
 if stat.st_uid != u32::from(pbs_config::backup_user()?.uid)
 || stat.st_gid != u32::from(pbs_config::backup_group()?.gid)
- || stat.st_mode != file_mode
+ || stat.st_mode & 0o777 != file_mode
 {
 bail!(
 "unable to open existing chunk store path {:?} - permissions or owner not correct",
@@ -598,22 +598,22 @@ impl ChunkStore {
 /// subdirectories and the lock file.
 pub fn verify_chunkstore<T: AsRef<Path>>(path: T) -> Result<(), Error> {
 // Check datastore root path perm/owner
- ChunkStore::check_permissions(path.as_ref(), 0o700)?;
+ ChunkStore::check_permissions(path.as_ref(), 0o755)?;

 let chunk_dir = Self::chunk_dir(path.as_ref());
 // Check datastore .chunks path perm/owner
- ChunkStore::check_permissions(&chunk_dir, 0o700)?;
+ ChunkStore::check_permissions(&chunk_dir, 0o750)?;

 // Check all .chunks subdirectories
 for i in 0..64 * 1024 {
 let mut l1path = chunk_dir.clone();
 l1path.push(format!("{:04x}", i));
- ChunkStore::check_permissions(&l1path, 0o700)?;
+ ChunkStore::check_permissions(&l1path, 0o750)?;
 }

 // Check .lock file
 let lockfile_path = Self::lockfile_path(path.as_ref());
- ChunkStore::check_permissions(lockfile_path, 0o600)?;
+ ChunkStore::check_permissions(lockfile_path, 0o644)?;
 Ok(())
 }
}
--
2.39.5

From c.ebner at proxmox.com Wed Nov 13 14:45:55 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 13 Nov 2024 14:45:55 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 4/4] fix #5853: client: pxar: exclude stale files on metadata read
In-Reply-To: <1731330267.220wgkqhtc.astroid@yuna.none>
References: <20241105140153.282980-1-c.ebner@proxmox.com> <20241105140153.282980-5-c.ebner@proxmox.com> <1731330267.220wgkqhtc.astroid@yuna.none>
Message-ID: <39def709-3df0-4182-8179-8be4022f0f9e@proxmox.com>

On 11/11/24 14:37, Fabian Grünbichler wrote:
> behaviour wise this seems okay to me, but if possible, I'd avoid all the
> return value tuples, see detailed comments below..

Agreed, I am not a fan of passing the stale file handle error info along as the boolean in the return value either. But unfortunately, passing along the error without losing pre-existing error context and switching all the `get_metadata` related functions to return an `Errno` is not possible as is. E.g. `process_acl` returns an `anyhow::Error` (could be defined to return e.g. an `Errno::EINVAL` instead?), there is special handling of `Errno::E2BIG` for the xattr case only, ... The current approach was chosen to keep the current anyhow error context close to the actual errors when they occur.

> On November 5, 2024 3:01 pm, Christian Ebner wrote:
>> Skip and warn the user for files which returned a stale file handle
>> error while reading the metadata associated to that file.
>>
>> Instead of returning with an error when getting the metadata, return
>> a boolean flag signaling if a stale file handle has been encountered.
>> >> Link to issue in bugtracker: >> https://bugzilla.proxmox.com/show_bug.cgi?id=5853 >> >> Link to thread in community forum: >> https://forum.proxmox.com/threads/156822/ >> >> Signed-off-by: Christian Ebner >> --- >> pbs-client/src/pxar/create.rs | 100 ++++++++++++++++++++++------------ >> 1 file changed, 66 insertions(+), 34 deletions(-) >> >> diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs >> index 2a844922c..85be00db4 100644 >> --- a/pbs-client/src/pxar/create.rs >> +++ b/pbs-client/src/pxar/create.rs >> @@ -228,7 +228,7 @@ where >> let mut fs_feature_flags = Flags::from_magic(fs_magic); >> >> let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?; >> - let metadata = get_metadata( >> + let (metadata, stale_fd) = get_metadata( > > stale_fd here is not used at all.. Yes, that one should lead to a hard error as you mentioned, so should be handled accordingly. I will adapt this to be a hard error. > >> source_dir.as_raw_fd(), >> &stat, >> feature_flags & fs_feature_flags, >> @@ -744,7 +744,7 @@ impl Archiver { >> return Ok(()); >> } >> >> - let metadata = get_metadata( >> + let (metadata, stale_fd) = get_metadata( > > this one is used > >> fd.as_raw_fd(), >> stat, >> self.flags(), >> @@ -753,6 +753,11 @@ impl Archiver { >> self.skip_e2big_xattr, >> )?; >> >> + if stale_fd { >> + log::warn!("Stale filehandle encountered, skip {:?}", self.path); >> + return Ok(()); >> + } > > for this warning.. but get_metadata already logs (potentially multiple > times ;)) that things are incomplete cause of the stale filehandle, this > only adds the path context.. but here there is also an early return, not just the log... this skips over adding this entry, and any sub entries if the entry is a directory. The logging could however be moved to the get_metadata call and only be logged once, agreed. > >> + >> if self.previous_payload_index.is_none() { >> return self >> .add_entry_to_archive(encoder, &mut None, c_file_name, stat, fd, &metadata, None) >> @@ -1301,7 +1306,14 @@ impl Archiver { >> file_name: &Path, >> metadata: &Metadata, >> ) -> Result<(), Error> { >> - let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?; >> + let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) { >> + Ok(dest) => dest, >> + Err(Errno::ESTALE) => { >> + log::warn!("Stale file handle encountered, skip {file_name:?}"); >> + return Ok(()); >> + } >> + Err(err) => return Err(err.into()), >> + }; >> encoder.add_symlink(metadata, file_name, dest).await?; >> Ok(()) >> } >> @@ -1397,9 +1409,10 @@ fn get_metadata( >> fs_magic: i64, >> fs_feature_flags: &mut Flags, >> skip_e2big_xattr: bool, >> -) -> Result { >> +) -> Result<(Metadata, bool), Error> { >> // required for some of these >> let proc_path = Path::new("/proc/self/fd/").join(fd.to_string()); >> + let mut stale_fd = false; >> >> let mut meta = Metadata { >> stat: pxar::Stat { >> @@ -1412,18 +1425,27 @@ fn get_metadata( >> ..Default::default() >> }; >> >> - get_xattr_fcaps_acl( >> + if get_xattr_fcaps_acl( > > only call site, could just bubble up ESTALE As mentioned, this has 2 issues: Loss of anyhow error context for which sub-function the Errno occurred and sub-functions like `process_acl` which do not rely on ffi calls at all, returning plain `anyhow::Error`, which granted could be redefined to return an Errno. > >> &mut meta, >> fd, >> &proc_path, >> flags, >> fs_feature_flags, >> skip_e2big_xattr, >> - )?; >> - get_chattr(&mut meta, fd)?; >> + )? 
>> +        stale_fd = true;
>> +        log::warn!("Stale filehandle, xattrs incomplete");
>> +    }
>> +    if get_chattr(&mut meta, fd)? {
>
> same
>
>> +        stale_fd = true;
>> +        log::warn!("Stale filehandle, chattr incomplete");
>> +    }
>>       get_fat_attr(&mut meta, fd, fs_magic)?;
>> -    get_quota_project_id(&mut meta, fd, flags, fs_magic)?;
>> -    Ok(meta)
>> +    if get_quota_project_id(&mut meta, fd, flags, fs_magic)? {
>
> same
>
>> +        stale_fd = true;
>> +        log::warn!("Stale filehandle, quota project id incomplete");
>> +    }
>
> see above and way down below, IMHO all of these could just bubble up the error..
>
>> +    Ok((meta, stale_fd))
>> }
>>
>> fn get_fcaps(
>> @@ -1431,22 +1453,23 @@ fn get_fcaps(
>>       fd: RawFd,
>>       flags: Flags,
>>       fs_feature_flags: &mut Flags,
>> -) -> Result<(), Error> {
>> +) -> Result<bool, Error> {
>
> this is only called by get_xattr_fcaps_acl, so could just bubble up
> ESTALE as well..
>
>>       if !flags.contains(Flags::WITH_FCAPS) {
>> -        return Ok(());
>> +        return Ok(false);
>>       }
>>
>>       match xattr::fgetxattr(fd, xattr::XATTR_NAME_FCAPS) {
>>           Ok(data) => {
>>               meta.fcaps = Some(pxar::format::FCaps { data });
>> -            Ok(())
>> +            Ok(false)
>>           }
>> -        Err(Errno::ENODATA) => Ok(()),
>> +        Err(Errno::ENODATA) => Ok(false),
>>           Err(Errno::EOPNOTSUPP) => {
>>               fs_feature_flags.remove(Flags::WITH_FCAPS);
>> -            Ok(())
>> +            Ok(false)
>>           }
>> -        Err(Errno::EBADF) => Ok(()), // symlinks
>> +        Err(Errno::EBADF) => Ok(false), // symlinks
>> +        Err(Errno::ESTALE) => Ok(true),
>>           Err(err) => Err(err).context("failed to read file capabilities"),
>>       }
>> }
>> @@ -1458,32 +1481,35 @@ fn get_xattr_fcaps_acl(
>>       flags: Flags,
>>       fs_feature_flags: &mut Flags,
>>       skip_e2big_xattr: bool,
>> -) -> Result<(), Error> {
>> +) -> Result<bool, Error> {
>>       if !flags.contains(Flags::WITH_XATTRS) {
>> -        return Ok(());
>> +        return Ok(false);
>>       }
>>
>>       let xattrs = match xattr::flistxattr(fd) {
>>           Ok(names) => names,
>>           Err(Errno::EOPNOTSUPP) => {
>>               fs_feature_flags.remove(Flags::WITH_XATTRS);
>> -            return Ok(());
>> +            return Ok(false);
>>           }
>>           Err(Errno::E2BIG) => {
>>               match skip_e2big_xattr {
>> -                true => return Ok(()),
>> +                true => return Ok(false),
>>                   false => {
>>                       bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string());
>>                   }
>>               };
>>           }
>> -        Err(Errno::EBADF) => return Ok(()), // symlinks
>> +        Err(Errno::EBADF) => return Ok(false), // symlinks
>> +        Err(Errno::ESTALE) => return Ok(true),
>
> see above
>
>>           Err(err) => return Err(err).context("failed to read xattrs"),
>>       };
>>
>>       for attr in &xattrs {
>>           if xattr::is_security_capability(attr) {
>> -            get_fcaps(meta, fd, flags, fs_feature_flags)?;
>> +            if get_fcaps(meta, fd, flags, fs_feature_flags)? {
>> +                return Ok(true);
>
> see above
>
>> +            }
>>               continue;
>>           }
>>
>> @@ -1505,35 +1531,37 @@ fn get_xattr_fcaps_acl(
>>           Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either
>>           Err(Errno::E2BIG) => {
>>               match skip_e2big_xattr {
>> -                true => return Ok(()),
>> +                true => return Ok(false),
>>                   false => {
>>                       bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string());
>>                   }
>>               };
>>           }
>> +        Err(Errno::ESTALE) => return Ok(true), // symlinks
>
> same here (and stray copy-paste comment I guess?)
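The bubble-up variant would look roughly like this (a sketch of the
suggestion, assuming the helpers switch to `Result<_, Errno>` and relying
on the surrounding module's `fs::read_attr_fd`, `errno_is_unsupported` and
`Flags`; this is not the submitted code):

```rust
// Sketch: the helper no longer folds ESTALE into a bool, the caller decides.
fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Errno> {
    let mut attr: libc::c_long = 0;
    match unsafe { fs::read_attr_fd(fd, &mut attr) } {
        Ok(_) => {}
        Err(errno) if errno_is_unsupported(errno) => return Ok(()),
        // ESTALE (and everything else) simply bubbles up to get_metadata
        Err(errno) => return Err(errno),
    }
    metadata.stat.flags |= Flags::from_chattr(attr).bits();
    Ok(())
}
```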
>
>>           Err(err) => {
>>               return Err(err).context(format!("error reading extended attribute {attr:?}"))
>>           }
>>       }
>>   }
>>
>> -    Ok(())
>> +    Ok(false)
>> }
>>
>> -fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> {
>> +fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<bool, Error> {
>>       let mut attr: libc::c_long = 0;
>>
>>       match unsafe { fs::read_attr_fd(fd, &mut attr) } {
>>           Ok(_) => (),
>> +        Err(Errno::ESTALE) => return Ok(true),
>>           Err(errno) if errno_is_unsupported(errno) => {
>> -            return Ok(());
>> +            return Ok(false);
>>           }
>>           Err(err) => return Err(err).context("failed to read file attributes"),
>>       }
>>
>>       metadata.stat.flags |= Flags::from_chattr(attr).bits();
>>
>> -    Ok(())
>> +    Ok(false)
>> }
>>
>>   fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> {
>> @@ -1564,30 +1592,34 @@ fn get_quota_project_id(
>>       fd: RawFd,
>>       flags: Flags,
>>       magic: i64,
>> -) -> Result<(), Error> {
>> +) -> Result<bool, Error> {
>
> see above
>
>>       if !(metadata.is_dir() || metadata.is_regular_file()) {
>> -        return Ok(());
>> +        return Ok(false);
>>       }
>>
>>       if !flags.contains(Flags::WITH_QUOTA_PROJID) {
>> -        return Ok(());
>> +        return Ok(false);
>>       }
>>
>>       use proxmox_sys::linux::magic::*;
>>
>>       match magic {
>>           EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (),
>> -        _ => return Ok(()),
>> +        _ => return Ok(false),
>>       }
>>
>>       let mut fsxattr = fs::FSXAttr::default();
>>       let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) };
>>
>> +    if let Err(Errno::ESTALE) = res {
>> +        return Ok(true);
>> +    }
>> +
>>       // On some FUSE filesystems it can happen that ioctl is not supported.
>>       // For these cases projid is set to 0 while the error is ignored.
>>       if let Err(errno) = res {
>>           if errno_is_unsupported(errno) {
>> -            return Ok(());
>> +            return Ok(false);
>>           } else {
>>               return Err(errno).context("error while reading quota project id");
>>           }
>> @@ -1597,7 +1629,7 @@ fn get_quota_project_id(
>>       if projid != 0 {
>>           metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid });
>>       }
>> -    Ok(())
>> +    Ok(false)
>> }
>>
>>   fn get_acl(
>> @@ -1840,7 +1872,7 @@ mod tests {
>>           let fs_magic = detect_fs_type(dir.as_raw_fd()).unwrap();
>>           let stat = nix::sys::stat::fstat(dir.as_raw_fd()).unwrap();
>>           let mut fs_feature_flags = Flags::from_magic(fs_magic);
>> -        let metadata = get_metadata(
>> +        let (metadata, _) = get_metadata(
>
> no use of the new return value
>
>>               dir.as_raw_fd(),
>>               &stat,
>>               fs_feature_flags,
>> @@ -1937,7 +1969,7 @@ mod tests {
>>           let stat = nix::sys::stat::fstat(source_dir.as_raw_fd()).unwrap();
>>           let mut fs_feature_flags = Flags::from_magic(fs_magic);
>>
>> -        let metadata = get_metadata(
>> +        let (metadata, _) = get_metadata(
>
> no use either.. so wouldn't it make more sense to pass in a path and log
> the context right in get_metadata? or treat the stale FD as an error,
> and add the context/path as part of error handling?

The first approach seems better, it will however not help to differentiate
the (hard) errors from the soft error ESTALE, which requires skipping over
entries at the `get_metadata` call site conditionally.

Returning the stale file handle error as `anyhow::Error` also does not
allow distinguishing it from other (hard) errors, so again it cannot be
handled as a soft error at the call site.

And returning all errors as `Errno` has the loss of error context issue
as described above.
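For illustration, catching ESTALE as a soft error at the call site would
need something along these lines (a sketch only; it assumes the original
`Errno` is preserved as the root cause of the `anyhow::Error` chain):

```rust
match get_metadata(fd.as_raw_fd(), stat, self.flags(), fs_magic,
                   &mut self.fs_feature_flags, self.skip_e2big_xattr) {
    Ok(metadata) => { /* proceed with adding the entry */ }
    // soft error: downcast to the root-cause Errno and skip the entry
    Err(err) if err.downcast_ref::<Errno>() == Some(&Errno::ESTALE) => {
        log::warn!("stale file handle encountered, skip {:?}", self.path);
        return Ok(());
    }
    // hard error: propagate with its context intact
    Err(err) => return Err(err),
}
```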
I will see if I can cover this better by refactoring the code, as most
of the helpers have a single call site, so it should be possible to
reorganize without many side effects.

>
> the four call sites are:
> - two related to tests, we can probably treat ESTALE as a hard error there
> - the one for obtaining the metadata of the source dir of the archive,
> if that is stale we can't create an archive -> hard error as well
> - adding an entry: for the stale case, we already log a warning and
> proceed with the next entry, so we don't benefit from the fact that
> (incomplete) metadata and the staleness is returned, as opposed to
> just treating ESTALE as an error that we can "catch" and handle..
>
>>               source_dir.as_raw_fd(),
>>               &stat,
>>               fs_feature_flags,
>> -- 
>> 2.39.5
>>
>>
>>
>> _______________________________________________
>> pbs-devel mailing list
>> pbs-devel at lists.proxmox.com
>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>>
>>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>


From f.gruenbichler at proxmox.com  Wed Nov 13 14:50:00 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Wed, 13 Nov 2024 14:50:00 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] docs: add security
 implications of prune and change detection mode
In-Reply-To: <20241031154554.585068-1-c.ebner@proxmox.com>
References: <20241031154554.585068-1-c.ebner@proxmox.com>
Message-ID: <1731505125.0cl85561ct.astroid@yuna.none>

On October 31, 2024 4:45 pm, Christian Ebner wrote:
> Users should be made aware that the data stored in chunks outlives
> the backup snapshots on pruning and that backups created using the
> change-detection-mode set to metadata might reference chunks
> containing files which have vanished since the previous backup, but
> might still be accessible when access to the chunks raw data is
> possible (client or server side).
>
> Signed-off-by: Christian Ebner
> ---
>  docs/maintenance.rst | 23 +++++++++++++++++++++--
>  1 file changed, 21 insertions(+), 2 deletions(-)
>
> diff --git a/docs/maintenance.rst b/docs/maintenance.rst
> index 4bb135e4e..b6d42ecc2 100644
> --- a/docs/maintenance.rst
> +++ b/docs/maintenance.rst
> @@ -6,8 +6,27 @@ Maintenance Tasks
>  Pruning
>  -------
>
> -Prune lets you specify which backup snapshots you want to keep.
> -The following retention options are available:
> +Prune lets you specify which backup snapshots you want to keep, removing others.
> +For removed backups, only the metadata associating the snapshot with the data

this is a bit hard to parse (if you don't already know what it means). How
about: When removing snapshots, only the snapshot metadata (manifest,
indices, blobs, log and notes) is removed; the chunks containing the actual
backup data referenced by the snapshot indices have to be removed by a
garbage collection run.

> +stored in the data chunks is removed, the actual backup data has to be removed
> +by garbage collection.
> +
> +.. Caution:: Take into consideration that sensitive information stored in data
> +   chunks will outlive a pruned snapshot and remain present in the datastore as
> +   long as at least one backup snapshot references this data.
> +
> +   If no longer referenced, the data remains until removed by the garbage
> +   collection.

*Even* if no snapshot references a given chunk, it will remain..
> +
> +   Further, backups created using the `change-detection-mode` set to `metadata`
> +   might reference backup chunks containing files which have vanished since the
> +   previous backup, but might still be accessible when reading the chunks' raw
> +   data is possible (client or server side).
> +
> +   Creating a backup with `change-detection-mode` set to `data` will break this
> +   chain, as files will never reuse chunks partially.

This is a bit unclear IMHO. If we want to give instructions on what to do
when sensitive data ended up in a backup, they should be complete:
- prune any snapshots made while the sensitive data was part of the backup
  input
- if using file-based backups with change-detection-mode metadata:
-- additionally prune all snapshots since the sensitive data was removed
   from the backup input
- trigger a GC run

The change-detection-mode data would break the chain, but not remove all
affected snapshots. If all affected snapshots are removed, there is no need
for change-detection-mode data? In fact, not using it might be better ->
there might be a snapshot before the sensitive data was added to the input
that can still serve as a valid baseline for metadata-using change
detection?

> +
> +The following retention options are available for pruning:
>
>  ``keep-last <N>``
>    Keep the last ``<N>`` backup snapshots.
> -- 
> 2.39.5
>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>
>


From f.gruenbichler at proxmox.com  Wed Nov 13 14:50:02 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Wed, 13 Nov 2024 14:50:02 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/2] docs: deduplicate background
 details for garbage collection
In-Reply-To: <20241031154554.585068-2-c.ebner@proxmox.com>
References: <20241031154554.585068-1-c.ebner@proxmox.com>
 <20241031154554.585068-2-c.ebner@proxmox.com>
Message-ID: <1731505655.nd5vn0ks6l.astroid@yuna.none>

On October 31, 2024 4:45 pm, Christian Ebner wrote:
> Currently, common details regarding garbage collection are documented
> in the backup client and the maintenance task. Deduplicate this
> information by moving the details to the background section of the
> maintenance task and reference that section in the backup client
> part.
>
> Signed-off-by: Christian Ebner
> ---
>  docs/backup-client.rst | 28 ++++++++++++----------------
>  docs/maintenance.rst   | 35 ++++++++++++++++++++++++-----------
>  2 files changed, 36 insertions(+), 27 deletions(-)
>
> diff --git a/docs/backup-client.rst b/docs/backup-client.rst
> index e56e0625b..892be11d9 100644
> --- a/docs/backup-client.rst
> +++ b/docs/backup-client.rst
> @@ -789,29 +789,25 @@ Garbage Collection
>  ------------------
>
>  The ``prune`` command removes only the backup index files, not the data
> -from the datastore. This task is left to the garbage collection
> -command. It is recommended to carry out garbage collection on a regular basis.
> +from the datastore. Deletion of unused backup data from the datastore is done by
> +:ref:`garbage collection<_maintenance_gc>`. It is therefore recommended to
> +schedule garbage collection tasks on a regular basis. The working principle of
> +garbage collection is described in more detail in the related :ref:`background
> +section <gc_background>`.
>
> -The garbage collection works in two phases. In the first phase, all
> -data blocks that are still in use are marked. In the second phase,
> -unused data blocks are removed.
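As an aside, the two phases boil down to the following toy model (for
illustration only, this is not the PBS implementation and all names are
made up):

```rust
use std::collections::{HashMap, HashSet};
use std::time::{Duration, SystemTime};

struct Chunk {
    atime: SystemTime,
}

fn garbage_collect(
    chunks: &mut HashMap<String, Chunk>,
    indices: &[HashSet<String>],
    gc_start: SystemTime,
) {
    // phase one (mark): touch every chunk referenced by any index file,
    // which refreshes its atime
    for index in indices {
        for digest in index {
            if let Some(chunk) = chunks.get_mut(digest) {
                chunk.atime = gc_start;
            }
        }
    }
    // phase two (sweep): remove chunks whose atime is older than the cutoff
    // (24h for relatime plus 5 minutes of headroom, see the note below)
    let cutoff = gc_start - Duration::from_secs(24 * 3600 + 5 * 60);
    chunks.retain(|_, chunk| chunk.atime >= cutoff);
}
```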
> +To start garbage collection from the client side, run the following command: > + > +.. code-block:: console > + > + # proxmox-backup-client garbage-collect > > .. note:: This command needs to read all existing backup index files > and touches the complete chunk-store. This can take a long time > depending on the number of chunks and the speed of the underlying > disks. > > -.. note:: The garbage collection will only remove chunks that haven't been used > - for at least one day (exactly 24h 5m). This grace period is necessary because > - chunks in use are marked by touching the chunk which updates the ``atime`` > - (access time) property. Filesystems are mounted with the ``relatime`` option > - by default. This results in a better performance by only updating the > - ``atime`` property if the last access has been at least 24 hours ago. The > - downside is that touching a chunk within these 24 hours will not always > - update its ``atime`` property. > - > - Chunks in the grace period will be logged at the end of the garbage > - collection task as *Pending removals*. > +The progress of the garbage collection will be displayed as shown in the example > +below: > > .. code-block:: console > > diff --git a/docs/maintenance.rst b/docs/maintenance.rst > index b6d42ecc2..01c24ea7d 100644 > --- a/docs/maintenance.rst > +++ b/docs/maintenance.rst > @@ -190,6 +190,8 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up > periodically. For most setups a weekly schedule provides a good interval to > start. > > +.. _gc_background: > + > GC Background > ^^^^^^^^^^^^^ > > @@ -215,17 +217,28 @@ datastore or interfering with other backups. > The garbage collection (GC) process is performed per datastore and is split > into two phases: > > -- Phase one: Mark > - All index files are read, and the access time of the referred chunk files is > - updated. > - > -- Phase two: Sweep > - The task iterates over all chunks, checks their file access time, and if it > - is older than the cutoff time (i.e., the time when GC started, plus some > - headroom for safety and Linux file system behavior), the task knows that the > - chunk was neither referred to in any backup index nor part of any currently > - running backup that has no index to scan for. As such, the chunk can be > - safely deleted. > +- Phase one (Mark): > + > + All index files are read, and the access time (``atime``) of the referred pre-existing, but "referenced" fits better IMHO > + chunk files is updated. > + > +- Phase two (Sweep): > + > + The task iterates over all chunks and checks their file access time. If it is > + older than the cutoff time given by either 24 hours and 5 minutes after the > + start time of the garbage collection or the start time of the oldest backup > + writer instance, the garbage collection can consider the chunk as neither > + referenced by any backup index nor part of any currently running backup. > + Therefore, the chunk can be safely deleted. Should we re-order/simplify this, and first explain/define the cutoff, and then (in a separate sentence) describe how it is used? > + > + Chunks within the grace period will not be deleted and logged at the end of > + the garbage collection task as *Pending removals*. > + > +.. note:: The grace period for backup chunk removal is not arbitrary, but stems > + from the fact that filesystems are typically mounted with the ``relatime`` > + option by default. 
This results in better performance by only updating the > + ``atime`` property if a file has been modified since the last access or the > + last access has been at least 24 hours ago. > > Manually Starting GC > ^^^^^^^^^^^^^^^^^^^^ > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Wed Nov 13 14:55:46 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Wed, 13 Nov 2024 14:55:46 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 4/4] fix #5853: client: pxar: exclude stale files on metadata read In-Reply-To: <39def709-3df0-4182-8179-8be4022f0f9e@proxmox.com> References: <20241105140153.282980-1-c.ebner@proxmox.com> <20241105140153.282980-5-c.ebner@proxmox.com> <1731330267.220wgkqhtc.astroid@yuna.none> <39def709-3df0-4182-8179-8be4022f0f9e@proxmox.com> Message-ID: <1731506079.pge8n3f5vc.astroid@yuna.none> On November 13, 2024 2:45 pm, Christian Ebner wrote: > On 11/11/24 14:37, Fabian Gr?nbichler wrote: >> behaviour wise this seems okay to me, but if possible, I'd avoid all the >> return value tuples, see detailed comments below.. > > Agreed, I am not a fan of passing the stale file handle error info along > a the boolean in the return value as well. > > But unfortunately passing along the error without loosing pre-existing > error context and switching all the `get_metadata` related functions to > return an `Errno` is not possible as is. > > E.g. `process_acl` returns an `anyhow::Error` (could be defined to > return an e.g. `Errno::EINVALID` instead?), special handling of > `Errno::E2BIG` for the xattr case only, ... > > The current approach was choosen to keep the current anyhow error > context close to the actual errors when they occur. I think that should be solvable with some refactoring/defining of a proper error type.. but you can also attempt to downcast the anyhow error to get the ESTALE? > > On November 5, 2024 3:01 pm, Christian Ebner wrote: >>> Skip and warn the user for files which returned a stale file handle >>> error while reading the metadata associated to that file. >>> >>> Instead of returning with an error when getting the metadata, return >>> a boolean flag signaling if a stale file handle has been encountered. >>> >>> Link to issue in bugtracker: >>> https://bugzilla.proxmox.com/show_bug.cgi?id=5853 >>> >>> Link to thread in community forum: >>> https://forum.proxmox.com/threads/156822/ >>> >>> Signed-off-by: Christian Ebner >>> --- >>> pbs-client/src/pxar/create.rs | 100 ++++++++++++++++++++++------------ >>> 1 file changed, 66 insertions(+), 34 deletions(-) >>> >>> diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs >>> index 2a844922c..85be00db4 100644 >>> --- a/pbs-client/src/pxar/create.rs >>> +++ b/pbs-client/src/pxar/create.rs >>> @@ -228,7 +228,7 @@ where >>> let mut fs_feature_flags = Flags::from_magic(fs_magic); >>> >>> let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?; >>> - let metadata = get_metadata( >>> + let (metadata, stale_fd) = get_metadata( >> >> stale_fd here is not used at all.. > > Yes, that one should lead to a hard error as you mentioned, so should be > handled accordingly. I will adapt this to be a hard error. 
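To sketch what such a proper error type could look like (illustrative
only, the names are made up and this is not a proposed implementation):

```rust
#[derive(Debug)]
enum MetadataError {
    Stale,                // ESTALE, treated as a soft error by callers
    Other(anyhow::Error), // everything else, with full context preserved
}

impl From<nix::errno::Errno> for MetadataError {
    fn from(errno: nix::errno::Errno) -> Self {
        match errno {
            nix::errno::Errno::ESTALE => MetadataError::Stale,
            other => MetadataError::Other(anyhow::Error::new(other)),
        }
    }
}
```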
> >> >>> source_dir.as_raw_fd(), >>> &stat, >>> feature_flags & fs_feature_flags, >>> @@ -744,7 +744,7 @@ impl Archiver { >>> return Ok(()); >>> } >>> >>> - let metadata = get_metadata( >>> + let (metadata, stale_fd) = get_metadata( >> >> this one is used >> >>> fd.as_raw_fd(), >>> stat, >>> self.flags(), >>> @@ -753,6 +753,11 @@ impl Archiver { >>> self.skip_e2big_xattr, >>> )?; >>> >>> + if stale_fd { >>> + log::warn!("Stale filehandle encountered, skip {:?}", self.path); >>> + return Ok(()); >>> + } >> >> for this warning.. but get_metadata already logs (potentially multiple >> times ;)) that things are incomplete cause of the stale filehandle, this >> only adds the path context.. > > but here there is also an early return, not just the log... this skips > over adding this entry, and any sub entries if the entry is a directory. > > The logging could however be moved to the get_metadata call and only be > logged once, agreed. > >> >>> + >>> if self.previous_payload_index.is_none() { >>> return self >>> .add_entry_to_archive(encoder, &mut None, c_file_name, stat, fd, &metadata, None) >>> @@ -1301,7 +1306,14 @@ impl Archiver { >>> file_name: &Path, >>> metadata: &Metadata, >>> ) -> Result<(), Error> { >>> - let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?; >>> + let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) { >>> + Ok(dest) => dest, >>> + Err(Errno::ESTALE) => { >>> + log::warn!("Stale file handle encountered, skip {file_name:?}"); >>> + return Ok(()); >>> + } >>> + Err(err) => return Err(err.into()), >>> + }; >>> encoder.add_symlink(metadata, file_name, dest).await?; >>> Ok(()) >>> } >>> @@ -1397,9 +1409,10 @@ fn get_metadata( >>> fs_magic: i64, >>> fs_feature_flags: &mut Flags, >>> skip_e2big_xattr: bool, >>> -) -> Result { >>> +) -> Result<(Metadata, bool), Error> { >>> // required for some of these >>> let proc_path = Path::new("/proc/self/fd/").join(fd.to_string()); >>> + let mut stale_fd = false; >>> >>> let mut meta = Metadata { >>> stat: pxar::Stat { >>> @@ -1412,18 +1425,27 @@ fn get_metadata( >>> ..Default::default() >>> }; >>> >>> - get_xattr_fcaps_acl( >>> + if get_xattr_fcaps_acl( >> >> only call site, could just bubble up ESTALE > > As mentioned, this has 2 issues: Loss of anyhow error context for which > sub-function the Errno occurred and sub-functions like `process_acl` > which do not rely on ffi calls at all, returning plain `anyhow::Error`, > which granted could be redefined to return an Errno. > >> >>> &mut meta, >>> fd, >>> &proc_path, >>> flags, >>> fs_feature_flags, >>> skip_e2big_xattr, >>> - )?; >>> - get_chattr(&mut meta, fd)?; >>> + )? { >>> + stale_fd = true; >>> + log::warn!("Stale filehandle, xattrs incomplete"); >>> + } >>> + if get_chattr(&mut meta, fd)? { >> >> same >> >>> + stale_fd = true; >>> + log::warn!("Stale filehandle, chattr incomplete"); >>> + } >>> get_fat_attr(&mut meta, fd, fs_magic)?; >>> - get_quota_project_id(&mut meta, fd, flags, fs_magic)?; >>> - Ok(meta) >>> + if get_quota_project_id(&mut meta, fd, flags, fs_magic)? { >> >> same >> >>> + stale_fd = true; >>> + log::warn!("Stale filehandle, quota project id incomplete"); >>> + } >> >> see above and way down below, IMHO all of these could just bubble up the error.. 
>> >>> + Ok((meta, stale_fd)) >>> } >>> >>> fn get_fcaps( >>> @@ -1431,22 +1453,23 @@ fn get_fcaps( >>> fd: RawFd, >>> flags: Flags, >>> fs_feature_flags: &mut Flags, >>> -) -> Result<(), Error> { >>> +) -> Result { >> >> this is only called by get_xattr_fcaps_acl, so could just bubble up >> ESTALE as well.. >> >>> if !flags.contains(Flags::WITH_FCAPS) { >>> - return Ok(()); >>> + return Ok(false); >>> } >>> >>> match xattr::fgetxattr(fd, xattr::XATTR_NAME_FCAPS) { >>> Ok(data) => { >>> meta.fcaps = Some(pxar::format::FCaps { data }); >>> - Ok(()) >>> + Ok(false) >>> } >>> - Err(Errno::ENODATA) => Ok(()), >>> + Err(Errno::ENODATA) => Ok(false), >>> Err(Errno::EOPNOTSUPP) => { >>> fs_feature_flags.remove(Flags::WITH_FCAPS); >>> - Ok(()) >>> + Ok(false) >>> } >>> - Err(Errno::EBADF) => Ok(()), // symlinks >>> + Err(Errno::EBADF) => Ok(false), // symlinks >>> + Err(Errno::ESTALE) => Ok(true), >>> Err(err) => Err(err).context("failed to read file capabilities"), >>> } >>> } >>> @@ -1458,32 +1481,35 @@ fn get_xattr_fcaps_acl( >>> flags: Flags, >>> fs_feature_flags: &mut Flags, >>> skip_e2big_xattr: bool, >>> -) -> Result<(), Error> { >>> +) -> Result { >>> if !flags.contains(Flags::WITH_XATTRS) { >>> - return Ok(()); >>> + return Ok(false); >>> } >>> >>> let xattrs = match xattr::flistxattr(fd) { >>> Ok(names) => names, >>> Err(Errno::EOPNOTSUPP) => { >>> fs_feature_flags.remove(Flags::WITH_XATTRS); >>> - return Ok(()); >>> + return Ok(false); >>> } >>> Err(Errno::E2BIG) => { >>> match skip_e2big_xattr { >>> - true => return Ok(()), >>> + true => return Ok(false), >>> false => { >>> bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); >>> } >>> }; >>> } >>> - Err(Errno::EBADF) => return Ok(()), // symlinks >>> + Err(Errno::EBADF) => return Ok(false), // symlinks >>> + Err(Errno::ESTALE) => return Ok(true), >> >> see above >> >>> Err(err) => return Err(err).context("failed to read xattrs"), >>> }; >>> >>> for attr in &xattrs { >>> if xattr::is_security_capability(attr) { >>> - get_fcaps(meta, fd, flags, fs_feature_flags)?; >>> + if get_fcaps(meta, fd, flags, fs_feature_flags)? { >>> + return Ok(true); >> >> see above >> >>> + } >>> continue; >>> } >>> >>> @@ -1505,35 +1531,37 @@ fn get_xattr_fcaps_acl( >>> Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either >>> Err(Errno::E2BIG) => { >>> match skip_e2big_xattr { >>> - true => return Ok(()), >>> + true => return Ok(false), >>> false => { >>> bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); >>> } >>> }; >>> } >>> + Err(Errno::ESTALE) => return Ok(true), // symlinks >> >> same here (and stray copy-paste comment I guess?) 
>> >>> Err(err) => { >>> return Err(err).context(format!("error reading extended attribute {attr:?}")) >>> } >>> } >>> } >>> >>> - Ok(()) >>> + Ok(false) >>> } >>> >>> -fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> { >>> +fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result { >>> let mut attr: libc::c_long = 0; >>> >>> match unsafe { fs::read_attr_fd(fd, &mut attr) } { >>> Ok(_) => (), >>> + Err(Errno::ESTALE) => return Ok(true), >>> Err(errno) if errno_is_unsupported(errno) => { >>> - return Ok(()); >>> + return Ok(false); >>> } >>> Err(err) => return Err(err).context("failed to read file attributes"), >>> } >>> >>> metadata.stat.flags |= Flags::from_chattr(attr).bits(); >>> >>> - Ok(()) >>> + Ok(false) >>> } >>> >>> fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> { >>> @@ -1564,30 +1592,34 @@ fn get_quota_project_id( >>> fd: RawFd, >>> flags: Flags, >>> magic: i64, >>> -) -> Result<(), Error> { >>> +) -> Result { >> >> see above >> >>> if !(metadata.is_dir() || metadata.is_regular_file()) { >>> - return Ok(()); >>> + return Ok(false); >>> } >>> >>> if !flags.contains(Flags::WITH_QUOTA_PROJID) { >>> - return Ok(()); >>> + return Ok(false); >>> } >>> >>> use proxmox_sys::linux::magic::*; >>> >>> match magic { >>> EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (), >>> - _ => return Ok(()), >>> + _ => return Ok(false), >>> } >>> >>> let mut fsxattr = fs::FSXAttr::default(); >>> let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) }; >>> >>> + if let Err(Errno::ESTALE) = res { >>> + return Ok(true); >>> + } >>> + >>> // On some FUSE filesystems it can happen that ioctl is not supported. >>> // For these cases projid is set to 0 while the error is ignored. >>> if let Err(errno) = res { >>> if errno_is_unsupported(errno) { >>> - return Ok(()); >>> + return Ok(false); >>> } else { >>> return Err(errno).context("error while reading quota project id"); >>> } >>> @@ -1597,7 +1629,7 @@ fn get_quota_project_id( >>> if projid != 0 { >>> metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid }); >>> } >>> - Ok(()) >>> + Ok(false) >>> } >>> >>> fn get_acl( >>> @@ -1840,7 +1872,7 @@ mod tests { >>> let fs_magic = detect_fs_type(dir.as_raw_fd()).unwrap(); >>> let stat = nix::sys::stat::fstat(dir.as_raw_fd()).unwrap(); >>> let mut fs_feature_flags = Flags::from_magic(fs_magic); >>> - let metadata = get_metadata( >>> + let (metadata, _) = get_metadata( >> >> no use of the new return value >> >>> dir.as_raw_fd(), >>> &stat, >>> fs_feature_flags, >>> @@ -1937,7 +1969,7 @@ mod tests { >>> let stat = nix::sys::stat::fstat(source_dir.as_raw_fd()).unwrap(); >>> let mut fs_feature_flags = Flags::from_magic(fs_magic); >>> >>> - let metadata = get_metadata( >>> + let (metadata, _) = get_metadata( >> >> no use either.. so wouldn't it make more sense to pass in a path and log >> the context right in get_metadata? or treat the stale FD as an error, >> and add the context/path as part of error handling? > > The first approach seems better, will however not help to differentiate > the (hard) errors from the soft error ESTALE, which requires to skip > over entries at the `get_metadata` call side conditionally. > > Returning the stale file handle error as `Anyhow::Error` also does not > allow to distinguish from other (hard) errors, so again it cannot be > handled as soft error at the call site. > > And returning all errors as `Errno` has the loss of error context issue > as described above. 
> > I will see if I can cover this better by refactoring the code, as most > of the helpers have a single call side, so it should be possible to > reorganize without much side effects. > >> >> the four call sites are: >> - two related to tests, we can probably treat ESTALE as hard error there >> - the one for obtaining the metadata of the source dir of the archive, >> if that is stale we can't create an archive -> hard error as well >> - adding an entry: for the stale case, we already log a warning and >> proceed with the next entry, so we don't benefit from the fact that >> (incomplete) metadata and the staleness is returned, as opposed to >> just treating ESTALE as an error that we can "catch" and handle.. >> >>> source_dir.as_raw_fd(), >>> &stat, >>> fs_feature_flags, >>> -- >>> 2.39.5 >>> >>> >>> >>> _______________________________________________ >>> pbs-devel mailing list >>> pbs-devel at lists.proxmox.com >>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >>> >>> >>> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >> >> > > From c.ebner at proxmox.com Wed Nov 13 15:04:47 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 15:04:47 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 4/4] fix #5853: client: pxar: exclude stale files on metadata read In-Reply-To: <1731506079.pge8n3f5vc.astroid@yuna.none> References: <20241105140153.282980-1-c.ebner@proxmox.com> <20241105140153.282980-5-c.ebner@proxmox.com> <1731330267.220wgkqhtc.astroid@yuna.none> <39def709-3df0-4182-8179-8be4022f0f9e@proxmox.com> <1731506079.pge8n3f5vc.astroid@yuna.none> Message-ID: <5e99d832-50b3-475a-a886-ff83f0c007a1@proxmox.com> On 11/13/24 14:55, Fabian Gr?nbichler wrote: > On November 13, 2024 2:45 pm, Christian Ebner wrote: >> On 11/11/24 14:37, Fabian Gr?nbichler wrote: >>> behaviour wise this seems okay to me, but if possible, I'd avoid all the >>> return value tuples, see detailed comments below.. >> >> Agreed, I am not a fan of passing the stale file handle error info along >> a the boolean in the return value as well. >> >> But unfortunately passing along the error without loosing pre-existing >> error context and switching all the `get_metadata` related functions to >> return an `Errno` is not possible as is. >> >> E.g. `process_acl` returns an `anyhow::Error` (could be defined to >> return an e.g. `Errno::EINVALID` instead?), special handling of >> `Errno::E2BIG` for the xattr case only, ... >> >> The current approach was choosen to keep the current anyhow error >> context close to the actual errors when they occur. > > I think that should be solvable with some refactoring/defining of a > proper error type.. > > but you can also attempt to downcast the anyhow error to get the ESTALE? Ah true, thanks for the pointer! I will send a new version of the patches incorporating your feedback. > >> > On November 5, 2024 3:01 pm, Christian Ebner wrote: >>>> Skip and warn the user for files which returned a stale file handle >>>> error while reading the metadata associated to that file. >>>> >>>> Instead of returning with an error when getting the metadata, return >>>> a boolean flag signaling if a stale file handle has been encountered. 
>>>> >>>> Link to issue in bugtracker: >>>> https://bugzilla.proxmox.com/show_bug.cgi?id=5853 >>>> >>>> Link to thread in community forum: >>>> https://forum.proxmox.com/threads/156822/ >>>> >>>> Signed-off-by: Christian Ebner >>>> --- >>>> pbs-client/src/pxar/create.rs | 100 ++++++++++++++++++++++------------ >>>> 1 file changed, 66 insertions(+), 34 deletions(-) >>>> >>>> diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs >>>> index 2a844922c..85be00db4 100644 >>>> --- a/pbs-client/src/pxar/create.rs >>>> +++ b/pbs-client/src/pxar/create.rs >>>> @@ -228,7 +228,7 @@ where >>>> let mut fs_feature_flags = Flags::from_magic(fs_magic); >>>> >>>> let stat = nix::sys::stat::fstat(source_dir.as_raw_fd())?; >>>> - let metadata = get_metadata( >>>> + let (metadata, stale_fd) = get_metadata( >>> >>> stale_fd here is not used at all.. >> >> Yes, that one should lead to a hard error as you mentioned, so should be >> handled accordingly. I will adapt this to be a hard error. >> >>> >>>> source_dir.as_raw_fd(), >>>> &stat, >>>> feature_flags & fs_feature_flags, >>>> @@ -744,7 +744,7 @@ impl Archiver { >>>> return Ok(()); >>>> } >>>> >>>> - let metadata = get_metadata( >>>> + let (metadata, stale_fd) = get_metadata( >>> >>> this one is used >>> >>>> fd.as_raw_fd(), >>>> stat, >>>> self.flags(), >>>> @@ -753,6 +753,11 @@ impl Archiver { >>>> self.skip_e2big_xattr, >>>> )?; >>>> >>>> + if stale_fd { >>>> + log::warn!("Stale filehandle encountered, skip {:?}", self.path); >>>> + return Ok(()); >>>> + } >>> >>> for this warning.. but get_metadata already logs (potentially multiple >>> times ;)) that things are incomplete cause of the stale filehandle, this >>> only adds the path context.. >> >> but here there is also an early return, not just the log... this skips >> over adding this entry, and any sub entries if the entry is a directory. >> >> The logging could however be moved to the get_metadata call and only be >> logged once, agreed. 
>> >>> >>>> + >>>> if self.previous_payload_index.is_none() { >>>> return self >>>> .add_entry_to_archive(encoder, &mut None, c_file_name, stat, fd, &metadata, None) >>>> @@ -1301,7 +1306,14 @@ impl Archiver { >>>> file_name: &Path, >>>> metadata: &Metadata, >>>> ) -> Result<(), Error> { >>>> - let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?; >>>> + let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) { >>>> + Ok(dest) => dest, >>>> + Err(Errno::ESTALE) => { >>>> + log::warn!("Stale file handle encountered, skip {file_name:?}"); >>>> + return Ok(()); >>>> + } >>>> + Err(err) => return Err(err.into()), >>>> + }; >>>> encoder.add_symlink(metadata, file_name, dest).await?; >>>> Ok(()) >>>> } >>>> @@ -1397,9 +1409,10 @@ fn get_metadata( >>>> fs_magic: i64, >>>> fs_feature_flags: &mut Flags, >>>> skip_e2big_xattr: bool, >>>> -) -> Result { >>>> +) -> Result<(Metadata, bool), Error> { >>>> // required for some of these >>>> let proc_path = Path::new("/proc/self/fd/").join(fd.to_string()); >>>> + let mut stale_fd = false; >>>> >>>> let mut meta = Metadata { >>>> stat: pxar::Stat { >>>> @@ -1412,18 +1425,27 @@ fn get_metadata( >>>> ..Default::default() >>>> }; >>>> >>>> - get_xattr_fcaps_acl( >>>> + if get_xattr_fcaps_acl( >>> >>> only call site, could just bubble up ESTALE >> >> As mentioned, this has 2 issues: Loss of anyhow error context for which >> sub-function the Errno occurred and sub-functions like `process_acl` >> which do not rely on ffi calls at all, returning plain `anyhow::Error`, >> which granted could be redefined to return an Errno. >> >>> >>>> &mut meta, >>>> fd, >>>> &proc_path, >>>> flags, >>>> fs_feature_flags, >>>> skip_e2big_xattr, >>>> - )?; >>>> - get_chattr(&mut meta, fd)?; >>>> + )? { >>>> + stale_fd = true; >>>> + log::warn!("Stale filehandle, xattrs incomplete"); >>>> + } >>>> + if get_chattr(&mut meta, fd)? { >>> >>> same >>> >>>> + stale_fd = true; >>>> + log::warn!("Stale filehandle, chattr incomplete"); >>>> + } >>>> get_fat_attr(&mut meta, fd, fs_magic)?; >>>> - get_quota_project_id(&mut meta, fd, flags, fs_magic)?; >>>> - Ok(meta) >>>> + if get_quota_project_id(&mut meta, fd, flags, fs_magic)? { >>> >>> same >>> >>>> + stale_fd = true; >>>> + log::warn!("Stale filehandle, quota project id incomplete"); >>>> + } >>> >>> see above and way down below, IMHO all of these could just bubble up the error.. >>> >>>> + Ok((meta, stale_fd)) >>>> } >>>> >>>> fn get_fcaps( >>>> @@ -1431,22 +1453,23 @@ fn get_fcaps( >>>> fd: RawFd, >>>> flags: Flags, >>>> fs_feature_flags: &mut Flags, >>>> -) -> Result<(), Error> { >>>> +) -> Result { >>> >>> this is only called by get_xattr_fcaps_acl, so could just bubble up >>> ESTALE as well.. 
>>> >>>> if !flags.contains(Flags::WITH_FCAPS) { >>>> - return Ok(()); >>>> + return Ok(false); >>>> } >>>> >>>> match xattr::fgetxattr(fd, xattr::XATTR_NAME_FCAPS) { >>>> Ok(data) => { >>>> meta.fcaps = Some(pxar::format::FCaps { data }); >>>> - Ok(()) >>>> + Ok(false) >>>> } >>>> - Err(Errno::ENODATA) => Ok(()), >>>> + Err(Errno::ENODATA) => Ok(false), >>>> Err(Errno::EOPNOTSUPP) => { >>>> fs_feature_flags.remove(Flags::WITH_FCAPS); >>>> - Ok(()) >>>> + Ok(false) >>>> } >>>> - Err(Errno::EBADF) => Ok(()), // symlinks >>>> + Err(Errno::EBADF) => Ok(false), // symlinks >>>> + Err(Errno::ESTALE) => Ok(true), >>>> Err(err) => Err(err).context("failed to read file capabilities"), >>>> } >>>> } >>>> @@ -1458,32 +1481,35 @@ fn get_xattr_fcaps_acl( >>>> flags: Flags, >>>> fs_feature_flags: &mut Flags, >>>> skip_e2big_xattr: bool, >>>> -) -> Result<(), Error> { >>>> +) -> Result { >>>> if !flags.contains(Flags::WITH_XATTRS) { >>>> - return Ok(()); >>>> + return Ok(false); >>>> } >>>> >>>> let xattrs = match xattr::flistxattr(fd) { >>>> Ok(names) => names, >>>> Err(Errno::EOPNOTSUPP) => { >>>> fs_feature_flags.remove(Flags::WITH_XATTRS); >>>> - return Ok(()); >>>> + return Ok(false); >>>> } >>>> Err(Errno::E2BIG) => { >>>> match skip_e2big_xattr { >>>> - true => return Ok(()), >>>> + true => return Ok(false), >>>> false => { >>>> bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); >>>> } >>>> }; >>>> } >>>> - Err(Errno::EBADF) => return Ok(()), // symlinks >>>> + Err(Errno::EBADF) => return Ok(false), // symlinks >>>> + Err(Errno::ESTALE) => return Ok(true), >>> >>> see above >>> >>>> Err(err) => return Err(err).context("failed to read xattrs"), >>>> }; >>>> >>>> for attr in &xattrs { >>>> if xattr::is_security_capability(attr) { >>>> - get_fcaps(meta, fd, flags, fs_feature_flags)?; >>>> + if get_fcaps(meta, fd, flags, fs_feature_flags)? { >>>> + return Ok(true); >>> >>> see above >>> >>>> + } >>>> continue; >>>> } >>>> >>>> @@ -1505,35 +1531,37 @@ fn get_xattr_fcaps_acl( >>>> Err(Errno::EBADF) => (), // symlinks, shouldn't be able to reach this either >>>> Err(Errno::E2BIG) => { >>>> match skip_e2big_xattr { >>>> - true => return Ok(()), >>>> + true => return Ok(false), >>>> false => { >>>> bail!("{} (try --skip-e2big-xattr)", Errno::E2BIG.to_string()); >>>> } >>>> }; >>>> } >>>> + Err(Errno::ESTALE) => return Ok(true), // symlinks >>> >>> same here (and stray copy-paste comment I guess?) 
>>> >>>> Err(err) => { >>>> return Err(err).context(format!("error reading extended attribute {attr:?}")) >>>> } >>>> } >>>> } >>>> >>>> - Ok(()) >>>> + Ok(false) >>>> } >>>> >>>> -fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result<(), Error> { >>>> +fn get_chattr(metadata: &mut Metadata, fd: RawFd) -> Result { >>>> let mut attr: libc::c_long = 0; >>>> >>>> match unsafe { fs::read_attr_fd(fd, &mut attr) } { >>>> Ok(_) => (), >>>> + Err(Errno::ESTALE) => return Ok(true), >>>> Err(errno) if errno_is_unsupported(errno) => { >>>> - return Ok(()); >>>> + return Ok(false); >>>> } >>>> Err(err) => return Err(err).context("failed to read file attributes"), >>>> } >>>> >>>> metadata.stat.flags |= Flags::from_chattr(attr).bits(); >>>> >>>> - Ok(()) >>>> + Ok(false) >>>> } >>>> >>>> fn get_fat_attr(metadata: &mut Metadata, fd: RawFd, fs_magic: i64) -> Result<(), Error> { >>>> @@ -1564,30 +1592,34 @@ fn get_quota_project_id( >>>> fd: RawFd, >>>> flags: Flags, >>>> magic: i64, >>>> -) -> Result<(), Error> { >>>> +) -> Result { >>> >>> see above >>> >>>> if !(metadata.is_dir() || metadata.is_regular_file()) { >>>> - return Ok(()); >>>> + return Ok(false); >>>> } >>>> >>>> if !flags.contains(Flags::WITH_QUOTA_PROJID) { >>>> - return Ok(()); >>>> + return Ok(false); >>>> } >>>> >>>> use proxmox_sys::linux::magic::*; >>>> >>>> match magic { >>>> EXT4_SUPER_MAGIC | XFS_SUPER_MAGIC | FUSE_SUPER_MAGIC | ZFS_SUPER_MAGIC => (), >>>> - _ => return Ok(()), >>>> + _ => return Ok(false), >>>> } >>>> >>>> let mut fsxattr = fs::FSXAttr::default(); >>>> let res = unsafe { fs::fs_ioc_fsgetxattr(fd, &mut fsxattr) }; >>>> >>>> + if let Err(Errno::ESTALE) = res { >>>> + return Ok(true); >>>> + } >>>> + >>>> // On some FUSE filesystems it can happen that ioctl is not supported. >>>> // For these cases projid is set to 0 while the error is ignored. >>>> if let Err(errno) = res { >>>> if errno_is_unsupported(errno) { >>>> - return Ok(()); >>>> + return Ok(false); >>>> } else { >>>> return Err(errno).context("error while reading quota project id"); >>>> } >>>> @@ -1597,7 +1629,7 @@ fn get_quota_project_id( >>>> if projid != 0 { >>>> metadata.quota_project_id = Some(pxar::format::QuotaProjectId { projid }); >>>> } >>>> - Ok(()) >>>> + Ok(false) >>>> } >>>> >>>> fn get_acl( >>>> @@ -1840,7 +1872,7 @@ mod tests { >>>> let fs_magic = detect_fs_type(dir.as_raw_fd()).unwrap(); >>>> let stat = nix::sys::stat::fstat(dir.as_raw_fd()).unwrap(); >>>> let mut fs_feature_flags = Flags::from_magic(fs_magic); >>>> - let metadata = get_metadata( >>>> + let (metadata, _) = get_metadata( >>> >>> no use of the new return value >>> >>>> dir.as_raw_fd(), >>>> &stat, >>>> fs_feature_flags, >>>> @@ -1937,7 +1969,7 @@ mod tests { >>>> let stat = nix::sys::stat::fstat(source_dir.as_raw_fd()).unwrap(); >>>> let mut fs_feature_flags = Flags::from_magic(fs_magic); >>>> >>>> - let metadata = get_metadata( >>>> + let (metadata, _) = get_metadata( >>> >>> no use either.. so wouldn't it make more sense to pass in a path and log >>> the context right in get_metadata? or treat the stale FD as an error, >>> and add the context/path as part of error handling? >> >> The first approach seems better, will however not help to differentiate >> the (hard) errors from the soft error ESTALE, which requires to skip >> over entries at the `get_metadata` call side conditionally. 
>> >> Returning the stale file handle error as `Anyhow::Error` also does not >> allow to distinguish from other (hard) errors, so again it cannot be >> handled as soft error at the call site. >> >> And returning all errors as `Errno` has the loss of error context issue >> as described above. >> >> I will see if I can cover this better by refactoring the code, as most >> of the helpers have a single call side, so it should be possible to >> reorganize without much side effects. >> >>> >>> the four call sites are: >>> - two related to tests, we can probably treat ESTALE as hard error there >>> - the one for obtaining the metadata of the source dir of the archive, >>> if that is stale we can't create an archive -> hard error as well >>> - adding an entry: for the stale case, we already log a warning and >>> proceed with the next entry, so we don't benefit from the fact that >>> (incomplete) metadata and the staleness is returned, as opposed to >>> just treating ESTALE as an error that we can "catch" and handle.. >>> >>>> source_dir.as_raw_fd(), >>>> &stat, >>>> fs_feature_flags, >>>> -- >>>> 2.39.5 >>>> >>>> >>>> >>>> _______________________________________________ >>>> pbs-devel mailing list >>>> pbs-devel at lists.proxmox.com >>>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >>>> >>>> >>>> >>> >>> >>> _______________________________________________ >>> pbs-devel mailing list >>> pbs-devel at lists.proxmox.com >>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >>> >>> >> >> From g.goller at proxmox.com Wed Nov 13 15:20:54 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Wed, 13 Nov 2024 15:20:54 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] chunk_store: fix problem with permission checking In-Reply-To: <20241113124047.97456-1-h.laimer@proxmox.com> References: <20241113124047.97456-1-h.laimer@proxmox.com> Message-ID: On 13.11.2024 13:40, Hannes Laimer wrote: >Permissions are stored in the lower 9 bits (rwxrwxrwx), >so we have to mask `st_mode` with 0o777. >The datastore root dir is created with 755, the `.chunks` dir and its >contents with 750 and the `.lock` file with 644, this changes the >expected permissions accordingly. Oops, this is my bad, I missed this. Matching the whole st_mode exactly would be nice, but not so practical as we would need to be generic over file/dir and symbolic link. Also CC'ing @Wolfgang as he persuaded me to match exactly in the first place :) Consider: Fixes: 6e101ff75777 ("fix #5439: allow to reuse existing datastore") Reviewed-By: Gabriel Goller From w.bumiller at proxmox.com Wed Nov 13 15:42:47 2024 From: w.bumiller at proxmox.com (Wolfgang Bumiller) Date: Wed, 13 Nov 2024 15:42:47 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] chunk_store: fix problem with permission checking In-Reply-To: References: <20241113124047.97456-1-h.laimer@proxmox.com> Message-ID: On Wed, Nov 13, 2024 at 03:20:54PM GMT, Gabriel Goller wrote: > On 13.11.2024 13:40, Hannes Laimer wrote: > > Permissions are stored in the lower 9 bits (rwxrwxrwx), > > so we have to mask `st_mode` with 0o777. > > The datastore root dir is created with 755, the `.chunks` dir and its > > contents with 750 and the `.lock` file with 644, this changes the > > expected permissions accordingly. > > Oops, this is my bad, I missed this. > > Matching the whole st_mode exactly would be nice, but not so practical > as we would need to be generic over file/dir and symbolic link. 
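A sketch of what matching the whole st_mode could look like (assumption
only, not the committed fix): compare the file type bits and the
permission bits separately, so the same helper works for files,
directories and symlinks.

```rust
use nix::sys::stat::SFlag;

fn check_mode(st_mode: u32, expected_perms: u32, expected_type: SFlag) -> bool {
    // S_IFMT masks the file type portion of st_mode
    let file_type = SFlag::from_bits_truncate(st_mode & SFlag::S_IFMT.bits());
    // the lower 9 bits are the rwxrwxrwx permissions
    let perms = st_mode & 0o777;
    file_type == expected_type && perms == expected_perms
}
```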
>
> Also CC'ing @Wolfgang as he persuaded me to match exactly in the first
> place :)

Ah yes, would have had to include the mode bits...

But it seems the actual permissions were wrong as well?

(Not sure if I mentioned this, but I'm not convinced we should *fail* on
unexpected permissions, I mean, we're already changing the values in the
check now)

> Consider:
>
> Fixes: 6e101ff75777 ("fix #5439: allow to reuse existing datastore")
> Reviewed-By: Gabriel Goller


From h.laimer at proxmox.com  Wed Nov 13 16:00:40 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 13 Nov 2024 16:00:40 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v13 04/26] maintenance: add
 'Unmount' maintenance type
In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com>
Message-ID: <20241113150102.164820-5-h.laimer@proxmox.com>

From: Dietmar Maurer

Signed-off-by: Dietmar Maurer
Signed-off-by: Hannes Laimer
---
I don't remember exactly, but IIRC this is only part of Dietmar's original
patch. Not sure if an S-o-b by me makes sense in that case.

 pbs-api-types/src/datastore.rs   | 3 +++
 pbs-api-types/src/maintenance.rs | 5 ++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index f6c255d3..888f5d5b 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -404,6 +404,9 @@ impl DataStoreConfig {
         match current_type {
             Some(MaintenanceType::ReadOnly) => { /* always OK  */ }
             Some(MaintenanceType::Offline) => { /* always OK  */ }
+            Some(MaintenanceType::Unmount) => {
+                bail!("datastore is being unmounted");
+            }
             Some(MaintenanceType::Delete) => {
                 match new_type {
                     Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ }
diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs
index 1e3413dc..fd4d3416 100644
--- a/pbs-api-types/src/maintenance.rs
+++ b/pbs-api-types/src/maintenance.rs
@@ -38,7 +38,6 @@ pub enum Operation {
 /// Maintenance type.
 pub enum MaintenanceType {
     // TODO:
-    //  - Add "unmounting" once we got pluggable datastores
     //  - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate
     //    operation, so that one can enable a mode where nothing new can be added but stuff can be
     //    cleaned
@@ -48,6 +47,8 @@ pub enum MaintenanceType {
     Offline,
     /// The datastore is being deleted.
     Delete,
+    /// The (removable) datastore is being unmounted.
+    Unmount,
 }
 serde_plain::derive_display_from_serialize!(MaintenanceType);
 serde_plain::derive_fromstr_from_deserialize!(MaintenanceType);
@@ -94,6 +95,8 @@ impl MaintenanceMode {
         if let Some(Operation::Lookup) = operation {
             return Ok(());
+        } else if self.ty == MaintenanceType::Unmount {
+            bail!("datastore is being unmounted");
         } else if self.ty == MaintenanceType::Offline {
             bail!("offline maintenance mode: {}", message);
         } else if self.ty == MaintenanceType::ReadOnly {
-- 
2.39.5


From h.laimer at proxmox.com  Wed Nov 13 16:00:38 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 13 Nov 2024 16:00:38 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v13 02/26] config: factor out
 method to get the absolute datastore path
In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com>
Message-ID: <20241113150102.164820-3-h.laimer@proxmox.com>

From: Dietmar Maurer

removable datastores will have a PBS-managed mountpoint as path; direct
access to the field needs to be replaced with a helper that can account
for this.

Signed-off-by: Hannes Laimer
---
changes since v12:
* just commit msg

 pbs-api-types/src/datastore.rs      |  5 +++++
 pbs-datastore/src/datastore.rs      | 11 +++++++----
 src/api2/node/disks/directory.rs    |  4 ++--
 src/server/metric_collection/mod.rs |  8 ++++++--
 4 files changed, 20 insertions(+), 8 deletions(-)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 31767417..a5704c93 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -357,6 +357,11 @@ impl DataStoreConfig {
         }
     }

+    /// Returns the absolute path to the datastore content.
+    pub fn absolute_path(&self) -> String {
+        self.path.clone()
+    }
+
     pub fn get_maintenance_mode(&self) -> Option<MaintenanceMode> {
         self.maintenance_mode.as_ref().and_then(|str| {
             MaintenanceMode::deserialize(proxmox_schema::de::SchemaDeserializer::new(
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index d0f3c53a..fb37bd5a 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -178,7 +178,7 @@ impl DataStore {
             )?;
             Arc::new(ChunkStore::open(
                 name,
-                &config.path,
+                config.absolute_path(),
                 tuning.sync_level.unwrap_or_default(),
             )?)
         };
@@ -262,8 +262,11 @@ impl DataStore {
             DatastoreTuning::API_SCHEMA
                 .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
         )?;
-        let chunk_store =
-            ChunkStore::open(&name, &config.path, tuning.sync_level.unwrap_or_default())?;
+        let chunk_store = ChunkStore::open(
+            &name,
+            config.absolute_path(),
+            tuning.sync_level.unwrap_or_default(),
+        )?;
         let inner = Arc::new(Self::with_store_and_config(
             Arc::new(chunk_store),
             config,
@@ -1387,7 +1390,7 @@ impl DataStore {
             bail!("datastore is currently in use");
         }

-        let base = PathBuf::from(&datastore_config.path);
+        let base = PathBuf::from(datastore_config.absolute_path());

         let mut ok = true;
         if destroy_data {
diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 06ad5ba1..7f540220 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -249,12 +249,12 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
     let (config, _) = pbs_config::datastore::config()?;
     let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
     let conflicting_datastore: Option<DataStoreConfig> =
-        datastores.into_iter().find(|ds| ds.path == path);
+        datastores.into_iter().find(|ds| ds.absolute_path() == path);

     if let Some(conflicting_datastore) = conflicting_datastore {
         bail!(
             "Can't remove '{}' since it's required by datastore '{}'",
-            conflicting_datastore.path,
+            conflicting_datastore.absolute_path(),
             conflicting_datastore.name
         );
     }
diff --git a/src/server/metric_collection/mod.rs b/src/server/metric_collection/mod.rs
index 3cbd7425..b95dba20 100644
--- a/src/server/metric_collection/mod.rs
+++ b/src/server/metric_collection/mod.rs
@@ -175,8 +175,12 @@ fn collect_disk_stats_sync() -> (DiskStat, Vec<DiskStat>) {
                 {
                     continue;
                 }
-                let path = Path::new(&config.path);
-                datastores.push(gather_disk_stats(disk_manager.clone(), path, &config.name));
+
+                datastores.push(gather_disk_stats(
+                    disk_manager.clone(),
+                    Path::new(&config.absolute_path()),
+                    &config.name,
+                ));
             }
         }
         Err(err) => {
-- 
2.39.5


From h.laimer at proxmox.com  Wed Nov 13 16:00:51 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 13 Nov 2024 16:00:51 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v13 15/26] ui: add removable
 datastore creation support
In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com>
Message-ID: <20241113150102.164820-16-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer
---
 www/window/DataStoreEdit.js | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js
index b8e866df..7b6aff1e 100644
--- a/www/window/DataStoreEdit.js
+++ b/www/window/DataStoreEdit.js
@@ -63,6 +63,20 @@ Ext.define('PBS.DataStoreEdit', {
 			emptyText: gettext('An absolute path'),
 			validator: val => val?.trim() !== '/',
 		    },
+		    {
+			xtype: 'pmxDisplayEditField',
+			fieldLabel: gettext('Device'),
+			name: 'backing-device',
+			disabled: true,
+			cbind: {
+			    editable: '{isCreate}',
+			},
+			editConfig: {
+			    xtype: 'pbsPartitionSelector',
+			    allowBlank: true,
+			},
+			emptyText: gettext('Device path'),
+		    },
 		],
 		column2: [
 		    {
@@ -88,6 +102,29 @@
 		    },
 		],
 		columnB: [
+		    {
+			xtype: 'checkbox',
+			boxLabel: gettext('Removable datastore'),
+			submitValue: false,
+			listeners: {
+			    change: function(checkbox, isRemovable) {
+				let inputPanel = checkbox.up('inputpanel');
+				let pathField = inputPanel.down('[name=path]');
+				let uuidField = inputPanel.down('pbsPartitionSelector[name=backing-device]');
+				let uuidEditField = inputPanel.down('[name=backing-device]');
+
+				uuidField.allowBlank = !isRemovable;
+				uuidEditField.setDisabled(!isRemovable);
+				uuidField.setDisabled(!isRemovable);
+				uuidField.setValue('');
+				if (isRemovable) {
+				    pathField.setFieldLabel(gettext('On device path'));
+				} else {
+				    pathField.setFieldLabel(gettext('Backing Path'));
+				}
+			    },
+			},
+		    },
 		    {
 			xtype: 'textfield',
 			name: 'comment',
-- 
2.39.5


From h.laimer at proxmox.com  Wed Nov 13 16:00:44 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 13 Nov 2024 16:00:44 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v13 08/26] api: removable
 datastore creation
In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com>
Message-ID: <20241113150102.164820-9-h.laimer@proxmox.com>

Devices can contain multiple datastores; the only limitation is that
they are not allowed to be nested. If the specified path already
contains a datastore, `reuse datastore` has to be set so it'll be added
without creating a chunkstore.

Signed-off-by: Hannes Laimer
---
changes since v12:
* use recently added 'reuse datastore'
* allow creation even if device is already used by datastore, just no
  nesting

 src/api2/config/datastore.rs | 50 +++++++++++++++++++++++++++++++-----
 1 file changed, 44 insertions(+), 6 deletions(-)

diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 374c302f..9140a7a4 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -20,7 +20,8 @@ use pbs_config::BackupLockGuard;
 use pbs_datastore::chunk_store::ChunkStore;

 use crate::api2::admin::{
-    prune::list_prune_jobs, sync::list_sync_jobs, verify::list_verification_jobs,
+    datastore::do_mount_device, prune::list_prune_jobs, sync::list_sync_jobs,
+    verify::list_verification_jobs,
 };
 use crate::api2::config::prune::{delete_prune_job, do_create_prune_job};
 use crate::api2::config::sync::delete_sync_job;
@@ -31,6 +32,7 @@ use pbs_config::CachedUserInfo;
 use proxmox_rest_server::WorkerTask;

 use crate::server::jobstate;
+use crate::tools::disks::unmount_by_mountpoint;

 #[api(
     input: {
@@ -72,7 +74,11 @@ pub(crate) fn do_create_datastore(
     datastore: DataStoreConfig,
     reuse_datastore: bool,
 ) -> Result<(), Error> {
-    let path: PathBuf = datastore.path.clone().into();
+    let path: PathBuf = datastore.absolute_path().into();
+    let need_unmount = datastore.get_mount_point().is_some() && {
+        do_mount_device(datastore.clone())?;
+        true
+    };

     if path.parent().is_none() {
         bail!("cannot create datastore in root path");
@@ -84,24 +90,32 @@
     )?;

     if reuse_datastore {
-        ChunkStore::verify_chunkstore(&path)?;
+        if let Err(e) = ChunkStore::verify_chunkstore(&path) {
+            let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
+            return Err(e);
+        }
     } else {
         if let Ok(dir) = std::fs::read_dir(&path) {
             for file in dir {
                 let name = file?.file_name();
                 if !name.to_str().map_or(false, |name| name.starts_with('.')) {
+                    let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
                     bail!("datastore path is not empty");
                 }
             }
         }
         let backup_user = pbs_config::backup_user()?;
-        let _store = ChunkStore::create(
+        let res = ChunkStore::create(
             &datastore.name,
-            path,
+            path.clone(),
             backup_user.uid,
             backup_user.gid,
             tuning.sync_level.unwrap_or_default(),
-        )?;
+        );
+        if let Err(e) = res {
+            let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
+            return Err(e);
+        }
     }

     config.set_data(&datastore.name, "datastore", &datastore)?;
@@ -145,6 +159,30 @@ pub fn
create_datastore( param_bail!("name", "datastore '{}' already exists.", config.name); } + if !config.path.starts_with("/") { + param_bail!("path", "expected an absolute path, '{}' is not", config.path); + } + + if let Some(uuid) = &config.backing_device { + for (store_name, (_, store_config)) in &section_config.sections { + if let (Some(store_uuid), Some(store_path)) = ( + store_config["backing-device"].as_str(), + store_config["path"].as_str(), + ) { + // We don't allow two datastores to be nested in each other, so if + // ds1: /a/b -> can't create new one at /, /a or /a/b/..., /a/c is fine + if store_uuid == uuid + && (store_path.starts_with(&config.path) || config.path.starts_with(store_path)) + { + param_bail!( + "path", + "can't nest datastores, '{store_name}' already in '{store_path}'", + ); + } + }; + } + } + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:50 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:50 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 14/26] ui: add partition selector form In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-15-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/Makefile | 1 + www/form/PartitionSelector.js | 81 +++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 www/form/PartitionSelector.js diff --git a/www/Makefile b/www/Makefile index 609a0ba6..45adfc54 100644 --- a/www/Makefile +++ b/www/Makefile @@ -49,6 +49,7 @@ JSSRC= \ form/NamespaceMaxDepth.js \ form/CalendarEvent.js \ form/PermissionPathSelector.js \ + form/PartitionSelector.js \ form/GroupSelector.js \ form/GroupFilter.js \ form/VerifyOutdatedAfter.js \ diff --git a/www/form/PartitionSelector.js b/www/form/PartitionSelector.js new file mode 100644 index 00000000..162dbe41 --- /dev/null +++ b/www/form/PartitionSelector.js @@ -0,0 +1,81 @@ +Ext.define('pbs-partition-list', { + extend: 'Ext.data.Model', + fields: ['name', 'uuid', 'filesystem', 'devpath', 'size', 'model'], + proxy: { + type: 'proxmox', + url: "/api2/json/nodes/localhost/disks/list?skipsmart=1&include-partitions=1", + reader: { + transform: (rawData) => rawData.data + .flatMap(disk => (disk.partitions + ?.map(part => ({ ...part, model: disk.model })) ?? 
[]) + .filter(partition => partition.used === 'filesystem')), + }, + }, + idProperty: 'devpath', + +}); + +Ext.define('PBS.form.PartitionSelector', { + extend: 'Proxmox.form.ComboGrid', + alias: 'widget.pbsPartitionSelector', + + allowBlank: false, + autoSelect: false, + submitEmpty: false, + valueField: 'uuid', + displayField: 'devpath', + + store: { + model: 'pbs-partition-list', + autoLoad: true, + sorters: 'devpath', + }, + getSubmitData: function() { + let me = this; + let data = null; + if (!me.disabled && me.submitValue && !me.isFileUpload()) { + let val = me.getSubmitValue(); + if (val !== undefined && val !== null && val !== '') { + data = {}; + data[me.getName()] = val; + } else if (me.getDeleteEmpty()) { + data = {}; + data.delete = me.getName(); + } + } + return data; + }, + listConfig: { + columns: [ + { + header: gettext('Path'), + sortable: true, + dataIndex: 'devpath', + renderer: (v, metaData, rec) => Ext.String.htmlEncode(v), + flex: 1, + }, + { + header: gettext('Filesystem'), + sortable: true, + dataIndex: 'filesystem', + flex: 1, + }, + { + header: gettext('Size'), + sortable: true, + dataIndex: 'size', + renderer: Proxmox.Utils.format_size, + flex: 1, + }, + { + header: gettext('Model'), + sortable: true, + dataIndex: 'model', + flex: 1, + }, + ], + viewConfig: { + emptyText: 'No usable partitions present', + }, + }, +}); -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:46 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:46 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 10/26] bin: manager: add (un)mount command In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-11-h.laimer@proxmox.com> We can't just directly delegate these commands to the API endpoints since both mounting and unmounting are done in a worker, and that one would be killed when the parent ends. In this case that would be the CLI process, which basically ends right after spawning the worker.
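For context, the gist of the pattern both CLI wrappers below follow (a sketch only; `wait_for_local_worker` is the helper this patch calls, everything else is taken from the diff):

```rust
// Start the task through the API handler, then block on the returned
// UPID so the worker is not orphaned when the CLI process exits.
let upid = match info.handler {
    ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
    _ => unreachable!(),
};
crate::wait_for_local_worker(upid.as_str().unwrap()).await?;
```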
Signed-off-by: Hannes Laimer --- pbs-config/src/datastore.rs | 14 ++++ src/bin/proxmox_backup_manager/datastore.rs | 76 ++++++++++++++++++++- 2 files changed, 89 insertions(+), 1 deletion(-) diff --git a/pbs-config/src/datastore.rs b/pbs-config/src/datastore.rs index dc5bb3da..396dcb37 100644 --- a/pbs-config/src/datastore.rs +++ b/pbs-config/src/datastore.rs @@ -62,6 +62,20 @@ pub fn complete_datastore_name(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> } } +pub fn complete_removable_datastore_name( + _arg: &str, + _param: &HashMap<String, String>, +) -> Vec<String> { + match config() { + Ok((data, _digest)) => data + .sections + .into_iter() + .filter_map(|(name, (_, c))| c.get("backing-device").map(|_| name)) + .collect(), + Err(_) => Vec::new(), + } +} + pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> { let mut list = vec![ String::from("/"), diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs index 383bcd24..f2795b39 100644 --- a/src/bin/proxmox_backup_manager/datastore.rs +++ b/src/bin/proxmox_backup_manager/datastore.rs @@ -1,4 +1,4 @@ -use anyhow::Error; +use anyhow::{format_err, Error}; use serde_json::Value; use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; @@ -40,6 +40,34 @@ fn list_datastores(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> +#[api( + protected: true, + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + }, + }, +)] +/// Mount a removable datastore. +async fn mount_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + param["node"] = "localhost".into(); + + let info = &api2::admin::datastore::API_METHOD_MOUNT; + let result = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + Ok(()) +} + #[api( input: { properties: { @@ -99,6 +127,34 @@ async fn create_datastore(mut param: Value) -> Result<Value, Error> { Ok(Value::Null) } +#[api( + protected: true, + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, +)] +/// Unmount a removable datastore.
+async fn unmount_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + param["node"] = "localhost".into(); + + let info = &api2::admin::datastore::API_METHOD_UNMOUNT; + let result = match info.handler { + ApiHandler::Async(handler) => (handler)(param, info, rpcenv).await?, + _ => unreachable!(), + }; + + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + Ok(()) +} + #[api( protected: true, input: { @@ -142,6 +198,15 @@ async fn delete_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> pub fn datastore_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) + .insert( + "mount", + CliCommand::new(&API_METHOD_MOUNT_DATASTORE) + .arg_param(&["store"]) + .completion_cb( + "store", + pbs_config::datastore::complete_removable_datastore_name, + ), + ) .insert( "show", CliCommand::new(&API_METHOD_SHOW_DATASTORE) @@ -152,6 +217,15 @@ pub fn datastore_commands() -> CommandLineInterface { "create", CliCommand::new(&API_METHOD_CREATE_DATASTORE).arg_param(&["name", "path"]), ) + .insert( + "unmount", + CliCommand::new(&API_METHOD_UNMOUNT_DATASTORE) + .arg_param(&["store"]) + .completion_cb( + "store", + pbs_config::datastore::complete_removable_datastore_name, + ), + ) .insert( "update", CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE) -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:57 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:57 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 21/26] ui: render 'unmount' maintenance mode correctly In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-22-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/Utils.js | 4 +++- www/window/MaintenanceOptions.js | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/www/Utils.js b/www/Utils.js index 7756e9b5..6bae9b70 100644 --- a/www/Utils.js +++ b/www/Utils.js @@ -775,7 +775,7 @@ Ext.define('PBS.Utils', { let extra = ''; if (activeTasks !== undefined) { - const conflictingTasks = activeTasks.write + (type === 'offline' ? activeTasks.read : 0); + const conflictingTasks = activeTasks.write + (type === 'offline' || type === 'unmount' ? 
activeTasks.read : 0); if (conflictingTasks > 0) { extra += '| '; @@ -795,6 +795,8 @@ Ext.define('PBS.Utils', { break; case 'offline': modeText = gettext("Offline"); break; + case 'unmount': modeText = gettext("Unmounting"); + break; } return `${modeText} ${extra}`; }, diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js index 527c3698..d7348cb4 100644 --- a/www/window/MaintenanceOptions.js +++ b/www/window/MaintenanceOptions.js @@ -52,6 +52,7 @@ Ext.define('PBS.window.MaintenanceOptions', { items: [ { xtype: 'pbsMaintenanceType', + id: 'type-field', name: 'maintenance-type', fieldLabel: gettext('Maintenance Type'), value: '__default__', @@ -85,6 +86,15 @@ Ext.define('PBS.window.MaintenanceOptions', { }; } + let unmounting = options['maintenance-type'] === 'unmount'; + let defaultType = options['maintenance-type'] === '__default__'; + if (unmounting) { + options['maintenance-type'] = ''; + } + me.callParent([options]); + + Ext.ComponentManager.get('type-field').setDisabled(unmounting); + Ext.ComponentManager.get('message-field').setDisabled(unmounting || defaultType); }, }); -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:56 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:56 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 20/26] ui: maintenance: fix disable msg field if no type is selected In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-21-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/window/MaintenanceOptions.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js index 1ee92542..527c3698 100644 --- a/www/window/MaintenanceOptions.js +++ b/www/window/MaintenanceOptions.js @@ -56,12 +56,17 @@ Ext.define('PBS.window.MaintenanceOptions', { fieldLabel: gettext('Maintenance Type'), value: '__default__', deleteEmpty: true, + listeners: { + change: (field, newValue) => { + Ext.getCmp('message-field').setDisabled(newValue === '__default__'); + }, + }, }, { xtype: 'proxmoxtextfield', + id: 'message-field', name: 'maintenance-msg', fieldLabel: gettext('Description'), - // FIXME: disable if maintenance type is none }, ], }, -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:53 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:53 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 17/26] ui: tree: render unmounted datastores correctly In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-18-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/NavigationTree.js | 17 ++++++++++++++--- www/css/ext6-pbs.css | 8 ++++++++ www/datastore/DataStoreListSummary.js | 1 + 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/www/NavigationTree.js b/www/NavigationTree.js index 53c8daff..9c7c9208 100644 --- a/www/NavigationTree.js +++ b/www/NavigationTree.js @@ -267,13 +267,24 @@ Ext.define('PBS.view.main.NavigationTree', { j++; } - let [qtip, iconCls] = ['', 'fa fa-database']; + let mainIcon = `fa fa-${records[i].data.removable ? 
'plug' : 'database'}`; + let [qtip, iconCls] = ['', mainIcon]; const maintenance = records[i].data.maintenance; + + // mount-status does only exist for removable datastores + const removable_not_mounted = records[i].data['mount-status'] === false; + if (removable_not_mounted) { + iconCls = `${mainIcon} pmx-tree-icon-custom unplugged`; + qtip = gettext('Removable datastore not mounted'); + } if (maintenance) { const [type, message] = PBS.Utils.parseMaintenanceMode(maintenance); qtip = `${type}${message ? ': ' + message : ''}`; - let maintenanceTypeCls = type === 'delete' ? 'destroying' : 'maintenance'; - iconCls = `fa fa-database pmx-tree-icon-custom ${maintenanceTypeCls}`; + let maintenanceTypeCls = 'maintenance'; + if (type === 'delete') { + maintenanceTypeCls = 'destroying'; + } + iconCls = `${mainIcon} pmx-tree-icon-custom ${maintenanceTypeCls}`; } if (getChildTextAt(j).localeCompare(name) !== 0) { diff --git a/www/css/ext6-pbs.css b/www/css/ext6-pbs.css index c33ce684..706e681e 100644 --- a/www/css/ext6-pbs.css +++ b/www/css/ext6-pbs.css @@ -271,6 +271,10 @@ span.snapshot-comment-column { content: "\ "; } +.x-treelist-item-icon.fa-plug, .pmx-tree-icon-custom.fa-plug { + font-size: 12px; +} + /* datastore maintenance */ .pmx-tree-icon-custom.maintenance:after { content: "\f0ad"; @@ -290,6 +294,10 @@ span.snapshot-comment-column { color: #888; } +.pmx-tree-icon-custom.unplugged:before { + color: #888; +} + /*' PBS specific icons */ .pbs-icon-tape { diff --git a/www/datastore/DataStoreListSummary.js b/www/datastore/DataStoreListSummary.js index b908034d..f7ea83e7 100644 --- a/www/datastore/DataStoreListSummary.js +++ b/www/datastore/DataStoreListSummary.js @@ -22,6 +22,7 @@ Ext.define('PBS.datastore.DataStoreListSummary', { stillbad: 0, deduplication: 1.0, error: "", + removable: false, maintenance: '', }, }, -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:01:00 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:01:00 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 24/26] node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-25-h.laimer@proxmox.com> ... since they do have the same value. 
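For reference, the two constants side by side (both appear elsewhere in this series); the only textual difference is the trailing slash, which is why the format strings in the hunks below change from `"{}{}"` to `"{}/{}"`:

```rust
// Old, private to directory.rs, trailing slash included:
const BASE_MOUNT_DIR: &str = "/mnt/datastore/";
// New, shared via pbs-api-types (patch 03), no trailing slash,
// so call sites join base and name with an explicit '/':
pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore";
```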
Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 21d2bcc4..139e753d 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -11,8 +11,8 @@ use proxmox_schema::api; use proxmox_section_config::SectionConfigData; use pbs_api_types::{ - DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, - PRIV_SYS_MODIFY, UPID_SCHEMA, + DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_MOUNT_DIR, DATASTORE_SCHEMA, NODE_SCHEMA, + PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA, }; use crate::tools::disks::{ @@ -23,8 +23,6 @@ use crate::tools::systemd::{self, types::*}; use proxmox_rest_server::WorkerTask; -const BASE_MOUNT_DIR: &str = "/mnt/datastore/"; - #[api( properties: { "filesystem": { @@ -91,7 +89,7 @@ pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> { let name = data .Where - .strip_prefix(BASE_MOUNT_DIR) + .strip_prefix(DATASTORE_MOUNT_DIR) .unwrap_or(&data.Where) .to_string(); @@ -232,7 +230,7 @@ pub fn create_datastore_disk( return Ok(upid_str); }; - let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); + let mount_point = format!("{}/{}", DATASTORE_MOUNT_DIR, &name); // check if the default path exists already. // bail if it is not empty or another filesystem mounted on top let default_path = std::path::PathBuf::from(&mount_point); @@ -240,7 +238,7 @@ pub fn create_datastore_disk( match std::fs::metadata(&default_path) { Err(_) => {} // path does not exist Ok(stat) => { - let basedir_dev = std::fs::metadata(BASE_MOUNT_DIR)?.st_dev(); + let basedir_dev = std::fs::metadata(DATASTORE_MOUNT_DIR)?.st_dev(); if stat.st_dev() != basedir_dev { bail!("path {default_path:?} already exists and is mountpoint"); } @@ -319,7 +317,7 @@ pub fn create_datastore_disk( )] /// Remove a Filesystem mounted under `/mnt/datastore/<name>`. pub fn delete_datastore_disk(name: String) -> Result<(), Error> { - let path = format!("{}{}", BASE_MOUNT_DIR, name); + let path = format!("{}/{}", DATASTORE_MOUNT_DIR, name); // path of datastore cannot be changed let (config, _) = pbs_config::datastore::config()?; let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?; -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:01:02 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:01:02 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 26/26] bin: debug: add inspect device command In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-27-h.laimer@proxmox.com> ... to get information about (removable) datastores a device contains Signed-off-by: Hannes Laimer --- new since v12. Something like this would also make sense to somehow integrate into the UI, making it easier to select and add existing datastores on the device. But since the device has to be mounted, I was not sure where it makes sense, except maybe after selecting a device, so the `path` field could be some sort of selection. But we'd need a new endpoint and this can definitely also be added later, so I did not include anything in this series.
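For illustration, a hypothetical invocation (device path and counts are made up; the output follows the text branch implemented below):

```
# proxmox-backup-debug inspect device /dev/sdb1
Device contains 2 stores
---------------
Datastore at "/store1" | VM: 2, CT: 1, HOST: 0, NS: 0
Datastore at "/backup/store2" | VM: 0, CT: 0, HOST: 3, NS: 1
```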
src/bin/proxmox_backup_debug/inspect.rs | 149 ++++++++++++++++++++++++ 1 file changed, 149 insertions(+) diff --git a/src/bin/proxmox_backup_debug/inspect.rs b/src/bin/proxmox_backup_debug/inspect.rs index 28a472b0..17df09be 100644 --- a/src/bin/proxmox_backup_debug/inspect.rs +++ b/src/bin/proxmox_backup_debug/inspect.rs @@ -331,6 +331,151 @@ fn inspect_file( Ok(()) } +/// Return the count of VM, CT and host backup groups and the count of namespaces +/// as this tuple (vm, ct, host, ns) +fn get_basic_ds_info(path: String) -> Result<(i64, i64, i64, i64), Error> { + let mut vms = 0; + let mut cts = 0; + let mut hosts = 0; + let mut ns = 0; + let mut walker = WalkDir::new(path).into_iter(); + + while let Some(entry_result) = walker.next() { + let entry = entry_result?; + if !entry.file_type().is_dir() { + continue; + } + + let Some(name) = entry.path().file_name().and_then(|a| a.to_str()) else { + continue; + }; + + if name == ".chunks" { + walker.skip_current_dir(); + continue; + } + + let dir_count = std::fs::read_dir(entry.path())? + .filter_map(Result::ok) + .filter(|entry| entry.path().is_dir()) + .count() as i64; + + match name { + "ns" => ns += dir_count, + "vm" => { + vms += dir_count; + walker.skip_current_dir(); + } + "ct" => { + cts += dir_count; + walker.skip_current_dir(); + } + "host" => { + hosts += dir_count; + walker.skip_current_dir(); + } + _ => { + // root or ns dir + } + } + } + + Ok((vms, cts, hosts, ns)) +} + +#[api( + input: { + properties: { + device: { + description: "Device path, usually /dev/...", + type: String, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Inspect a device for possible datastores on it +fn inspect_device(device: String, param: Value) -> Result<(), Error> { + let output_format = get_output_format(&param); + let tmp_mount_path = format!( + "{}/{:x}", + pbs_buildcfg::rundir!("/mount"), + proxmox_uuid::Uuid::generate() + ); + + let default_options = proxmox_sys::fs::CreateOptions::new(); + proxmox_sys::fs::create_path( + &tmp_mount_path, + Some(default_options.clone()), + Some(default_options.clone()), + )?; + let mut mount_cmd = std::process::Command::new("mount"); + mount_cmd.arg(device.clone()); + mount_cmd.arg(tmp_mount_path.clone()); + proxmox_sys::command::run_command(mount_cmd, None)?; + + let mut walker = WalkDir::new(tmp_mount_path.clone()).into_iter(); + + let mut stores = Vec::new(); + + let mut ds_count = 0; + while let Some(entry_result) = walker.next() { + let entry = entry_result?; + + if entry.file_type().is_dir() + && entry + .file_name() + .to_str() + .map_or(false, |name| name == ".chunks") + { + let store_path = entry + .path() + .to_str() + .and_then(|n| n.strip_suffix("/.chunks")); + + if let Some(store_path) = store_path { + ds_count += 1; + let (vm, ct, host, ns) = get_basic_ds_info(store_path.to_string())?; + stores.push(json!({ + "path": store_path.strip_prefix(&tmp_mount_path).unwrap_or("???"), + "vm-count": vm, + "ct-count": ct, + "host-count": host, + "ns-count": ns, + })); + }; + + walker.skip_current_dir(); + } + } + + let mut umount_cmd = std::process::Command::new("umount"); + umount_cmd.arg(tmp_mount_path.clone()); + proxmox_sys::command::run_command(umount_cmd, None)?; + std::fs::remove_dir(std::path::Path::new(&tmp_mount_path))?; + + if output_format == "text" { + println!("Device contains {} stores", ds_count); + println!("---------------"); + for s in stores { + println!( + "Datastore at {} | VM: {}, CT: {}, HOST: {}, NS: {}", + s["path"], s["vm-count"], s["ct-count"],
s["host-count"], s["ns-count"] + ); + } + } else { + format_and_print_result( + &json!({"store_count": stores.len(), "stores": stores}), + &output_format, + ); + } + + Ok(()) +} + pub fn inspect_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert( @@ -340,6 +485,10 @@ pub fn inspect_commands() -> CommandLineInterface { .insert( "file", CliCommand::new(&API_METHOD_INSPECT_FILE).arg_param(&["file"]), + ) + .insert( + "device", + CliCommand::new(&API_METHOD_INSPECT_DEVICE).arg_param(&["device"]), ); cmd_def.into() -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:37 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:37 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 01/26] tools: add disks utility functions In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-2-h.laimer@proxmox.com> ... for mounting and unmounting Signed-off-by: Hannes Laimer --- changes since v12: * use &Path everywhere, instead of &str src/tools/disks/mod.rs | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs index 8c479e94..10c4eed0 100644 --- a/src/tools/disks/mod.rs +++ b/src/tools/disks/mod.rs @@ -1338,3 +1338,33 @@ pub fn get_fs_uuid(disk: &Disk) -> Result { bail!("get_fs_uuid failed - missing UUID"); } + +/// Mount a disk by its UUID and the mount point. +pub fn mount_by_uuid(uuid: &str, mount_point: &Path) -> Result<(), Error> { + let mut command = std::process::Command::new("mount"); + command.arg(&format!("UUID={uuid}")); + command.arg(mount_point); + + proxmox_sys::command::run_command(command, None)?; + Ok(()) +} + +/// Create bind mount. +pub fn bind_mount(path: &Path, target: &Path) -> Result<(), Error> { + let mut command = std::process::Command::new("mount"); + command.arg("--bind"); + command.arg(path); + command.arg(target); + + proxmox_sys::command::run_command(command, None)?; + Ok(()) +} + +/// Unmount a disk by its mount point. +pub fn unmount_by_mountpoint(path: &Path) -> Result<(), Error> { + let mut command = std::process::Command::new("umount"); + command.arg(path); + + proxmox_sys::command::run_command(command, None)?; + Ok(()) +} -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:39 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:39 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 03/26] pbs-api-types: add backing-device to DataStoreConfig In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-4-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- changes since v12: * clearify/improve description of `DATASTORE_DIR_NAME_SCHAME` pbs-api-types/src/datastore.rs | 31 ++++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index a5704c93..f6c255d3 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -42,7 +42,7 @@ const_regex! 
{ pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); -pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") +pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or a relative on-device path for removable datastores.") .min_length(1) .max_length(4096) .schema(); @@ -160,6 +160,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .minimum(1) .schema(); +/// Base directory where datastores are mounted +pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; + #[api] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -234,7 +237,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore schema: DATASTORE_SCHEMA, }, path: { - schema: DIR_NAME_SCHEMA, + schema: DATASTORE_DIR_NAME_SCHEMA, }, "notify-user": { optional: true, @@ -273,6 +276,12 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), type: String, }, + "backing-device": { + description: "The UUID of the filesystem partition for removable datastores.", + optional: true, + format: &proxmox_schema::api_types::UUID_FORMAT, + type: String, + } } )] #[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] @@ -320,6 +329,11 @@ pub struct DataStoreConfig { /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " #[serde(skip_serializing_if = "Option::is_none")] pub maintenance_mode: Option<String>, + + /// The UUID of the device (for removable datastores) + #[updater(skip)] + #[serde(skip_serializing_if = "Option::is_none")] + pub backing_device: Option<String>, } #[api] @@ -354,12 +368,23 @@ impl DataStoreConfig { notification_mode: None, tuning: None, maintenance_mode: None, + backing_device: None, } } /// Returns the absolute path to the datastore content. pub fn absolute_path(&self) -> String { - self.path.clone() + if self.backing_device.is_some() { + format!("{DATASTORE_MOUNT_DIR}/{}", self.name) + } else { + self.path.clone() + } + } + + pub fn get_mount_point(&self) -> Option<String> { + self.backing_device + .is_some() + .then(|| format!("{DATASTORE_MOUNT_DIR}/{}", self.name)) } pub fn get_maintenance_mode(&self) -> Option<MaintenanceMode> { -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:41 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:41 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 05/26] disks: add UUID to partition info In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-6-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/tools/disks/mod.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs index 10c4eed0..9f47be36 100644 --- a/src/tools/disks/mod.rs +++ b/src/tools/disks/mod.rs @@ -57,6 +57,8 @@ pub struct LsblkInfo { /// File system label. #[serde(rename = "fstype")] file_system_type: Option<String>, + /// File system UUID. + uuid: Option<String>, } impl DiskManage { @@ -615,7 +617,7 @@ pub struct BlockDevStat { /// Use lsblk to read partition type uuids and file system types.
pub fn get_lsblk_info() -> Result<Vec<LsblkInfo>, Error> { let mut command = std::process::Command::new("lsblk"); - command.args(["--json", "-o", "path,parttype,fstype"]); + command.args(["--json", "-o", "path,parttype,fstype,uuid"]); let output = proxmox_sys::command::run_command(command, None)?; @@ -701,6 +703,8 @@ pub struct PartitionInfo { pub size: Option<u64>, /// GPT partition pub gpt: bool, + /// UUID + pub uuid: Option<String>, } #[api( @@ -891,8 +895,10 @@ fn get_partitions_info( let mounted = disk.is_mounted().unwrap_or(false); let mut filesystem = None; + let mut uuid = None; if let Some(devpath) = devpath.as_ref() { for info in lsblk_infos.iter().filter(|i| i.path.eq(devpath)) { + uuid = info.uuid.clone(); used = match info.partition_type.as_deref() { Some("21686148-6449-6e6f-744e-656564454649") => PartitionUsageType::BIOS, Some("c12a7328-f81f-11d2-ba4b-00a0c93ec93b") => PartitionUsageType::EFI, @@ -915,6 +921,7 @@ fn get_partitions_info( filesystem, size: disk.size().ok(), gpt: disk.has_gpt(), + uuid, } }) .collect() -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:36 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:36 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 00/26] add removable datastores Message-ID: <20241113150102.164820-1-h.laimer@proxmox.com> These patches add support for removable datastores. All removable datastores have a backing-device (a UUID) associated with them. Removable datastores work like normal ones, except that they can be unplugged. It is possible to create a removable datastore, sync backups onto it, unplug it and use it on a different PBS. The datastore path specified is relative to the root of the used device. Removable datastores are bind mounted to /mnt/datastore/<name>. Multiple datastores can be created on a single device, but only devices with a single datastore on them will be auto-mounted. When a removable datastore is deleted and 'destroy-data' is set, the device has to be mounted. If 'destroy-data' is not set the datastore can be deleted even if the device is not present. Removable datastores are automatically mounted when plugged in.
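A rough sketch of the resulting layout for a hypothetical datastore `store1` with on-device path `backup/store1` (names made up):

```
/dev/disk/by-uuid/<UUID>      device referenced by 'backing-device'
└── backup/store1             path relative to the device root
    └── bind mounted at /mnt/datastore/store1
```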
v13: thanks @Fabian * allow multiple datastores on devices * replace `is_datastore_available` by a more specific function, it is now removable datastore specific and won't be called for normal ones * replace removable/is_available in status structs with mount_state, which is `None` for normal datastores, as it makes it less ambiguous what is meant * remove notion of 'available' from normal datastores and replace it with mounted/mount_status for removable ones, as it never really made sense in the first place * abort of an unmount task will now reset the maintenance mode * add check for race when setting maintenance at end of unmounting task * improve documentation and commit messages * remove not needed tokio::spawn * only auto mount devices with single datastore on them * drop patch that added flag for excluding used partitions * make auto mount service not dynamic * add debug command to scan devices for datastores they may contain * rebase onto master v12: thanks @Wolfgang * use bind mounts, so now /path/to/ds is mounted to /mnt/datastore/<name>; this is a bit cleaner and allows for multiple datastores on a single device to be mounted individually, if we want to allow that in the future * small code improvements v11: * rebase onto master v10: thanks @Gabriel and @Wolfgang * make is_datastore_available more robust * fix a lot of wording * drop format on uuid_mount command for UUID * only gather_disk_stats if datastore is available * overall code improvements * ui: include model in partition selector * rebased onto master v9: * change mount point to `/mnt/datastore/` * update "Directory" list UI * add `absolute_path()` from Dietmar's RFC * update docs v8: * still depends on [1] * paths for removable datastores are now relative to `/mnt/removable_datastore/` * add support for creation of removable datastore through the "create directory" endpoint (last 3 patches) * update datastore creation UI * update docs v7: * depends on [1] * improve logging when waiting for tasks * drop `update-datatore-cache` refactoring * fix some commit messages [1] https://lists.proxmox.com/pipermail/pbs-devel/2024-April/008739.html v6: * remove 'drop' flag in datastore cache * use maintenance-mode 'unmount' for unmounting process, only for the unmounting not for being unmounted * rename/simplify update-datastore-cache command * ui: integrate new unmounting maintenance mode * basically a mix of v3 and v4 v5: thanks @Dietmar and @Christian * drop --force for unmount since it'll always fail if tasks are still running, and if there are none, normal unmount will work * improve several commit messages * improve error message wording * add removable datastore section to docs * add documentation for is_datastore_available v4: thanks a lot @Dietmar and @Christian * make check if mounted wayyy faster * don't keep track of mounting state * drop Unplugged maintenance mode * use UUID_FORMAT for uuid field * a lot of small things, like use of bail!, inline format!, ...
* include improvement to cache handling v3: * remove lazy unmounting (since 9cba51ac782d04085c0af55128f32178e5132358 is applied) * fix CLI (un)mount command, thanks @Gabriel * add removable datastore CLI autocomplete helper * rebase onto master * move ui patches to the end thanks @Lukas and @Thomas for the feedback v2: * fix datastore 'add' button in the UI * some format!("{}", a) -> format!("{a}") * replace `const` with `let` in js code * change icon `fa-usb` -> `fa-plug` * add some docs * add JSDoc for parseMaintenanceMode * proxmox-schema dep bump Dietmar Maurer (2): config: factor out method to get the absolute datastore path maintenance: add 'Unmount' maintenance type Hannes Laimer (24): tools: add disks utility functions pbs-api-types: add backing-device to DataStoreConfig disks: add UUID to partition info datastore: add helper for checking if a datastore is mounted api: admin: add (un)mount endpoint for removable datastores api: removable datastore creation pbs-api-types: add mount_status field to DataStoreListItem bin: manager: add (un)mount command add auto-mounting for removable datastores datastore: handle deletion of removable datastore properly docs: add removable datastores section ui: add partition selector form ui: add removable datastore creation support ui: add (un)mount button to summary ui: tree: render unmounted datastores correctly ui: utils: make parseMaintenanceMode more robust ui: add datastore status mask for unmounted removable datastores ui: maintenance: fix disable msg field if no type is selected ui: render 'unmount' maintenance mode correctly api: node: allow creation of removable datastore through directory endpoint api: node: include removable datastores in directory list node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR ui: support create removable datastore through directory creation bin: debug: add inspect device command debian/proxmox-backup-server.install | 1 + debian/proxmox-backup-server.udev | 3 + docs/storage.rst | 38 +++ etc/Makefile | 3 +- etc/removable-device-attach@.service | 8 + pbs-api-types/src/datastore.rs | 46 +++- pbs-api-types/src/maintenance.rs | 7 +- pbs-config/src/datastore.rs | 14 + pbs-datastore/src/datastore.rs | 88 +++++- pbs-datastore/src/lib.rs | 2 +- src/api2/admin/datastore.rs | 289 ++++++++++++++++++-- src/api2/config/datastore.rs | 87 +++++- src/api2/node/disks/directory.rs | 104 ++++++- src/api2/status/mod.rs | 29 +- src/bin/proxmox_backup_debug/inspect.rs | 149 ++++++++++ src/bin/proxmox_backup_manager/datastore.rs | 136 ++++++++- src/server/metric_collection/mod.rs | 18 +- src/tools/disks/mod.rs | 39 ++- www/DirectoryList.js | 13 + www/Makefile | 1 + www/NavigationTree.js | 17 +- www/Utils.js | 33 ++- www/css/ext6-pbs.css | 20 ++ www/datastore/DataStoreListSummary.js | 1 + www/datastore/Summary.js | 113 +++++++- www/form/PartitionSelector.js | 81 ++++++ www/window/CreateDirectory.js | 14 + www/window/DataStoreEdit.js | 37 +++ www/window/MaintenanceOptions.js | 17 +- 29 files changed, 1328 insertions(+), 80 deletions(-) create mode 100644 etc/removable-device-attach@.service create mode 100644 www/form/PartitionSelector.js -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:42 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:42 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 06/26] datastore: add helper for checking if a datastore is mounted In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com>
Message-ID: <20241113150102.164820-7-h.laimer@proxmox.com> ... at a specific location. This is removable-datastore specific, so it takes both a UUID and a mount location. Co-authored-by: Wolfgang Bumiller Signed-off-by: Hannes Laimer --- changes since v12: * clarify documentation * make function more removable datastore specific to remove ambiguity about what it does and what it is meant for * only use for removable datastore pbs-api-types/src/maintenance.rs | 2 + pbs-datastore/src/datastore.rs | 73 +++++++++++++++++++++++++++++ pbs-datastore/src/lib.rs | 2 +- src/server/metric_collection/mod.rs | 10 ++++ 4 files changed, 86 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index fd4d3416..9f51292e 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -82,6 +82,8 @@ impl MaintenanceMode { /// task finishes, so all open files are closed. pub fn is_offline(&self) -> bool { self.ty == MaintenanceType::Offline + || self.ty == MaintenanceType::Unmount + || self.ty == MaintenanceType::Delete } pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> { diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index fb37bd5a..cadf9245 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -1,5 +1,6 @@ use std::collections::{HashMap, HashSet}; use std::io::{self, Write}; +use std::os::unix::ffi::OsStrExt; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; use std::sync::{Arc, LazyLock, Mutex}; @@ -14,6 +15,7 @@ use proxmox_schema::ApiType; use proxmox_sys::error::SysError; use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions}; use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard}; +use proxmox_sys::linux::procfs::MountInfo; use proxmox_sys::process_locker::ProcessLockSharedGuard; use proxmox_worker_task::WorkerTaskContext; @@ -46,6 +48,55 @@ pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> Ok(()) } +/// Check if a device with a given UUID is currently mounted at store_mount_point by +/// comparing the `st_rdev` values of `/dev/disk/by-uuid/<uuid>` and the source device in +/// /proc/self/mountinfo. +/// +/// If we can't check if it is mounted, we treat that as not mounted, +/// returning false.
+/// +/// Reasons it could fail other than not being mounted where expected: +/// - could not read /proc/self/mountinfo +/// - could not stat /dev/disk/by-uuid/<uuid> +/// - /dev/disk/by-uuid/<uuid> is not a block device +/// +/// Since these are very much out of our control, there is no real value in distinguishing +/// between them, so for this function they are all treated as 'device not mounted' +pub fn is_datastore_mounted_at(store_mount_point: String, device_uuid: String) -> bool { + use nix::sys::stat::SFlag; + + let store_mount_point = Path::new(&store_mount_point); + + let dev_node = match nix::sys::stat::stat(format!("/dev/disk/by-uuid/{device_uuid}").as_str()) { + Ok(stat) if SFlag::from_bits_truncate(stat.st_mode) == SFlag::S_IFBLK => stat.st_rdev, + _ => return false, + }; + + let Ok(mount_info) = MountInfo::read() else { + return false; + }; + + for (_, entry) in mount_info { + let Some(source) = entry.mount_source else { + continue; + }; + + if entry.mount_point != store_mount_point || !source.as_bytes().starts_with(b"/") { + continue; + } + + if let Ok(stat) = nix::sys::stat::stat(source.as_os_str()) { + let sflag = SFlag::from_bits_truncate(stat.st_mode); + + if sflag == SFlag::S_IFBLK && stat.st_rdev == dev_node { + return true; + } + } + } + + false +} + /// Datastore Management /// /// A Datastore can store several backups, and provides the @@ -154,6 +205,18 @@ impl DataStore { bail!("datastore '{name}' is in {error}"); } } + let mount_status = config + .get_mount_point() + .zip(config.backing_device.as_ref()) + .map(|(mount_point, device_uuid)| { + is_datastore_mounted_at(mount_point, device_uuid.to_string()) + }); + + if mount_status == Some(false) { + let mut datastore_cache = DATASTORE_MAP.lock().unwrap(); + datastore_cache.remove(&config.name); + bail!("Removable Datastore is not mounted"); + } let mut datastore_cache = DATASTORE_MAP.lock().unwrap(); let entry = datastore_cache.get(name); @@ -258,6 +321,16 @@ impl DataStore { ) -> Result<Arc<Self>, Error> { let name = config.name.clone(); + let mount_status = config + .get_mount_point() + .zip(config.backing_device.as_ref()) + .map(|(mount_point, device_uuid)| { + is_datastore_mounted_at(mount_point, device_uuid.to_string()) + }); + if mount_status == Some(false) { + bail!("Datastore is not available") + } + let tuning: DatastoreTuning = serde_json::from_value( DatastoreTuning::API_SCHEMA .parse_property_string(config.tuning.as_deref().unwrap_or(""))?, diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs index 202b0955..34113261 100644 --- a/pbs-datastore/src/lib.rs +++ b/pbs-datastore/src/lib.rs @@ -204,7 +204,7 @@ pub use manifest::BackupManifest; pub use store_progress::StoreProgress; mod datastore; -pub use datastore::{check_backup_owner, DataStore}; +pub use datastore::{check_backup_owner, is_datastore_mounted_at, DataStore}; mod hierarchy; pub use hierarchy::{ diff --git a/src/server/metric_collection/mod.rs b/src/server/metric_collection/mod.rs index b95dba20..edba512c 100644 --- a/src/server/metric_collection/mod.rs +++ b/src/server/metric_collection/mod.rs @@ -176,6 +176,16 @@ fn collect_disk_stats_sync() -> (DiskStat, Vec<DiskStat>) { continue; } + let mount_status = config + .get_mount_point() + .zip(config.backing_device.as_ref()) + .map(|(mount_point, device_uuid)| { + pbs_datastore::is_datastore_mounted_at(mount_point, device_uuid.to_string()) + }); + if mount_status == Some(false) { + continue; + } + datastores.push(gather_disk_stats( disk_manager.clone(), Path::new(&config.absolute_path()), -- 2.39.5 From h.laimer at
proxmox.com Wed Nov 13 16:00:49 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:49 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 13/26] docs: add removable datastores section In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-14-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- docs/storage.rst | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/docs/storage.rst b/docs/storage.rst index f1e15d52..d9871e79 100644 --- a/docs/storage.rst +++ b/docs/storage.rst @@ -165,6 +165,44 @@ following command creates a new datastore called ``store1`` on # proxmox-backup-manager datastore create store1 /backup/disk1/store1 +Removable Datastores +^^^^^^^^^^^^^^^^^^^^ +Removable datastores have a ``backing-device`` associated with them; they can be +mounted and unmounted. Other than that they behave the same way a normal datastore +would. + +They can be created on already correctly formatted partitions, which, as with normal +datastores, should be either ``ext4`` or ``xfs``. It is also possible to create them +on completely unused disks through "Administration" > "Disks / Storage" > "Directory"; +using this method the disk will be partitioned and formatted automatically for the datastore. + +Devices with only one datastore on them will be mounted automatically. It is possible to create a +removable datastore on one PBS and use it on multiple instances; the device just has to be added +on each instance as a removable datastore by checking "reuse datastore" on creation. +If the device already contains a datastore at the specified path it'll just be added as +a new datastore to the PBS instance and will be mounted whenever plugged in. Unmounting has +to be done through the UI by clicking "Unmount" on the summary page or using the CLI. + +A single device can house multiple datastores; the only limitation is that they are not +allowed to be nested. + +.. code-block:: console + + # proxmox-backup-manager datastore unmount store1 + +Both will wait for any running tasks to finish and unmount the device. + +All removable datastores are mounted under /mnt/datastore/<name>, and the specified path +refers to the path on the device. + +All datastores present on a device can be listed using ``proxmox-backup-debug``. + +.. code-block:: console + + # proxmox-backup-debug inspect device /dev/... + + + Managing Datastores ^^^^^^^^^^^^^^^^^^^ -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:48 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:48 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 12/26] datastore: handle deletion of removable datastore properly In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-13-h.laimer@proxmox.com> Data deletion is only possible if the datastore is mounted; no attempt to mount it for the purpose of deleting data is made.
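In short, the rule the hunks below implement (a sketch only; `mount_status` is `None` for regular datastores and `Some(true)`/`Some(false)` for mounted/unmounted removable ones, as introduced earlier in this series):

```rust
// Plain config removal works even while the device is unplugged;
// destroying the on-disk data requires the datastore to be mounted.
if destroy_data && mount_status == Some(false) {
    http_bail!(
        BAD_REQUEST,
        "cannot destroy data on '{name}' unless the datastore is mounted"
    );
}
```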
Signed-off-by: Hannes Laimer --- pbs-datastore/src/datastore.rs | 4 +++- src/api2/config/datastore.rs | 37 +++++++++++++++++++++++++++++++++- 2 files changed, 39 insertions(+), 2 deletions(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index cadf9245..83e4dcb0 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -1525,7 +1525,9 @@ impl DataStore { // weird, but ok } Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => { - warn!("Cannot delete datastore directory (is it a mount point?).") + if datastore_config.backing_device.is_none() { + warn!("Cannot delete datastore directory (is it a mount point?).") + } } Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => { warn!("Datastore directory not empty, not deleting.") diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 9140a7a4..60bff9e2 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -1,4 +1,4 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use ::serde::{Deserialize, Serialize}; use anyhow::{bail, Error}; @@ -29,6 +29,7 @@ use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_bac use crate::api2::config::verify::delete_verification_job; use pbs_config::CachedUserInfo; +use pbs_datastore::is_datastore_mounted_at; use proxmox_rest_server::WorkerTask; use crate::server::jobstate; @@ -557,6 +558,21 @@ pub async fn delete_datastore( http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name); } + let store_config: DataStoreConfig = config.lookup("datastore", &name)?; + let mount_status = store_config + .get_mount_point() + .zip(store_config.backing_device.as_ref()) + .map(|(mount_point, device_uuid)| { + is_datastore_mounted_at(mount_point, device_uuid.to_string()) + }); + + if destroy_data && mount_status == Some(false) { + http_bail!( + BAD_REQUEST, + "cannot destroy data on '{name}' unless the datastore is mounted" + ); + } + if !keep_job_configs { for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { delete_verification_job(job.config.id, None, rpcenv)? 
@@ -583,6 +599,19 @@ pub async fn delete_datastore( let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + let name_copy = name.clone(); + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) + { + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); + let _ = proxmox_daemon::command_socket::send_raw( + sock, + &format!( + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", + name_copy + ), + ) + .await; + }; let upid = WorkerTask::new_thread( "delete-datastore", @@ -595,6 +624,12 @@ pub async fn delete_datastore( // ignore errors let _ = jobstate::remove_state_file("prune", &name); let _ = jobstate::remove_state_file("garbage_collection", &name); + if destroy_data { + if let Some(mount_point) = store_config.get_mount_point() { + let _ = unmount_by_mountpoint(Path::new(&mount_point)); + let _ = std::fs::remove_dir(&mount_point); + } + } if let Err(err) = proxmox_async::runtime::block_on(crate::server::notify_datastore_removed()) -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:45 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:45 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 09/26] pbs-api-types: add mount_status field to DataStoreListItem In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-10-h.laimer@proxmox.com> Only removable datastores have a mount status, so normal ones will have `None`, and for removable ones it is either mounted (`Some(true)`) or not mounted (`Some(false)`). Signed-off-by: Hannes Laimer --- changes since v12: * replace is_available+removable field combo with single mount_status field pbs-api-types/src/datastore.rs | 9 ++++++++- src/api2/admin/datastore.rs | 22 ++++++++++++++-------- src/api2/status/mod.rs | 29 +++++++++++++++++++++++++---- 3 files changed, 47 insertions(+), 13 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 888f5d5b..e111d692 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -454,6 +454,9 @@ impl DataStoreConfig { pub struct DataStoreListItem { pub store: String, pub comment: Option<String>, + /// Is datastore mounted, None for not-removable datastores + #[serde(skip_serializing_if = "Option::is_none")] + pub mount_status: Option<bool>, /// If the datastore is in maintenance mode, information about it #[serde(skip_serializing_if = "Option::is_none")] pub maintenance: Option<String>, @@ -1453,6 +1456,9 @@ pub struct DataStoreStatusListItem { /// The available bytes of the underlying storage. (-1 on error) #[serde(skip_serializing_if = "Option::is_none")] pub avail: Option<u64>, + /// The datastore is mounted, None for not-removable datastores + #[serde(skip_serializing_if = "Option::is_none")] + pub mount_status: Option<bool>, /// A list of usages of the past (last Month).
#[serde(skip_serializing_if = "Option::is_none")] pub history: Option>>, @@ -1477,12 +1483,13 @@ pub struct DataStoreStatusListItem { } impl DataStoreStatusListItem { - pub fn empty(store: &str, err: Option) -> Self { + pub fn empty(store: &str, err: Option, mount_status: Option) -> Self { DataStoreStatusListItem { store: store.to_owned(), total: None, used: None, avail: None, + mount_status, history: None, history_start: None, history_delta: None, diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index a12262e7..a9d9040f 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -1310,8 +1310,8 @@ pub fn get_datastore_list( let mut list = Vec::new(); - for (store, (_, data)) in &config.sections { - let acl_path = &["datastore", store]; + for (store, (_, data)) in config.sections { + let acl_path = &["datastore", &store]; let user_privs = user_info.lookup_privs(&auth_id, acl_path); let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; @@ -1322,15 +1322,21 @@ pub fn get_datastore_list( } } + let store_config: DataStoreConfig = serde_json::from_value(data)?; + + let mount_status = store_config + .get_mount_point() + .zip(store_config.backing_device.as_ref()) + .map(|(mount_point, device_uuid)| { + is_datastore_mounted_at(mount_point, device_uuid.to_string()) + }); + if allowed || allow_id { list.push(DataStoreListItem { store: store.clone(), - comment: if !allowed { - None - } else { - data["comment"].as_str().map(String::from) - }, - maintenance: data["maintenance-mode"].as_str().map(String::from), + comment: store_config.comment.filter(|_| allowed), + mount_status, + maintenance: store_config.maintenance_mode, }); } } diff --git a/src/api2/status/mod.rs b/src/api2/status/mod.rs index 113aa985..508331fe 100644 --- a/src/api2/status/mod.rs +++ b/src/api2/status/mod.rs @@ -10,11 +10,12 @@ use proxmox_schema::api; use proxmox_sortable_macro::sortable; use pbs_api_types::{ - Authid, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + Authid, DataStoreConfig, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, + PRIV_DATASTORE_BACKUP, }; use pbs_config::CachedUserInfo; -use pbs_datastore::DataStore; +use pbs_datastore::{is_datastore_mounted_at, DataStore}; use crate::server::metric_collection::rrd::extract_rrd_data; use crate::tools::statistics::linear_regression; @@ -51,10 +52,25 @@ pub async fn datastore_status( for (store, (_, _)) in &config.sections { let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]); let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; + + let store_config = config.lookup::("datastore", store)?; + + let mount_status = store_config + .get_mount_point() + .zip(store_config.backing_device.as_ref()) + .map(|(mount_point, device_uuid)| { + is_datastore_mounted_at(mount_point, device_uuid.to_string()) + }); + + if let Some(false) = mount_status { + list.push(DataStoreStatusListItem::empty(store, None, mount_status)); + continue; + } + if !allowed { if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) { if can_access_any_namespace(datastore, &auth_id, &user_info) { - list.push(DataStoreStatusListItem::empty(store, None)); + list.push(DataStoreStatusListItem::empty(store, None, mount_status)); } } continue; @@ -63,7 +79,11 @@ pub async fn datastore_status( let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) { Ok(datastore) => datastore, Err(err) => { - 
list.push(DataStoreStatusListItem::empty(store, Some(err.to_string()))); + list.push(DataStoreStatusListItem::empty( + store, + Some(err.to_string()), + mount_status, + )); continue; } }; @@ -74,6 +94,7 @@ pub async fn datastore_status( total: Some(status.total), used: Some(status.used), avail: Some(status.available), + mount_status, history: None, history_start: None, history_delta: None, -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:54 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:54 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 18/26] ui: utils: make parseMaintenanceMode more robust In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-19-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/Utils.js | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/www/Utils.js b/www/Utils.js index 4853be36..7756e9b5 100644 --- a/www/Utils.js +++ b/www/Utils.js @@ -740,14 +740,29 @@ Ext.define('PBS.Utils', { return `${icon} ${value}`; }, - // FIXME: this "parser" is brittle and relies on the order the arguments will appear in + /** + * Parses maintenance mode property string. + * Examples: + * "offline,message=foo" -> ["offline", "foo"] + * "offline" -> ["offline", null] + * "message=foo,offline" -> ["offline", "foo"] + * null/undefined -> [null, null] + * + * @param {string|null} mode - Maintenance mode string to parse. + * @return {Array} - Parsed maintenance mode values. + */ parseMaintenanceMode: function(mode) { - let [type, message] = mode.split(/,(.+)/); - type = type.split("=").pop(); - message = message ? message.split("=")[1] - .replace(/^"(.*)"$/, '$1') - .replaceAll('\\"', '"') : null; - return [type, message]; + if (!mode) { + return [null, null]; + } + return mode.split(',').reduce(([m, msg], pair) => { + const [key, value] = pair.split('='); + if (key === 'message') { + return [m, value.replace(/^"(.*)"$/, '$1').replace(/\\"/g, '"')]; + } else { + return [value ?? 
key, msg]; + } + }, [null, null]); }, renderMaintenance: function(mode, activeTasks) { -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:55 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:55 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 19/26] ui: add datastore status mask for unmounted removable datastores In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-20-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/css/ext6-pbs.css | 12 ++++++++++++ www/datastore/Summary.js | 21 +++++++++++++-------- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/www/css/ext6-pbs.css b/www/css/ext6-pbs.css index 706e681e..891189ae 100644 --- a/www/css/ext6-pbs.css +++ b/www/css/ext6-pbs.css @@ -261,6 +261,18 @@ span.snapshot-comment-column { content: "\f0ad"; } +.pbs-unplugged-mask div.x-mask-msg-text { + background: None; + padding: 12px 0 0; +} + +.pbs-unplugged-mask:before { + font-size: 3em; + display: flex; + justify-content: center; + content: "\f1e6"; +} + /* the small icons TODO move to proxmox-widget-toolkit */ .pmx-tree-icon-custom:after { position: relative; diff --git a/www/datastore/Summary.js b/www/datastore/Summary.js index 49aa3b3c..33b4e18a 100644 --- a/www/datastore/Summary.js +++ b/www/datastore/Summary.js @@ -61,16 +61,21 @@ Ext.define('PBS.DataStoreInfo', { Proxmox.Utils.API2Request({ url: `/config/datastore/${me.view.datastore}`, success: function(response) { - const config = response.result.data; - if (config['maintenance-mode']) { - const [_type, msg] = PBS.Utils.parseMaintenanceMode(config['maintenance-mode']); - me.view.el.mask( - `${gettext('Datastore is in maintenance mode')}${msg ? ': ' + msg : ''}`, - 'fa pbs-maintenance-mask', - ); - } else { + let maintenanceString = response.result.data['maintenance-mode']; + let removable = !!response.result.data['backing-device']; + if (!maintenanceString && !removable) { me.view.el.mask(gettext('Datastore is not available')); + return; } + + let [_type, msg] = PBS.Utils.parseMaintenanceMode(maintenanceString); + let isUnplugged = !maintenanceString && removable; + let maskMessage = isUnplugged + ? gettext('Datastore is not mounted') + : `${gettext('Datastore is in maintenance mode')}${msg ? ': ' + msg : ''}`; + + let maskIcon = isUnplugged ? 'fa pbs-unplugged-mask' : 'fa pbs-maintenance-mask'; + me.view.el.mask(maskMessage, maskIcon); }, }); return; -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:52 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:52 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 16/26] ui: add (un)mount button to summary In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-17-h.laimer@proxmox.com> And only try to load datastore information if the datastore is available. 
Signed-off-by: Hannes Laimer --- www/datastore/Summary.js | 92 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 90 insertions(+), 2 deletions(-) diff --git a/www/datastore/Summary.js b/www/datastore/Summary.js index a932b4e0..49aa3b3c 100644 --- a/www/datastore/Summary.js +++ b/www/datastore/Summary.js @@ -309,7 +309,82 @@ Ext.define('PBS.DataStoreSummary', { model: 'pve-rrd-datastore', }); - me.callParent(); + me.statusStore = Ext.create('Proxmox.data.ObjectStore', { + url: `/api2/json/admin/datastore/${me.datastore}/status`, + interval: 1000, + }); + + let unmountBtn = Ext.create('Ext.Button', { + text: gettext('Unmount'), + hidden: true, + handler: () => { + Proxmox.Utils.API2Request({ + url: `/admin/datastore/${me.datastore}/unmount`, + method: 'POST', + failure: function(response) { + Ext.Msg.alert(gettext('Error'), response.htmlStatus); + }, + success: function(response, options) { + Ext.create('Proxmox.window.TaskViewer', { + upid: response.result.data, + }).show(); + }, + }); + }, + }); + + let mountBtn = Ext.create('Ext.Button', { + text: gettext('Mount'), + hidden: true, + handler: () => { + Proxmox.Utils.API2Request({ + url: `/admin/datastore/${me.datastore}/mount`, + method: 'POST', + failure: function(response) { + Ext.Msg.alert(gettext('Error'), response.htmlStatus); + }, + success: function(response, options) { + Ext.create('Proxmox.window.TaskViewer', { + upid: response.result.data, + }).show(); + }, + }); + }, + }); + + Ext.apply(me, { + tbar: [unmountBtn, mountBtn, '->', { xtype: 'proxmoxRRDTypeSelector' }], + }); + + me.mon(me.statusStore, 'load', (s, records, success) => { + if (!success) { + me.down('pbsDataStoreInfo').fireEvent('deactivate'); + Proxmox.Utils.API2Request({ + url: `/config/datastore/${me.datastore}`, + success: response => { + let mode = response.result.data['maintenance-mode']; + let [type, _message] = PBS.Utils.parseMaintenanceMode(mode); + if (!response.result.data['backing-device']) { + return; + } + if (!type || type === 'read-only') { + unmountBtn.setDisabled(true); + mountBtn.setDisabled(false); + } else if (type === 'unmount') { + unmountBtn.setDisabled(true); + mountBtn.setDisabled(true); + } else { + unmountBtn.setDisabled(false); + mountBtn.setDisabled(false); + } + }, + }); + } else { + me.down('pbsDataStoreInfo').fireEvent('activate'); + unmountBtn.setDisabled(false); + mountBtn.setDisabled(true); + } + }); let sp = Ext.state.Manager.getProvider(); me.mon(sp, 'statechange', function(provider, key, value) { @@ -322,11 +397,17 @@ Ext.define('PBS.DataStoreSummary', { Proxmox.Utils.updateColumns(me); }); + me.callParent(); + Proxmox.Utils.API2Request({ url: `/config/datastore/${me.datastore}`, waitMsgTarget: me.down('pbsDataStoreInfo'), success: function(response) { - let path = Ext.htmlEncode(response.result.data.path); + let data = response.result.data; + let path = Ext.htmlEncode(data.path); + const removable = Object.prototype.hasOwnProperty.call(data, "backing-device"); + unmountBtn.setHidden(!removable); + mountBtn.setHidden(!removable); me.down('pbsDataStoreInfo').setTitle(`${me.datastore} (${path})`); me.down('pbsDataStoreNotes').setNotes(response.result.data.comment); }, @@ -344,6 +425,13 @@ Ext.define('PBS.DataStoreSummary', { let hasIoTicks = records?.some((rec) => rec?.data?.io_ticks !== undefined); me.down('#ioDelayChart').setVisible(!success || hasIoTicks); }, undefined, { single: true }); + me.on('afterrender', () => { + me.statusStore.startUpdate(); + }); + + me.on('destroy', () => { + me.statusStore.stopUpdate(); + }); 
me.query('proxmoxRRDChart').forEach((chart) => { chart.setStore(me.rrdstore); -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:47 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:47 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 11/26] add auto-mounting for removable datastores In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-12-h.laimer@proxmox.com> If a device houses multiple datastores, none of them will be mounted automatically. If a device only contains a single datastore, it will be mounted automatically. The reason for not mounting multiple datastores automatically is that we don't know which one is actually wanted, and since mounting all of them means they all have to be unmounted manually as well, it made sense to have the user choose which one to mount.
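The resulting chain (udev rule, systemd template unit, CLI) can also be exercised by hand when testing. A sketch, with an illustrative UUID:

```
# removable-device-attach@.service runs exactly this for a hot-plugged filesystem
proxmox-backup-manager datastore uuid-mount 2f0c81e4-...
```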
Signed-off-by: Hannes Laimer --- changes since v12: * make service not dynamic * don't log UUIDs that don't contain known datastores debian/proxmox-backup-server.install | 1 + debian/proxmox-backup-server.udev | 3 + etc/Makefile | 3 +- etc/removable-device-attach@.service | 8 +++ src/bin/proxmox_backup_manager/datastore.rs | 62 ++++++++++++++++++++- 5 files changed, 75 insertions(+), 2 deletions(-) create mode 100644 etc/removable-device-attach@.service diff --git a/debian/proxmox-backup-server.install b/debian/proxmox-backup-server.install index 79757ead..ff581e3d 100644 --- a/debian/proxmox-backup-server.install +++ b/debian/proxmox-backup-server.install @@ -4,6 +4,7 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/ etc/proxmox-backup-daily-update.timer /lib/systemd/system/ etc/proxmox-backup-proxy.service /lib/systemd/system/ etc/proxmox-backup.service /lib/systemd/system/ +etc/removable-device-attach@.service /lib/systemd/system/ usr/bin/pmt usr/bin/pmtx usr/bin/proxmox-tape diff --git a/debian/proxmox-backup-server.udev b/debian/proxmox-backup-server.udev index afdfb2bc..e21b8bc7 100644 --- a/debian/proxmox-backup-server.udev +++ b/debian/proxmox-backup-server.udev @@ -16,3 +16,6 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg" LABEL="persistent_storage_tape_end" + +# triggers the mounting of a removable device +ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}" \ No newline at end of file diff --git a/etc/Makefile b/etc/Makefile index 42f639f6..b206b9ca 100644 --- a/etc/Makefile +++ b/etc/Makefile @@ -2,12 +2,13 @@ include ../defines.mk UNITS := \ proxmox-backup-daily-update.timer \ + removable-device-attach@.service \ DYNAMIC_UNITS := \ proxmox-backup-banner.service \ proxmox-backup-daily-update.service \ proxmox-backup.service \ - proxmox-backup-proxy.service + proxmox-backup-proxy.service \ all: $(UNITS) $(DYNAMIC_UNITS) pbs-enterprise.list diff --git a/etc/removable-device-attach@.service b/etc/removable-device-attach@.service new file mode 100644 index 00000000..e10d1ea3 --- /dev/null +++ b/etc/removable-device-attach@.service @@ -0,0 +1,8 @@ +[Unit] +Description=Try to mount the removable device of a datastore with uuid '%i'. +After=proxmox-backup-proxy.service +Requires=proxmox-backup-proxy.service + +[Service] +Type=simple +ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs index f2795b39..05f35279 100644 --- a/src/bin/proxmox_backup_manager/datastore.rs +++ b/src/bin/proxmox_backup_manager/datastore.rs @@ -1,4 +1,4 @@ -use anyhow::{format_err, Error}; +use anyhow::{bail, format_err, Error}; use serde_json::Value; use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; @@ -195,6 +195,62 @@ async fn delete_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Ok(()) } +#[api( + protected: true, + input: { + properties: { + uuid: { + type: String, + description: "The UUID of the device that should be mounted", + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + }, + }, +)] +/// Try mounting a removable datastore given the UUID. +async fn uuid_mount(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { + let uuid = param["uuid"] + .as_str() + .ok_or_else(|| format_err!("uuid has to be specified"))?; + + let info = &api2::config::datastore::API_METHOD_LIST_DATASTORES; + let data: Value = match info.handler { + ApiHandler::Sync(handler) => (handler)(serde_json::json!({}), info, rpcenv)?, + _ => unreachable!(), + }; + + let matching_stores = data.as_array().map_or(Vec::new(), |list| { + list.iter() + .filter_map(Value::as_object) + .filter(|store| store.get("backing-device").map_or(false, |d| d.eq(&uuid))) + .collect() + }); + + if matching_stores.len() != 1 { + return Ok(Value::Null); + } + + let store_name = matching_stores + .get(0) + .and_then(|s| s.get("name").and_then(Value::as_str)); + if let Some(store_name) = store_name { + let info = &api2::admin::datastore::API_METHOD_MOUNT; + let mount_param = serde_json::json!({ + "store": store_name, + }); + let result = match info.handler { + ApiHandler::Sync(handler) => (handler)(mount_param, info, rpcenv)?, + _ => unreachable!(), + }; + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + return Ok(Value::Null); + } + bail!("'{uuid}' is not associated with any datastore") +} + pub fn datastore_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) @@ -240,6 +296,10 @@ pub fn datastore_commands() -> CommandLineInterface { pbs_config::datastore::complete_calendar_event, ), ) + .insert( + "uuid-mount", + CliCommand::new(&API_METHOD_UUID_MOUNT).arg_param(&["uuid"]), + ) .insert( "remove", CliCommand::new(&API_METHOD_DELETE_DATASTORE) -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:43 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:43 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 07/26] api: admin: add (un)mount endpoint for removable datastores In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-8-h.laimer@proxmox.com> Removable datastores can be mounted unless - they are already mounted - their device is not present For unmounting, the maintenance mode is set to `unmount`, which prohibits the starting of any new tasks involving any IO; this mode is unset either - on completion of the unmount - on abort of the unmount task If the unmounting itself should fail, the maintenance mode stays in place and requires manual intervention by unsetting it in the config file directly. This is intentional: unmounting should not fail, and if it does, the situation should be investigated.
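For reference, a rough sketch of what the stuck state would look like in /etc/proxmox-backup/datastore.cfg; the section layout and values here are illustrative, not taken from this patch. Removing the maintenance-mode line is the manual intervention meant above:

```
datastore: store1
        path /mnt/datastore/store1
        backing-device 8d3a6e02-...
        maintenance-mode unmount
```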
Signed-off-by: Hannes Laimer --- changes since v12: * allow multiple stores on one device * add best effort attempt to unmount after failed creation src/api2/admin/datastore.rs | 267 ++++++++++++++++++++++++++++++++++-- 1 file changed, 257 insertions(+), 10 deletions(-) diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index b73ad0ff..a12262e7 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -3,7 +3,7 @@ use std::collections::HashSet; use std::ffi::OsStr; use std::os::unix::ffi::OsStrExt; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::sync::Arc; use anyhow::{bail, format_err, Error}; @@ -13,7 +13,7 @@ use hyper::{header, Body, Response, StatusCode}; use serde::Deserialize; use serde_json::{json, Value}; use tokio_stream::wrappers::ReceiverStream; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use proxmox_async::blocking::WrappedReaderStream; use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; @@ -29,6 +29,7 @@ use proxmox_sys::fs::{ file_read_firstline, file_read_optional_string, replace_file, CreateOptions, }; use proxmox_time::CalendarEvent; +use proxmox_worker_task::WorkerTaskContext; use pxar::accessor::aio::Accessor; use pxar::EntryKind; @@ -36,12 +37,12 @@ use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, - GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation, - PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, - BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, - DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, + GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, MaintenanceMode, + MaintenanceType, Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, + BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, + BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, + NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, + PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; @@ -57,8 +58,8 @@ use pbs_datastore::index::IndexFile; use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use pbs_datastore::prune::compute_prune_info; use pbs_datastore::{ - check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, - StoreProgress, CATALOG_NAME, + check_backup_owner, is_datastore_mounted_at, task_tracking, BackupDir, BackupGroup, DataStore, + LocalChunkReader, StoreProgress, CATALOG_NAME, }; use pbs_tools::json::required_string_param; use proxmox_rest_server::{formatter, WorkerTask}; @@ -2384,6 +2385,250 @@ pub async fn set_backup_owner( .await? } +/// Here we +/// +/// 1. mount the removable device to `/mount/` +/// 2. bind mount `/mount//` to `/mnt/datastore/` +/// 3.
unmount `/mount/` +/// +/// leaving us with the datastore being mounted directly with its name under /mnt/datastore/... +/// +/// The reason for the randomized device mounting paths is to avoid two tasks trying to mount to +/// the same path; this is *very* unlikely since the device is only mounted briefly, but +/// technically possible. +pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> { + if let (Some(uuid), Some(mount_point)) = ( + datastore.backing_device.as_ref(), + datastore.get_mount_point(), + ) { + if pbs_datastore::is_datastore_mounted_at(mount_point.clone(), uuid.to_string()) { + bail!("device is already mounted at '{}'", mount_point); + } + let tmp_mount_path = format!( + "{}/{:x}", + pbs_buildcfg::rundir!("/mount"), + proxmox_uuid::Uuid::generate() + ); + + let default_options = proxmox_sys::fs::CreateOptions::new(); + proxmox_sys::fs::create_path( + &tmp_mount_path, + Some(default_options.clone()), + Some(default_options.clone()), + )?; + + debug!("mounting '{uuid}' to '{}'", tmp_mount_path); + crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path))?; + + let full_store_path = format!( + "{tmp_mount_path}/{}", + datastore.path.trim_start_matches('/') + ); + let backup_user = pbs_config::backup_user()?; + let options = CreateOptions::new() + .owner(backup_user.uid) + .group(backup_user.gid); + + proxmox_sys::fs::create_path( + &mount_point, + Some(default_options.clone()), + Some(options.clone()), + )?; + + // can't be created before it is mounted, so we have to do it here + proxmox_sys::fs::create_path( + &full_store_path, + Some(default_options.clone()), + Some(options.clone()), + )?; + + info!( + "mounting '{}'({}) to '{}'", + datastore.name, datastore.path, mount_point + ); + if let Err(err) = + crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point)) + { + debug!("unmounting '{}'", tmp_mount_path); + let _ = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)); + let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)); + return Err(format_err!( + "Datastore '{}' could not be mounted: {}.", + datastore.name, + err + )); + } + + debug!("unmounting '{}'", tmp_mount_path); + crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path))?; + std::fs::remove_dir(std::path::Path::new(&tmp_mount_path))?; + + Ok(()) + } else { + Err(format_err!( + "Datastore '{}' cannot be mounted because it is not removable.", + datastore.name + )) + } +} + +#[api( + protected: true, + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + } + }, + returns: { + schema: UPID_SCHEMA, + }, + access: { + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), + }, +)] +/// Mount removable datastore.
+pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let (section_config, _digest) = pbs_config::datastore::config()?; + let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; + + if datastore.backing_device.is_none() { + bail!("datastore '{store}' is not removable"); + } + + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + + let upid = WorkerTask::new_thread( + "mount-device", + Some(store), + auth_id.to_string(), + to_stdout, + move |_worker| do_mount_device(datastore), + )?; + + Ok(json!(upid)) +} + +fn unset_unmount_maintenance(store: &str) -> Result<(), Error> { + let _lock = pbs_config::datastore::lock_config()?; + let (mut section_config, _digest) = pbs_config::datastore::config()?; + let mut store_config: DataStoreConfig = section_config.lookup("datastore", store)?; + if store_config + .get_maintenance_mode() + .map_or(true, |m| m.ty != MaintenanceType::Unmount) + { + bail!("Maintenance mode should have been 'Unmount'") + } + store_config.maintenance_mode = None; + section_config.set_data(store, "datastore", &store_config)?; + pbs_config::datastore::save_config(§ion_config)?; + Ok(()) +} + +fn do_unmount_device( + datastore: DataStoreConfig, + worker: Option<&dyn WorkerTaskContext>, +) -> Result<(), Error> { + let mut active_operations = task_tracking::get_active_operations(&datastore.name)?; + let mut old_status = String::new(); + while active_operations.read + active_operations.write > 0 { + if let Some(worker) = worker { + if worker.abort_requested() { + unset_unmount_maintenance(&datastore.name)?; + bail!("aborted, due to user request"); + } + let status = format!( + "cannot unmount yet, still {} read and {} write operations active", + active_operations.read, active_operations.write + ); + if status != old_status { + info!("{status}"); + old_status = status; + } + } + std::thread::sleep(std::time::Duration::from_secs(1)); + active_operations = task_tracking::get_active_operations(&datastore.name)?; + } + if let Some(mount_point) = datastore.get_mount_point() { + crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?; + unset_unmount_maintenance(&datastore.name)?; + } + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + store: { schema: DATASTORE_SCHEMA }, + }, + }, + returns: { + schema: UPID_SCHEMA, + }, + access: { + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true), + } +)] +/// Unmount a removable device that is associated with the datastore +pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let _lock = pbs_config::datastore::lock_config()?; + let (mut section_config, _digest) = pbs_config::datastore::config()?; + let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; + + if datastore.backing_device.is_none() { + bail!("datastore '{store}' is not removable"); + } + + let mount_status = datastore + .get_mount_point() + .zip(datastore.backing_device.as_ref()) + .map(|(mount_point, device_uuid)| { + is_datastore_mounted_at(mount_point, device_uuid.to_string()) + }); + + if mount_status == Some(false) { + bail!("datastore '{store}' is not mounted"); + } + + datastore.set_maintenance_mode(Some(MaintenanceMode { + ty: MaintenanceType::Unmount, + message: None, + }))?; + section_config.set_data(&store, "datastore", &datastore)?; + pbs_config::datastore::save_config(§ion_config)?; + + drop(_lock); + + let auth_id: Authid = 
rpcenv.get_auth_id().unwrap().parse()?; + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) + { + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); + let _ = proxmox_daemon::command_socket::send_raw( + sock, + &format!( + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", + &store + ), + ) + .await; + } + + let upid = WorkerTask::new_thread( + "unmount-device", + Some(store), + auth_id.to_string(), + to_stdout, + move |worker| do_unmount_device(datastore, Some(&worker)), + )?; + + Ok(json!(upid)) +} + #[sortable] const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ ( @@ -2422,6 +2667,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ .get(&API_METHOD_LIST_GROUPS) .delete(&API_METHOD_DELETE_GROUP), ), + ("mount", &Router::new().post(&API_METHOD_MOUNT)), ( "namespace", // FIXME: move into datastore:: sub-module?! @@ -2456,6 +2702,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ .delete(&API_METHOD_DELETE_SNAPSHOT), ), ("status", &Router::new().get(&API_METHOD_STATUS)), + ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)), ( "upload-backup-log", &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG), -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:01:01 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:01:01 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 25/26] ui: support create removable datastore through directory creation In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-26-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 2 ++ www/DirectoryList.js | 13 +++++++++++++ www/window/CreateDirectory.js | 14 ++++++++++++++ 3 files changed, 29 insertions(+) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 139e753d..36c85ccf 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -153,6 +153,8 @@ pub fn list_datastore_mounts() -> Result, Error> { "removable-datastore": { description: "The added datastore is removable.", type: bool, + optional: true, + default: false, }, filesystem: { type: FileSystemType, diff --git a/www/DirectoryList.js b/www/DirectoryList.js index adefa9ab..25921a62 100644 --- a/www/DirectoryList.js +++ b/www/DirectoryList.js @@ -121,6 +121,19 @@ Ext.define('PBS.admin.Directorylist', { ], columns: [ + { + text: '', + flex: 0, + width: 35, + dataIndex: 'removable', + renderer: function(_text, _, row) { + if (row.data.removable) { + return ``; + } else { + return ''; + } + }, + }, { text: gettext('Path'), dataIndex: 'path', diff --git a/www/window/CreateDirectory.js b/www/window/CreateDirectory.js index 6aabe21a..38d6979d 100644 --- a/www/window/CreateDirectory.js +++ b/www/window/CreateDirectory.js @@ -43,6 +43,20 @@ Ext.define('PBS.window.CreateDirectory', { name: 'add-datastore', fieldLabel: gettext('Add as Datastore'), value: '1', + listeners: { + change(field, newValue, _oldValue) { + let form = field.up('form'); + let rmBox = form.down('[name=removable-datastore]'); + + rmBox.setDisabled(!newValue); + rmBox.setValue(false); + }, + }, + }, + { + xtype: 'proxmoxcheckbox', + name: 'removable-datastore', + fieldLabel: gettext('is removable'), }, ], }); -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:59 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:59 
+0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 23/26] api: node: include removable datastores in directory list In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-24-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 7e020e27..21d2bcc4 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -45,6 +45,8 @@ pub struct DatastoreMountInfo { pub path: String, /// The mounted device. pub device: String, + /// This is removable + pub removable: bool, /// File system type pub filesystem: Option, /// Mount options @@ -61,7 +63,7 @@ pub struct DatastoreMountInfo { } }, returns: { - description: "List of systemd datastore mount units.", + description: "List of removable-datastore devices and systemd datastore mount units.", type: Array, items: { type: DatastoreMountInfo, @@ -100,6 +102,31 @@ pub fn list_datastore_mounts() -> Result, Error> { path: data.Where, filesystem: data.Type, options: data.Options, + removable: false, + }); + } + + let (config, _digest) = pbs_config::datastore::config()?; + let store_list: Vec = config.convert_to_typed_array("datastore")?; + + for item in store_list + .into_iter() + .filter(|store| store.backing_device.is_some()) + { + let Some(backing_device) = item.backing_device.as_deref() else { + continue; + }; + let Some(mount_point) = item.get_mount_point() else { + continue; + }; + list.push(DatastoreMountInfo { + unitfile: "datastore config".to_string(), + name: item.name.clone(), + device: format!("/dev/disk/by-uuid/{backing_device}"), + path: mount_point, + filesystem: None, + options: None, + removable: true, }); } -- 2.39.5 From h.laimer at proxmox.com Wed Nov 13 16:00:58 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Wed, 13 Nov 2024 16:00:58 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 22/26] api: node: allow creation of removable datastore through directory endpoint In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <20241113150102.164820-23-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 59 +++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 5 deletions(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 7f540220..7e020e27 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -123,6 +123,11 @@ pub fn list_datastore_mounts() -> Result, Error> { description: "Configure a datastore using the directory.", type: bool, optional: true, + default: false, + }, + "removable-datastore": { + description: "The added datastore is removable.", + type: bool, }, filesystem: { type: FileSystemType, @@ -141,7 +146,8 @@ pub fn list_datastore_mounts() -> Result, Error> { pub fn create_datastore_disk( name: String, disk: String, - add_datastore: Option, + add_datastore: bool, + removable_datastore: bool, filesystem: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { @@ -155,8 +161,51 @@ pub fn create_datastore_disk( bail!("disk '{}' is already in use.", disk); } - let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); + if add_datastore && removable_datastore { + let upid_str = WorkerTask::new_thread( + 
"dircreate", + Some(name.clone()), + auth_id, + to_stdout, + move |_worker| { + info!("create removable datastore '{name}' on disk {disk}"); + + let filesystem = filesystem.unwrap_or(FileSystemType::Ext4); + + let manager = DiskManage::new(); + + let disk = manager.disk_by_name(&disk)?; + + let partition = create_single_linux_partition(&disk)?; + create_file_system(&partition, filesystem)?; + + let uuid = get_fs_uuid(&partition)?; + + let lock = pbs_config::datastore::lock_config()?; + let datastore: DataStoreConfig = serde_json::from_value( + json!({ "name": name, "path": name, "backing-device": uuid }), + )?; + + let (config, _digest) = pbs_config::datastore::config()?; + if config.sections.get(&datastore.name).is_some() { + bail!("datastore '{}' already exists.", datastore.name); + } + + // we don't have to check if the UUID is already in use since we just created the + // fs ourself + + crate::api2::config::datastore::do_create_datastore( + lock, config, datastore, false, + )?; + + Ok(()) + }, + )?; + return Ok(upid_str); + }; + + let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); // check if the default path exists already. // bail if it is not empty or another filesystem mounted on top let default_path = std::path::PathBuf::from(&mount_point); @@ -183,7 +232,6 @@ pub fn create_datastore_disk( move |_worker| { info!("create datastore '{name}' on disk {disk}"); - let add_datastore = add_datastore.unwrap_or(false); let filesystem = filesystem.unwrap_or(FileSystemType::Ext4); let manager = DiskManage::new(); @@ -248,8 +296,9 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> { // path of datastore cannot be changed let (config, _) = pbs_config::datastore::config()?; let datastores: Vec = config.convert_to_typed_array("datastore")?; - let conflicting_datastore: Option = - datastores.into_iter().find(|ds| ds.absolute_path() == path); + let conflicting_datastore: Option = datastores.into_iter().find(|ds| { + ds.absolute_path() == path || ds.get_mount_point().map_or(false, |mp| mp == path) + }); if let Some(conflicting_datastore) = conflicting_datastore { bail!( -- 2.39.5 From c.ebner at proxmox.com Wed Nov 13 16:55:44 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 16:55:44 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 1/2] docs: add security implications of prune and change detection mode Message-ID: <20241113155545.354045-1-c.ebner@proxmox.com> Users should be made aware that the data stored in chunks outlives the backup snapshots on pruning and that backups created using the change-detection-mode set to metadata might reference chunks containing files which have vanished since the previous backup, but might still be accessible when access to the chunks raw data is possible (client or server side). Signed-off-by: Christian Ebner --- changes since version 1: - explicitly mention steps to get rid of chunks for both, regular and file-level backups with change detection mode metadata - reworded and restructured according to feedback diff --git a/docs/maintenance.rst b/docs/maintenance.rst index 4bb135e4e..e8a26d69c 100644 --- a/docs/maintenance.rst +++ b/docs/maintenance.rst @@ -6,8 +6,34 @@ Maintenance Tasks Pruning ------- -Prune lets you specify which backup snapshots you want to keep. -The following retention options are available: +Prune lets you specify which backup snapshots you want to keep, removing others. +When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs, +log and notes) is removed. 
The chunks that contain the actual backup data and were +previously referenced by the pruned snapshot have to be removed by a +garbage collection run. + +.. Caution:: Take into consideration that sensitive information stored in a + given data chunk will outlive pruned snapshots and remain present in the + datastore as long as referenced by at least one backup snapshot. Further, + *even* if no snapshot references a given chunk, it will remain present until + removed by the garbage collection. + + Further, file-level backups created using the change detection mode + `metadata` can reference backup chunks containing files which have vanished + since the previous backup, but might still be accessible when reading the + chunks' raw data is possible (client or server side). + + To remove chunks containing sensitive data, prune any snapshot made while the + data was part of the backup input and run a garbage collection. Further, if + using file-based backups with change detection mode `metadata`, additionally + prune all snapshots made since the sensitive data was no longer part of the + backup input and run a garbage collection. + + The no longer referenced chunks will then be marked for deletion on the next + garbage collection run and removed by a subsequent run after the grace + period. + +The following retention options are available for pruning: ``keep-last `` Keep the last ```` backup snapshots. -- 2.39.5 From c.ebner at proxmox.com Wed Nov 13 16:55:45 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Wed, 13 Nov 2024 16:55:45 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/2] docs: deduplicate background details for garbage collection In-Reply-To: <20241113155545.354045-1-c.ebner@proxmox.com> References: <20241113155545.354045-1-c.ebner@proxmox.com> Message-ID: <20241113155545.354045-2-c.ebner@proxmox.com> Currently, common details regarding garbage collection are documented in the backup client and the maintenance task. Deduplicate this information by moving the details to the background section of the maintenance task and reference that section in the backup client part. Signed-off-by: Christian Ebner --- changes since version 1: - refine phase 2 garbage collection description - s/referred/referenced/ - reworded and restructured according to feedback docs/backup-client.rst | 28 ++++++++++++---------------- docs/maintenance.rst | 38 +++++++++++++++++++++++++----------- 2 files changed, 39 insertions(+), 27 deletions(-) diff --git a/docs/backup-client.rst b/docs/backup-client.rst index e56e0625b..892be11d9 100644 --- a/docs/backup-client.rst +++ b/docs/backup-client.rst @@ -789,29 +789,25 @@ Garbage Collection ------------------ The ``prune`` command removes only the backup index files, not the data -from the datastore. This task is left to the garbage collection -command. It is recommended to carry out garbage collection on a regular basis. +from the datastore. Deletion of unused backup data from the datastore is done by +:ref:`garbage collection <maintenance_gc>`. It is therefore recommended to +schedule garbage collection tasks on a regular basis. The working principle of +garbage collection is described in more detail in the related :ref:`background +section <gc_background>`. -The garbage collection works in two phases. In the first phase, all -data blocks that are still in use are marked. In the second phase, -unused data blocks are removed. +To start garbage collection from the client side, run the following command: + +.. code-block:: console + + # proxmox-backup-client garbage-collect ..
note:: This command needs to read all existing backup index files and touches the complete chunk-store. This can take a long time depending on the number of chunks and the speed of the underlying disks. -.. note:: The garbage collection will only remove chunks that haven't been used - for at least one day (exactly 24h 5m). This grace period is necessary because - chunks in use are marked by touching the chunk which updates the ``atime`` - (access time) property. Filesystems are mounted with the ``relatime`` option - by default. This results in a better performance by only updating the - ``atime`` property if the last access has been at least 24 hours ago. The - downside is that touching a chunk within these 24 hours will not always - update its ``atime`` property. - - Chunks in the grace period will be logged at the end of the garbage - collection task as *Pending removals*. +The progress of the garbage collection will be displayed as shown in the example +below: .. code-block:: console diff --git a/docs/maintenance.rst b/docs/maintenance.rst index e8a26d69c..bba3feff4 100644 --- a/docs/maintenance.rst +++ b/docs/maintenance.rst @@ -197,6 +197,8 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up periodically. For most setups a weekly schedule provides a good interval to start. +.. _gc_background: + GC Background ^^^^^^^^^^^^^ @@ -222,17 +224,31 @@ datastore or interfering with other backups. The garbage collection (GC) process is performed per datastore and is split into two phases: -- Phase one: Mark - All index files are read, and the access time of the referred chunk files is - updated. - -- Phase two: Sweep - The task iterates over all chunks, checks their file access time, and if it - is older than the cutoff time (i.e., the time when GC started, plus some - headroom for safety and Linux file system behavior), the task knows that the - chunk was neither referred to in any backup index nor part of any currently - running backup that has no index to scan for. As such, the chunk can be - safely deleted. +- Phase one (Mark): + + All index files are read, and the access time (``atime``) of the referenced + chunk files is updated. + +- Phase two (Sweep): + + The task iterates over all chunks and checks their file access time against a + cutoff time. The cutoff time is given by either the oldest backup writer + instance, if present, or 24 hours and 5 minutes after the start of garbage + collection. + + Garbage collection can consider chunk files with access time older than the + cutoff time to be neither referenced by any backup snapshot's index, nor part + of any currently running backup job. Therefore, these chunks can safely be + deleted. + + Chunks within the grace period will not be deleted and are logged at the end + of the garbage collection task as *Pending removals*. +.. note:: The grace period for backup chunk removal is not arbitrary, but stems + from the fact that filesystems are typically mounted with the ``relatime`` + option by default. This results in better performance by only updating the + ``atime`` property if a file has been modified since the last access or the + last access has been at least 24 hours ago.
Manually Starting GC ^^^^^^^^^^^^^^^^^^^^ -- 2.39.5 From f.schauer at proxmox.com Wed Nov 13 16:57:58 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 16:57:58 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v6 0/4] add support for bulk import of a dump directory Message-ID: <20241113155802.190824-1-f.schauer@proxmox.com> When a path to a directory is provided in the vma_file argument, try to upload all VMA backups in the directory. This also handles compressed VMA files, notes and logs. If a vmid is specified with --vmid, only the backups of that particular vmid are uploaded. Also improve the readability of the log messages to keep track of all imported backups. Changed since v5: * Extend the help text by separate usages for single VMA import and bulk import * Move variables into format strings where possible Changed since v4: * Switch grouped_vmas from Vec> to HashMap> * Remove dependency on itertools * bail when no backups were found * Default to yes on the bulk import confirmation prompt * bail on invalid input to the bulk import confirmation prompt Changed since v3: * Mention in the description of the --vmid argument that it is required if a single VMA file is provided * Construct grouped_vmas in place * Add debug logs when gathering files for bulk import * Log a summary of the files gathered for bulk import * Remove the "confusing VMA file path" error message in the second commit * Switch chunk_stats from Arc> to Arc<[AtomicU64; 256]> and use fetch_add to atomically increment and fetch the chunk stat * Ask for confirmation before bulk import * Add --yes option to skip the confirmation prompt Changed since v2: * Make skipping a VMID on error optional with the --skip-failed option * Switch log output from stderr to stdout * Bump itertools to 0.13 Changed since v1: * Do not recurse through dump directory * Compile regex once before iterating over the files in the dump directory * Use extract on regex capture groups * Do not use deprecated method `chrono::NaiveDateTime::timestamp` * Use proxmox_sys::fs::file_read_optional_string * Group VMA files by VMID and continue with next VMID on error * Move the BackupVmaToPbsArgs split into its own commit * Remove hard coded occurrences of 255 * Use level-based logging instead of println Filip Schauer (4): add support for bulk import of a dump directory add option to skip vmids whose backups failed to upload use level-based logging instead of println log device upload progress as a percentage Cargo.toml | 4 + src/main.rs | 198 +++++++++++++++++++++++++++++++++++++++++++++---- src/vma2pbs.rs | 108 +++++++++++++++++++++------ 3 files changed, 271 insertions(+), 39 deletions(-) -- 2.39.5 From f.schauer at proxmox.com Wed Nov 13 16:58:01 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 16:58:01 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v6 3/4] use level-based logging instead of println In-Reply-To: <20241113155802.190824-1-f.schauer@proxmox.com> References: <20241113155802.190824-1-f.schauer@proxmox.com> Message-ID: <20241113155802.190824-4-f.schauer@proxmox.com> Use log level "info" by default and prevent spamming messages for every single chunk uploaded. To re-enable these messages, set the RUST_LOG environment variable to "debug".
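A sketch of what that looks like in practice; the invocation is assumed, with the repository string following the cover letter's example:

```
# per-chunk upload messages are logged at debug level; re-enable them via
# env_logger's standard RUST_LOG variable
RUST_LOG=debug vma-to-pbs \
    --repository 'user@realm!token@server:port:datastore' \
    vzdump-qemu-100-2024_11_01-12_00_00.vma
```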
Signed-off-by: Filip Schauer --- Cargo.toml | 2 ++ src/main.rs | 28 ++++++++++++++++++++++------ src/vma2pbs.rs | 37 ++++++++++++++++--------------------- 3 files changed, 40 insertions(+), 27 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ad80304..7951bbc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,9 @@ edition = "2021" anyhow = "1.0" bincode = "1.3" chrono = "0.4" +env_logger = "0.10" hyper = "0.14.5" +log = "0.4" pico-args = "0.5" md5 = "0.7.0" regex = "1.7" diff --git a/src/main.rs b/src/main.rs index e87da87..f942a73 100644 --- a/src/main.rs +++ b/src/main.rs @@ -6,6 +6,7 @@ use std::path::PathBuf; use anyhow::{bail, Context, Error}; use chrono::NaiveDateTime; +use env_logger::Target; use proxmox_sys::linux::tty; use proxmox_time::epoch_i64; use regex::Regex; @@ -132,7 +133,7 @@ fn parse_args() -> Result { match (encrypt, keyfile.is_some()) { (true, false) => bail!("--encrypt requires a --keyfile!"), - (false, true) => println!( + (false, true) => log::info!( "--keyfile given, but --encrypt not set -> backup will be signed, but not encrypted!" ), _ => {} @@ -194,7 +195,7 @@ fn parse_args() -> Result { Some(key_password) } else if vma_file_path.is_none() { - println!( + log::info!( "Please use --key-password-file to provide the password when passing the VMA file \ to stdin, if required." ); @@ -250,13 +251,17 @@ fn parse_args() -> Result { let Some((_, [backup_id, timestr, ext])) = re.captures(file_name).map(|c| c.extract()) else { - // Skip the file, since it is not a VMA backup + log::debug!("Skip \"{file_name}\", since it is not a VMA backup"); continue; }; if let Some(ref vmid) = vmid { if backup_id != vmid { - // Skip the backup, since it does not match the specified vmid + log::debug!( + "Skip backup with VMID {}, since it does not match specified VMID {}", + backup_id, + vmid + ); continue; } } @@ -312,13 +317,13 @@ fn parse_args() -> Result { bail!("Did not find any backup archives"); } - println!( + log::info!( "Found {total_vma_count} backup archive(s) of {} different VMID(s):", grouped_vmas.len() ); for (backup_id, vma_group) in &grouped_vmas { - println!("- VMID {backup_id}: {} backups", vma_group.len()); + log::info!("- VMID {backup_id}: {} backups", vma_group.len()); } if !yes { @@ -361,7 +366,18 @@ fn parse_args() -> Result { Ok(options) } +fn init_cli_logger() { + env_logger::Builder::from_env(env_logger::Env::new().filter_or("RUST_LOG", "info")) + .format_level(false) + .format_target(false) + .format_timestamp(None) + .target(Target::Stdout) + .init(); +} + fn main() -> Result<(), Error> { + init_cli_logger(); + let args = parse_args()?; vma2pbs(args)?; diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index b7de0cd..af60e44 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -82,8 +82,8 @@ fn create_pbs_backup_task( pbs_args: &PbsArgs, backup_args: &VmaBackupArgs, ) -> Result<*mut ProxmoxBackupHandle, Error> { - println!( - "backup time: {}", + log::info!( + "\tbackup time: {}", epoch_to_rfc3339(backup_args.backup_time)? 
); @@ -152,7 +152,7 @@ where let config_name = config.name; let config_data = config.content; - println!("CFG: size: {} name: {}", config_data.len(), config_name); + log::info!("\tCFG: size: {} name: {config_name}", config_data.len()); let config_name_cstr = CString::new(config_name)?; @@ -190,10 +190,7 @@ where let device_name = vma_reader.get_device_name(device_id.try_into()?)?; let device_size = vma_reader.get_device_size(device_id.try_into()?)?; - println!( - "DEV: dev_id={} size: {} devname: {}", - device_id, device_size, device_name - ); + log::info!("\tDEV: dev_id={device_id} size: {device_size} devname: {device_name}"); let device_name_cstr = CString::new(device_name)?; let pbs_device_id = proxmox_backup_register_image( @@ -276,10 +273,8 @@ where }; let pbs_upload_chunk = |pbs_chunk_buffer: Option<&[u8]>| { - println!( - "Uploading dev_id: {} offset: {:#0X} - {:#0X}", - dev_id, - pbs_chunk_offset, + log::debug!( + "\tUploading dev_id: {dev_id} offset: {pbs_chunk_offset:#0X} - {:#0X}", pbs_chunk_offset + pbs_chunk_size, ); @@ -466,13 +461,13 @@ fn set_notes( pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { let pbs_args = &args.pbs_args; - println!("PBS repository: {}", pbs_args.pbs_repository); + log::info!("PBS repository: {}", pbs_args.pbs_repository); if let Some(ns) = &pbs_args.namespace { - println!("PBS namespace: {}", ns); + log::info!("PBS namespace: {ns}"); } - println!("PBS fingerprint: {}", pbs_args.fingerprint); - println!("compress: {}", pbs_args.compress); - println!("encrypt: {}", pbs_args.encrypt); + log::info!("PBS fingerprint: {}", pbs_args.fingerprint); + log::info!("compress: {}", pbs_args.compress); + log::info!("encrypt: {}", pbs_args.encrypt); let start_transfer_time = SystemTime::now(); @@ -485,8 +480,8 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { ); if args.skip_failed { - eprintln!("{}", err_msg); - println!("Skipping VMID {}", backup_args.backup_id); + log::warn!("{}", err_msg); + log::info!("Skipping VMID {}", backup_args.backup_id); break; } else { bail!(err_msg); @@ -500,15 +495,15 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { let minutes = total_seconds / 60; let seconds = total_seconds % 60; let milliseconds = transfer_duration.as_millis() % 1000; - println!("Backup finished within {minutes} minutes, {seconds} seconds and {milliseconds} ms"); + log::info!("Backup finished within {minutes} minutes, {seconds} seconds and {milliseconds} ms"); Ok(()) } fn upload_vma_file(pbs_args: &PbsArgs, backup_args: &VmaBackupArgs) -> Result<(), Error> { match &backup_args.vma_file_path { - Some(vma_file_path) => println!("Uploading VMA backup from {vma_file_path:?}"), - None => println!("Uploading VMA backup from (stdin)"), + Some(vma_file_path) => log::info!("Uploading VMA backup from {vma_file_path:?}"), + None => log::info!("Uploading VMA backup from (stdin)"), }; let vma_file: Box = match &backup_args.compression { -- 2.39.5 From f.schauer at proxmox.com Wed Nov 13 16:58:02 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 16:58:02 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v6 4/4] log device upload progress as a percentage In-Reply-To: <20241113155802.190824-1-f.schauer@proxmox.com> References: <20241113155802.190824-1-f.schauer@proxmox.com> Message-ID: <20241113155802.190824-5-f.schauer@proxmox.com> Log the upload progress of a device as a percentage with log level info every 1000 chunks. 
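The estimate is chunk-count based. A minimal sketch of the arithmetic follows; the constant used by the patch is PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE, and the 4 MiB figure below is only illustrative:

```rs
// progress is estimated from bytes uploaded so far relative to the device size
fn upload_percentage(chunk_size: u64, chunks_done: u64, device_size: u64) -> u64 {
    100 * chunk_size * chunks_done / device_size
}

// e.g. with 4 MiB chunks, 1000 chunks into a 64 GiB device:
// 100 * 4 MiB * 1000 / 64 GiB ~= 6 (percent)
```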
Signed-off-by: Filip Schauer --- src/vma2pbs.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index af60e44..7457b9a 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -6,6 +6,8 @@ use std::fs::File; use std::io::{stdin, BufRead, BufReader, Read}; use std::process::{Command, Stdio}; use std::ptr; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::Arc; use std::time::SystemTime; use anyhow::{anyhow, bail, Error}; @@ -229,6 +231,8 @@ where non_zero_mask: u64, } + let chunk_stats = Arc::new([const { AtomicU64::new(0) }; VMA_MAX_DEVICES]); + let images_chunks: RefCell>> = RefCell::new(HashMap::new()); @@ -277,6 +281,11 @@ where "\tUploading dev_id: {dev_id} offset: {pbs_chunk_offset:#0X} - {:#0X}", pbs_chunk_offset + pbs_chunk_size, ); + let chunk_stat = chunk_stats[dev_id as usize].fetch_add(1, Ordering::SeqCst); + if (chunk_stat % 1000) == 0 { + let percentage = 100 * PROXMOX_BACKUP_DEFAULT_CHUNK_SIZE * chunk_stat / device_size; + log::info!("\tUploading dev_id: {dev_id} ({percentage}%)"); + } let mut pbs_err: *mut c_char = ptr::null_mut(); -- 2.39.5 From f.schauer at proxmox.com Wed Nov 13 16:58:00 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 16:58:00 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v6 2/4] add option to skip vmids whose backups failed to upload In-Reply-To: <20241113155802.190824-1-f.schauer@proxmox.com> References: <20241113155802.190824-1-f.schauer@proxmox.com> Message-ID: <20241113155802.190824-3-f.schauer@proxmox.com> Signed-off-by: Filip Schauer --- src/main.rs | 6 ++++++ src/vma2pbs.rs | 13 ++++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index de1e6e0..e87da87 100644 --- a/src/main.rs +++ b/src/main.rs @@ -54,6 +54,9 @@ Options: File containing a comment/notes [--log-file ] Log file + --skip-failed + Skip VMIDs that failed to be uploaded and continue onto the next VMID if a dump directory + is specified. 
-y, --yes Automatic yes to prompts -h, --help @@ -74,6 +77,7 @@ fn parse_args() -> Result { "--compress", "-e", "--encrypt", + "--skip-failed", "-y", "--yes", ]; @@ -123,6 +127,7 @@ fn parse_args() -> Result { let key_password_file: Option = args.opt_value_from_str("--key-password-file")?; let notes_file: Option = args.opt_value_from_str("--notes-file")?; let log_file_path: Option = args.opt_value_from_str("--log-file")?; + let skip_failed = args.contains("--skip-failed"); let yes = args.contains(["-y", "--yes"]); match (encrypt, keyfile.is_some()) { @@ -350,6 +355,7 @@ fn parse_args() -> Result { let options = BackupVmaToPbsArgs { pbs_args, grouped_vmas, + skip_failed, }; Ok(options) diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index b26c62e..b7de0cd 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -32,6 +32,7 @@ const VMA_CLUSTER_SIZE: usize = 65536; pub struct BackupVmaToPbsArgs { pub pbs_args: PbsArgs, pub grouped_vmas: HashMap>, + pub skip_failed: bool, } pub struct PbsArgs { @@ -478,12 +479,18 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { for (_, vma_group) in args.grouped_vmas { for backup_args in vma_group { if let Err(e) = upload_vma_file(pbs_args, &backup_args) { - eprintln!( + let err_msg = format!( "Failed to upload vma file at {:?} - {e}", backup_args.vma_file_path.unwrap_or("(stdin)".into()), ); - println!("Skipping VMID {}", backup_args.backup_id); - break; + + if args.skip_failed { + eprintln!("{}", err_msg); + println!("Skipping VMID {}", backup_args.backup_id); + break; + } else { + bail!(err_msg); + } } } } -- 2.39.5 From f.schauer at proxmox.com Wed Nov 13 16:57:59 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 16:57:59 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v6 1/4] add support for bulk import of a dump directory In-Reply-To: <20241113155802.190824-1-f.schauer@proxmox.com> References: <20241113155802.190824-1-f.schauer@proxmox.com> Message-ID: <20241113155802.190824-2-f.schauer@proxmox.com> When a path to a directory is provided in the vma_file argument, try to upload all VMA backups in the directory. This also handles compressed VMA files, notes and logs. If a vmid is specified with --vmid, only the backups of that particular vmid are uploaded. 
This is intended for use on a dump directory: PBS_FINGERPRINT='PBS_FINGERPRINT' vma-to-pbs \ --repository 'user at realm!token at server:port:datastore' \ /var/lib/vz/dump Signed-off-by: Filip Schauer --- Cargo.toml | 2 + src/main.rs | 172 +++++++++++++++++++++++++++++++++++++++++++++---- src/vma2pbs.rs | 63 ++++++++++++++++-- 3 files changed, 217 insertions(+), 20 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cd13426..ad80304 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,9 +7,11 @@ edition = "2021" [dependencies] anyhow = "1.0" bincode = "1.3" +chrono = "0.4" hyper = "0.14.5" pico-args = "0.5" md5 = "0.7.0" +regex = "1.7" scopeguard = "1.1.0" serde = "1.0" serde_json = "1.0" diff --git a/src/main.rs b/src/main.rs index 3e25591..de1e6e0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,26 +1,39 @@ +use std::collections::HashMap; use std::ffi::OsString; +use std::fs::read_dir; +use std::io::{BufRead, BufReader, Write}; +use std::path::PathBuf; use anyhow::{bail, Context, Error}; +use chrono::NaiveDateTime; use proxmox_sys::linux::tty; use proxmox_time::epoch_i64; +use regex::Regex; mod vma; mod vma2pbs; -use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, PbsArgs, VmaBackupArgs}; +use vma2pbs::{vma2pbs, BackupVmaToPbsArgs, Compression, PbsArgs, VmaBackupArgs}; const CMD_HELP: &str = "\ -Usage: vma-to-pbs [OPTIONS] --repository --vmid [vma_file] +Single VMA file usage: +vma-to-pbs [OPTIONS] --repository --vmid [vma_file] + +Bulk import usage: +vma-to-pbs [OPTIONS] --repository [--vmid ] [dump_directory] Arguments: - [vma_file] + [vma_file | dump_directory] Options: --repository Repository URL [--ns ] Namespace - --vmid + [--vmid ] Backup ID + This is required if a single VMA file is provided. + If not specified, bulk import all VMA backups in the provided directory. + If specified with a dump directory, only import backups of the specified vmid. [--backup-time ] Backup timestamp --fingerprint @@ -41,6 +54,8 @@ Options: File containing a comment/notes [--log-file ] Log file + -y, --yes + Automatic yes to prompts -h, --help Print help -V, --version @@ -52,7 +67,16 @@ fn parse_args() -> Result { args.remove(0); // remove the executable path. 
let mut first_later_args_index = 0; - let options = ["-h", "--help", "-c", "--compress", "-e", "--encrypt"]; + let options = [ + "-h", + "--help", + "-c", + "--compress", + "-e", + "--encrypt", + "-y", + "--yes", + ]; for (i, arg) in args.iter().enumerate() { if let Some(arg) = arg.to_str() { @@ -87,7 +111,7 @@ fn parse_args() -> Result { let pbs_repository = args.value_from_str("--repository")?; let namespace = args.opt_value_from_str("--ns")?; - let vmid = args.value_from_str("--vmid")?; + let vmid: Option = args.opt_value_from_str("--vmid")?; let backup_time: Option = args.opt_value_from_str("--backup-time")?; let backup_time = backup_time.unwrap_or_else(epoch_i64); let fingerprint = args.opt_value_from_str("--fingerprint")?; @@ -99,6 +123,7 @@ fn parse_args() -> Result { let key_password_file: Option = args.opt_value_from_str("--key-password-file")?; let notes_file: Option = args.opt_value_from_str("--notes-file")?; let log_file_path: Option = args.opt_value_from_str("--log-file")?; + let yes = args.contains(["-y", "--yes"]); match (encrypt, keyfile.is_some()) { (true, false) => bail!("--encrypt requires a --keyfile!"), @@ -196,15 +221,136 @@ fn parse_args() -> Result { encrypt, }; - let vma_args = VmaBackupArgs { - vma_file_path: vma_file_path.cloned(), - backup_id: vmid, - backup_time, - notes, - log_file_path, + let bulk = + vma_file_path + .map(PathBuf::from) + .and_then(|path| if path.is_dir() { Some(path) } else { None }); + + let grouped_vmas = if let Some(dump_dir_path) = bulk { + let re = Regex::new( + r"vzdump-qemu-(\d+)-(\d{4}_\d{2}_\d{2}-\d{2}_\d{2}_\d{2}).vma(|.zst|.lzo|.gz)$", + )?; + + let mut vmas = Vec::new(); + + for entry in read_dir(dump_dir_path)? { + let entry = entry?; + let path = entry.path(); + + if !path.is_file() { + continue; + } + + if let Some(file_name) = path.file_name().and_then(|n| n.to_str()) { + let Some((_, [backup_id, timestr, ext])) = + re.captures(file_name).map(|c| c.extract()) + else { + // Skip the file, since it is not a VMA backup + continue; + }; + + if let Some(ref vmid) = vmid { + if backup_id != vmid { + // Skip the backup, since it does not match the specified vmid + continue; + } + } + + let compression = match ext { + "" => None, + ".zst" => Some(Compression::Zstd), + ".lzo" => Some(Compression::Lzo), + ".gz" => Some(Compression::GZip), + _ => bail!("Unexpected file extension: {ext}"), + }; + + let backup_time = NaiveDateTime::parse_from_str(timestr, "%Y_%m_%d-%H_%M_%S")? 
+ .and_utc() + .timestamp(); + + let notes_path = path.with_file_name(format!("{file_name}.notes")); + let notes = proxmox_sys::fs::file_read_optional_string(notes_path)?; + + let log_path = path.with_file_name(format!("{file_name}.log")); + let log_file_path = if log_path.exists() { + Some(log_path.to_path_buf().into_os_string()) + } else { + None + }; + + let backup_args = VmaBackupArgs { + vma_file_path: Some(path.clone().into()), + compression, + backup_id: backup_id.to_string(), + backup_time, + notes, + log_file_path, + }; + vmas.push(backup_args); + } + } + + vmas.sort_by_key(|d| d.backup_time); + let total_vma_count = vmas.len(); + let grouped_vmas = vmas.into_iter().fold( + HashMap::new(), + |mut grouped: HashMap>, vma_args| { + grouped + .entry(vma_args.backup_id.clone()) + .or_default() + .push(vma_args); + grouped + }, + ); + + if grouped_vmas.is_empty() { + bail!("Did not find any backup archives"); + } + + println!( + "Found {total_vma_count} backup archive(s) of {} different VMID(s):", + grouped_vmas.len() + ); + + for (backup_id, vma_group) in &grouped_vmas { + println!("- VMID {backup_id}: {} backups", vma_group.len()); + } + + if !yes { + eprint!("Proceed with the bulk import? (Y/n): "); + std::io::stdout().flush()?; + let mut line = String::new(); + + BufReader::new(std::io::stdin()).read_line(&mut line)?; + let trimmed = line.trim(); + match trimmed { + "y" | "Y" | "" => {} + "n" | "N" => bail!("Bulk import was not confirmed."), + _ => bail!("Unexpected choice '{trimmed}'!"), + } + } + + grouped_vmas + } else if let Some(vmid) = vmid { + HashMap::from([( + vmid.clone(), + vec![VmaBackupArgs { + vma_file_path: vma_file_path.cloned(), + compression: None, + backup_id: vmid, + backup_time, + notes, + log_file_path, + }], + )]) + } else { + bail!("No vmid specified for single backup file"); }; - let options = BackupVmaToPbsArgs { pbs_args, vma_args }; + let options = BackupVmaToPbsArgs { + pbs_args, + grouped_vmas, + }; Ok(options) } diff --git a/src/vma2pbs.rs b/src/vma2pbs.rs index a888a7b..b26c62e 100644 --- a/src/vma2pbs.rs +++ b/src/vma2pbs.rs @@ -4,6 +4,7 @@ use std::collections::HashMap; use std::ffi::{c_char, CStr, CString, OsString}; use std::fs::File; use std::io::{stdin, BufRead, BufReader, Read}; +use std::process::{Command, Stdio}; use std::ptr; use std::time::SystemTime; @@ -30,7 +31,7 @@ const VMA_CLUSTER_SIZE: usize = 65536; pub struct BackupVmaToPbsArgs { pub pbs_args: PbsArgs, - pub vma_args: VmaBackupArgs, + pub grouped_vmas: HashMap>, } pub struct PbsArgs { @@ -45,8 +46,15 @@ pub struct PbsArgs { pub encrypt: bool, } +pub enum Compression { + Zstd, + Lzo, + GZip, +} + pub struct VmaBackupArgs { pub vma_file_path: Option, + pub compression: Option, pub backup_id: String, pub backup_time: i64, pub notes: Option, @@ -467,7 +475,18 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { let start_transfer_time = SystemTime::now(); - upload_vma_file(pbs_args, &args.vma_args)?; + for (_, vma_group) in args.grouped_vmas { + for backup_args in vma_group { + if let Err(e) = upload_vma_file(pbs_args, &backup_args) { + eprintln!( + "Failed to upload vma file at {:?} - {e}", + backup_args.vma_file_path.unwrap_or("(stdin)".into()), + ); + println!("Skipping VMID {}", backup_args.backup_id); + break; + } + } + } let transfer_duration = SystemTime::now().duration_since(start_transfer_time)?; let total_seconds = transfer_duration.as_secs(); @@ -480,13 +499,43 @@ pub fn vma2pbs(args: BackupVmaToPbsArgs) -> Result<(), Error> { } fn upload_vma_file(pbs_args: 
&PbsArgs, backup_args: &VmaBackupArgs) -> Result<(), Error> { - let vma_file: Box<dyn BufRead> = match &backup_args.vma_file_path { - Some(vma_file_path) => match File::open(vma_file_path) { - Err(why) => return Err(anyhow!("Couldn't open file: {}", why)), - Ok(file) => Box::new(BufReader::new(file)), + match &backup_args.vma_file_path { + Some(vma_file_path) => println!("Uploading VMA backup from {vma_file_path:?}"), + None => println!("Uploading VMA backup from (stdin)"), + }; + + let vma_file: Box<dyn BufRead> = match &backup_args.compression { + Some(compression) => { + let vma_file_path = backup_args + .vma_file_path + .as_ref() + .expect("No VMA file path provided"); + let mut cmd = match compression { + Compression::Zstd => { + let mut cmd = Command::new("zstd"); + cmd.args(["-q", "-d", "-c"]); + cmd + } + Compression::Lzo => { + let mut cmd = Command::new("lzop"); + cmd.args(["-d", "-c"]); + cmd + } + Compression::GZip => Command::new("zcat"), + }; + let process = cmd.arg(vma_file_path).stdout(Stdio::piped()).spawn()?; + let stdout = process.stdout.expect("Failed to capture stdout"); + Box::new(BufReader::new(stdout)) + } + None => match &backup_args.vma_file_path { + Some(vma_file_path) => match File::open(vma_file_path) { + Err(why) => return Err(anyhow!("Couldn't open file: {why}")), + Ok(file) => Box::new(BufReader::new(file)), + }, + None => Box::new(BufReader::new(stdin())), }, - None => Box::new(BufReader::new(stdin())), }; + let vma_reader = VmaReader::new(vma_file)?; let pbs = create_pbs_backup_task(pbs_args, backup_args)?; -- 2.39.5 From f.schauer at proxmox.com Wed Nov 13 17:02:00 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 17:02:00 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 1/4] add support for bulk import of a dump directory In-Reply-To: References: <20241111130822.124584-1-f.schauer@proxmox.com> <20241111130822.124584-2-f.schauer@proxmox.com> Message-ID: <77ed0a36-8302-4ee4-82ab-a547994b9f78@proxmox.com> On 13/11/2024 12:41, Shannon Sterz wrote: >> const CMD_HELP: &str = "\ >> Usage: vma-to-pbs [OPTIONS] --repository --vmid [vma_file] >> >> Arguments: >> - [vma_file] >> + [vma_file | dump_directory] >> >> Options: >> --repository >> Repository URL >> [--ns ] >> Namespace >> - --vmid >> + [--vmid ] > nit: this is marked as optional here (and in the code), but the usage > line above still makes it look like it's required. That usage line describes the command for a single VMA file. In that case `--vmid` actually is required. It is however not required for bulk import. So to make that clearer I made two usage lines in v6. On 13/11/2024 12:41, Shannon Sterz wrote: >> + let vma_file: Box<dyn BufRead> = match &backup_args.compression { >> + Some(compression) => { >> + let vma_file_path = backup_args >> + .vma_file_path >> + .as_ref() >> + .expect("No VMA file path provided"); >> + let mut cmd = match compression { >> + Compression::Zstd => { >> + let mut cmd = Command::new("zstd"); >> + cmd.args(["-q", "-d", "-c"]); >> + cmd > i think the following would be more elegant here: > > > ```rs > Compression::Zstd => Command::new("zstd") > .args(["-q", "-d", "-c"]), > ``` > > it's a bit more concise imo > >> + } >> + Compression::Lzo => { >> + let mut cmd = Command::new("lzop"); >> + cmd.args(["-d", "-c"]); >> + cmd > same as above Yeah I tried that, but unfortunately it does not compile: ``` error[E0716]: temporary value dropped while borrowed --> src/vma2pbs.rs:532:38 | 531 | let mut cmd = match compression { |
------- borrow later stored here 532 | Compression::Zstd => Command::new("zstd").args(["-q", "-d", "-c"]), | ^^^^^^^^^^^^^^^^^^^^ - temporary value is freed at the end of this statement | | | creates a temporary value which is freed while still in use | = note: consider using a `let` binding to create a longer lived value ```
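The error boils down to `Command::args` returning `&mut Command`: the suggested one-liner borrows from a temporary `Command` that is dropped at the end of the match arm, while binding the owner first, as the patch does, keeps it alive. A minimal self-contained sketch of the compiling shape (illustrative only, not part of the patch):

```rust
use std::process::Command;

// Compiles: the function returns an owned `Command`, configured in place.
// `args` mutates `cmd` through `&mut Command` and the owner outlives the call.
fn zstd_decompress_cmd() -> Command {
    let mut cmd = Command::new("zstd");
    cmd.args(["-q", "-d", "-c"]);
    cmd
}

// Rejected with E0716: `args` returns a `&mut Command` borrowing a temporary
// `Command`, which is freed at the end of the expression.
// fn zstd_decompress_cmd_broken() -> Command {
//     Command::new("zstd").args(["-q", "-d", "-c"])
// }

fn main() {
    let cmd = zstd_decompress_cmd();
    println!("{cmd:?}");
}
```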
From f.schauer at proxmox.com Wed Nov 13 17:02:57 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 17:02:57 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 3/4] use level-based logging instead of println In-Reply-To: References: <20241111130822.124584-1-f.schauer@proxmox.com> <20241111130822.124584-4-f.schauer@proxmox.com> Message-ID: <7c54e235-ba3d-42f7-bb86-67ffa13731bf@proxmox.com> On 13/11/2024 12:41, Shannon Sterz wrote: >> - // Skip the backup, since it does not match the specified vmid >> + log::debug!( >> + "Skip backup with VMID {}, since it does not match specified VMID {}", >> + backup_id, >> + vmid > nit: you can use format strings here I could, but then the line would exceed the 100 character limit and I would rather not split the string over multiple lines. From f.schauer at proxmox.com Wed Nov 13 17:07:00 2024 From: f.schauer at proxmox.com (Filip Schauer) Date: Wed, 13 Nov 2024 17:07:00 +0100 Subject: [pbs-devel] [PATCH vma-to-pbs v5 4/4] log device upload progress as a percentage In-Reply-To: References: <20241111130822.124584-1-f.schauer@proxmox.com> <20241111130822.124584-5-f.schauer@proxmox.com> Message-ID: <0099d78d-e110-485a-8587-bbdf8a564275@proxmox.com> On 13/11/2024 12:41, Shannon Sterz wrote: > Other than the nits across the four patches, consider this series: > > Reviewed-by: Shannon Sterz Thanks for the review! The nits are addressed in v6. Superseded by: https://lists.proxmox.com/pipermail/pbs-devel/2024-November/011465.html From g.goller at proxmox.com Thu Nov 14 10:25:31 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 14 Nov 2024 10:25:31 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 1/2] docs: add security implications of prune and change detection mode In-Reply-To: <20241113155545.354045-1-c.ebner@proxmox.com> References: <20241113155545.354045-1-c.ebner@proxmox.com> Message-ID: On 13.11.2024 16:55, Christian Ebner wrote: >diff --git a/docs/maintenance.rst b/docs/maintenance.rst >index 4bb135e4e..e8a26d69c 100644 >--- a/docs/maintenance.rst >+++ b/docs/maintenance.rst >@@ -6,8 +6,34 @@ Maintenance Tasks > Pruning > ------- > >-Prune lets you specify which backup snapshots you want to keep. >-The following retention options are available: >+Prune lets you specify which backup snapshots you want to keep, removing others. >+When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs, >+log and notes) is removed. The chunks containing the actual backup data and >+previously referenced by the pruned snapshot, have to be removed by a garbage >+collection run. >+ >+.. Caution:: Take into consideration that sensitive information stored in a >+ given data chunk will outlive pruned snapshots and remain present in the >+ datastore as long as referenced by at least one backup snapshot. Further, >+ *even* if no snapshot references a given chunk, it will remain present until >+ removed by the garbage collection. >+ >+ Further, file-level backups created using the change detection mode Second sentence that begins with 'Further'? Maybe substitute this one with 'Moreover' or 'Additionally' so it reads better. >+ `metadata` can reference backup chunks containing files which have vanished use double backticks here to highlight correctly, so: ``metadata``. >+ since the previous backup, but might still be accessible when reading the >+ chunks raw data is possible (client or server side). This sentence is a bit messy and long, maybe we could rewrite it as: Moreover, file-level backups created using the change detection mode ``metadata`` can reference backup chunks containing files which have vanished since the previous backup. These might still be accessible by reading the raw data (client or server side). >+ To remove chunks containing sensitive data, prune any snapshot made while the >+ data was part of the backup input and run a garbage collection. Further, if >+ using file-based backups with change detection mode `metadata`, additionally s/`metadata`/``metadata``/ >+ prune all snapshots since the sensitive data was no longer part of the backup >+ input and run a garbage collection. >+ >+ The no longer referenced chunks will then be marked for deletion on the next >+ garbage collection run and removed by a subsequent run after the grace >+ period. >+ >+The following retention options are available for pruning: > > ``keep-last <N>`` > Keep the last ``<N>`` backup snapshots. Everything else is fine! From g.goller at proxmox.com Thu Nov 14 10:39:45 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 14 Nov 2024 10:39:45 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/2] docs: deduplicate background details for garbage collection In-Reply-To: <20241113155545.354045-2-c.ebner@proxmox.com> References: <20241113155545.354045-1-c.ebner@proxmox.com> <20241113155545.354045-2-c.ebner@proxmox.com> Message-ID: On 13.11.2024 16:55, Christian Ebner wrote: >diff --git a/docs/maintenance.rst b/docs/maintenance.rst >index e8a26d69c..bba3feff4 100644 >--- a/docs/maintenance.rst >+++ b/docs/maintenance.rst >@@ -197,6 +197,8 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up > periodically. For most setups a weekly schedule provides a good interval to > start. > >+.. _gc_background: >+ > GC Background > ^^^^^^^^^^^^^ > >@@ -222,17 +224,31 @@ datastore or interfering with other backups. > The garbage collection (GC) process is performed per datastore and is split > into two phases: > >-- Phase one: Mark >- All index files are read, and the access time of the referred chunk files is >- updated. >- >-- Phase two: Sweep >- The task iterates over all chunks, checks their file access time, and if it >- is older than the cutoff time (i.e., the time when GC started, plus some >- headroom for safety and Linux file system behavior), the task knows that the >- chunk was neither referred to in any backup index nor part of any currently >- running backup that has no index to scan for. As such, the chunk can be >- safely deleted. >+- Phase one (Mark): >+ >+ All index files are read, and the access time (``atime``) of the referenced >+ chunk files is updated. >+ >+- Phase two (Sweep): >+ >+ The task iterates over all chunks and checks their file access time against a >+ cutoff time. The cutoff time is given by either the oldest backup writer >+ instance, if present, or 24 hours and 5 minutes after the start of garbage >+ collection.
>+ >+ Garbage collection can consider chunk files with access time older than the s/can consider/considers/ It always considers chunks with atime older than cutoff to be dangling afaik. >+ cutoff time to be neither referenced by any backup snapshot's index, nor part >+ of any currently running backup job. Therefore, these chunks can safeley be >+ deleted. s/safeley/safely/ >+ >+ Chunks within the grace period will not be deleted and logged at the end of >+ the garbage collection task as *Pending removals*. >+ >+.. note:: The grace period for backup chunk removal is not arbitrary, but stems >+ from the fact that filesystems are typically mounted with the ``relatime`` >+ option by default. This results in better performance by only updating the >+ ``atime`` property if a file has been modified since the last access or the >+ last access has been at least 24 hours ago. > > Manually Starting GC > ^^^^^^^^^^^^^^^^^^^^ Otherwise this is great! From c.ebner at proxmox.com Thu Nov 14 10:43:41 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 10:43:41 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 1/2] docs: add security implications of prune and change detection mode In-Reply-To: References: <20241113155545.354045-1-c.ebner@proxmox.com> Message-ID: On 11/14/24 10:25, Gabriel Goller wrote: > On 13.11.2024 16:55, Christian Ebner wrote: >> diff --git a/docs/maintenance.rst b/docs/maintenance.rst >> index 4bb135e4e..e8a26d69c 100644 >> --- a/docs/maintenance.rst >> +++ b/docs/maintenance.rst >> @@ -6,8 +6,34 @@ Maintenance Tasks >> Pruning >> ------- >> >> -Prune lets you specify which backup snapshots you want to keep. >> -The following retention options are available: >> +Prune lets you specify which backup snapshots you want to keep, >> removing others. >> +When pruning a snapshot, only the snapshot metadata (manifest, >> indices, blobs, >> +log and notes) is removed. The chunks containing the actual backup >> data and >> +previously referenced by the pruned snapshot, have to be removed by a >> garbage >> +collection run. >> + >> +.. Caution:: Take into consideration that sensitive information >> stored in a >> + given data chunk will outlive pruned snapshots and remain present >> in the >> + datastore as long as referenced by at least one backup snapshot. >> Further, >> + *even* if no snapshot references a given chunk, it will remain >> present until >> + removed by the garbage collection. >> + >> + Further, file-level backups created using the change detection mode > > Second sentence that begins with 'Further'? Maybe substitute this one > with 'Moreover' or 'Additionally' so it reads better. True, `Moreover` sounds better to me... > >> + `metadata` can reference backup chunks containing files which have >> vanished > > use double backticks here to highlight correctly, so: ``metadata``. Acked, thx! > >> + since the previous backup, but might still be accessible when >> reading the >> + chunks raw data is possible (client or server side). > > This sentence is a bit messy and long, maybe we could rewrite it as: > > Moreover, file-level backups created using the change detection mode > ``metadata`` can reference backup chunks containing files which have > vanished since the previous backup. These might still be accessible Yes, I do agree that splitting this into two sentences makes it easier to read. In that case I would even suggest to explicitly mention that this refers to the files, not just the chunks, e.g. by: ...
vanished since the previous backup. These files might still be accessible. ... > by reading the raw data (client or server side). > >> + To remove chunks containing sensitive data, prune any snapshot >> made while the >> + data was part of the backup input and run a garbage collection. >> Further, if >> + using file-based backups with change detection mode `metadata`, >> additionally > > s/`metadata`/``metadata``/ Acked, thx! > >> + prune all snapshots since the sensitive data was no longer part of >> the backup >> + input and run a garbage collection. >> + >> + The no longer referenced chunks will then be marked for deletion >> on the next >> + garbage collection run and removed by a subsequent run after the >> grace >> + period. >> + >> +The following retention options are available for pruning: >> >> ``keep-last <N>`` >> Keep the last ``<N>`` backup snapshots. > > Everything else is fine! OK, will send a new version, thx! From g.goller at proxmox.com Thu Nov 14 10:45:44 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 14 Nov 2024 10:45:44 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] chunk_store: fix problem with permission checking In-Reply-To: References: <20241113124047.97456-1-h.laimer@proxmox.com> Message-ID: <44i2jo6d7ybd7imoeaavcu2wdk2mibajvflulr5ugzmzjomwdy@a2kczrvq6sub> On 13.11.2024 15:42, Wolfgang Bumiller wrote: >On Wed, Nov 13, 2024 at 03:20:54PM GMT, Gabriel Goller wrote: >> On 13.11.2024 13:40, Hannes Laimer wrote: >> > Permissions are stored in the lower 9 bits (rwxrwxrwx), >> > so we have to mask `st_mode` with 0o777. >> > The datastore root dir is created with 755, the `.chunks` dir and its >> > contents with 750 and the `.lock` file with 644, this changes the >> > expected permissions accordingly. >> >> Oops, this is my bad, I missed this. >> >> Matching the whole st_mode exactly would be nice, but not so practical >> as we would need to be generic over file/dir and symbolic link. >> >> Also CC'ing @Wolfgang as he persuaded me to match exactly in the first >> place :) > >Ah yes, would have had to include the mode bits... > >But it seems the actual permissions were wrong as well? Yep :) >(Not sure if I mentioned this, but I'm not convinced we should *fail* on >unexpected permissions, I mean, we're already changing the values in the >check now?) Hmm I think we should fail on permissions that are too low :) But anyway, I think it's better to be strict here; we don't want a user to import a datastore and then something failing. (and also the user can always change the permissions.) From c.ebner at proxmox.com Thu Nov 14 10:47:21 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 10:47:21 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/2] docs: deduplicate background details for garbage collection In-Reply-To: References: <20241113155545.354045-1-c.ebner@proxmox.com> <20241113155545.354045-2-c.ebner@proxmox.com> Message-ID: On 11/14/24 10:39, Gabriel Goller wrote: > On 13.11.2024 16:55, Christian Ebner wrote: >> diff --git a/docs/maintenance.rst b/docs/maintenance.rst >> index e8a26d69c..bba3feff4 100644 >> --- a/docs/maintenance.rst >> +++ b/docs/maintenance.rst >> @@ -197,6 +197,8 @@ It's recommended to setup a schedule to ensure >> that unused space is cleaned up >> periodically. For most setups a weekly schedule provides a good >> interval to >> start. >> >> +.. _gc_background: >> + >> GC Background >> ^^^^^^^^^^^^^ >> >> @@ -222,17 +224,31 @@ datastore or interfering with other backups.
>> The garbage collection (GC) process is performed per datastore and is >> split >> into two phases: >> >> -- Phase one: Mark >> - All index files are read, and the access time of the referred chunk >> files is >> - updated. >> - >> -- Phase two: Sweep >> - The task iterates over all chunks, checks their file access time, >> and if it >> - is older than the cutoff time (i.e., the time when GC started, plus >> some >> - headroom for safety and Linux file system behavior), the task knows >> that the >> - chunk was neither referred to in any backup index nor part of any >> currently >> - running backup that has no index to scan for. As such, the chunk >> can be >> - safely deleted. >> +- Phase one (Mark): >> + >> + All index files are read, and the access time (``atime``) of the >> referenced >> + chunk files is updated. >> + >> +- Phase two (Sweep): >> + >> + The task iterates over all chunks and checks their file access time >> against a >> + cutoff time. The cutoff time is given by either the oldest backup >> writer >> + instance, if present, or 24 hours and 5 minutes after the start of >> garbage >> + collection. >> + > >> + Garbage collection can consider chunk files with access time older >> than the > > s/can consider/considers/ Acked! > It always considers chunks with atime older than cutoff to be dangling > afaik. This part here is referring to the second phase, so cleaning up the chunks. Or what do you mean here by dangling? >> + cutoff time to be neither referenced by any backup snapshot's >> index, nor part >> + of any currently running backup job. Therefore, these chunks can >> safeley be >> + deleted. > > s/safeley/safely/ Acked! > >> + >> + Chunks within the grace period will not be deleted and logged at >> the end of >> + the garbage collection task as *Pending removals*. >> + >> +.. note:: The grace period for backup chunk removal is not arbitrary, >> but stems >> + from the fact that filesystems are typically mounted with the >> ``relatime`` >> + option by default. This results in better performance by only >> updating the >> + ``atime`` property if a file has been modified since the last >> access or the >> + last access has been at least 24 hours ago. >> >> Manually Starting GC >> ^^^^^^^^^^^^^^^^^^^^ > > Otherwise this is great! Thx, will fold in your comments and send a new version From g.goller at proxmox.com Thu Nov 14 11:23:30 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 14 Nov 2024 11:23:30 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/2] docs: deduplicate background details for garbage collection In-Reply-To: References: <20241113155545.354045-1-c.ebner@proxmox.com> <20241113155545.354045-2-c.ebner@proxmox.com> Message-ID: On 14.11.2024 10:47, Christian Ebner wrote: >On 11/14/24 10:39, Gabriel Goller wrote: >>On 13.11.2024 16:55, Christian Ebner wrote: >>>diff --git a/docs/maintenance.rst b/docs/maintenance.rst >>>index e8a26d69c..bba3feff4 100644 >>>--- a/docs/maintenance.rst >>>+++ b/docs/maintenance.rst >>>@@ -197,6 +197,8 @@ It's recommended to setup a schedule to ensure >>>that unused space is cleaned up >>>periodically. For most setups a weekly schedule provides a good >>>interval to >>>start. >>> >>>+.. _gc_background: >>>+ >>>GC Background >>>^^^^^^^^^^^^^ >>> >>>@@ -222,17 +224,31 @@ datastore or interfering with other backups. >>>The garbage collection (GC) process is performed per datastore and >>>is split >>>into two phases: >>> >>>-- Phase one: Mark >>>-
All index files are read, and the access time of the referred >>>chunk files is >>>- updated. >>>- >>>-- Phase two: Sweep >>>- The task iterates over all chunks, checks their file access >>>time, and if it >>>- is older than the cutoff time (i.e., the time when GC started, >>>plus some >>>- headroom for safety and Linux file system behavior), the task >>>knows that the >>>- chunk was neither referred to in any backup index nor part of >>>any currently >>>- running backup that has no index to scan for. As such, the >>>chunk can be >>>- safely deleted. >>>+- Phase one (Mark): >>>+ >>>+ All index files are read, and the access time (``atime``) of >>>the referenced >>>+ chunk files is updated. >>>+ >>>+- Phase two (Sweep): >>>+ >>>+ The task iterates over all chunks and checks their file access >>>time against a >>>+ cutoff time. The cutoff time is given by either the oldest >>>backup writer >>>+ instance, if present, or 24 hours and 5 minutes after the start >>>of garbage >>>+ collection. >>>+ >> >>>+ Garbage collection can consider chunk files with access time >>>older than the >> >>s/can consider/considers/ > >Acked! > >>It always considers chunks with atime older than cutoff to be dangling >>afaik. >This part here is referring to the second phase, so cleaning up the >chunks. Or what do you mean here by dangling? Yep that's what I meant, the gc *must* consider these chunks, it's not optional, so no 'can'. >>>+ cutoff time to be neither referenced by any backup snapshot's >>>index, nor part >>>+ of any currently running backup job. Therefore, these chunks >>>can safeley be >>>+ deleted. >> >>s/safeley/safely/ > >Acked! Oh and consider: Reviewed-by: Gabriel Goller for both patches!
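Summarizing the sweep cutoff rule settled on in the thread above, here is a hedged sketch; the free function and its parameter names are invented for illustration and are not the actual proxmox-backup implementation:

```rust
/// Illustrative sketch of the sweep-phase cutoff described in the docs patch:
/// chunks with an atime older than the returned cutoff may be deleted.
fn sweep_cutoff(gc_start_epoch: i64, oldest_writer_start: Option<i64>) -> i64 {
    // Grace period of 24 hours and 5 minutes, matching `relatime` semantics:
    // the atime of a touched chunk may lag real accesses by up to 24 hours.
    const GRACE_PERIOD: i64 = 24 * 3600 + 5 * 60;
    let cutoff = gc_start_epoch - GRACE_PERIOD;
    match oldest_writer_start {
        // A still-running backup writer may reference chunks that have no
        // index yet, so nothing newer than its start time may be swept.
        Some(writer_start) => cutoff.min(writer_start),
        None => cutoff,
    }
}
```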
From t.lamprecht at proxmox.com Thu Nov 14 14:29:49 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Thu, 14 Nov 2024 14:29:49 +0100 Subject: [pbs-devel] [PATCH 1/2] rrd: relay error to update database to caller Message-ID: <20241114132950.3536172-1-t.lamprecht@proxmox.com> It does not make much sense to just log here, especially as the update fn has no context about what RRD series it's operating on. I.e., logged message previously: > rrd update failed: time in past (...) vs logged message now: > rrd::update_value 'host/cpu' failed - time in past (...) The callers of the Database::update fn in the RRD Cache map can already handle errors, albeit it won't save the freshly loaded RRD in the map anymore if the update fails; any load will still do that, though. Signed-off-by: Thomas Lamprecht --- It might be slightly nicer to factor out the common call to update to happen after getting/creating the RRD, but it's not trivial to do so as efficiently due to ownership handover when inserting the RRD in the map. proxmox-rrd/src/cache/rrd_map.rs | 4 ++-- proxmox-rrd/src/rrd.rs | 14 ++++++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/proxmox-rrd/src/cache/rrd_map.rs b/proxmox-rrd/src/cache/rrd_map.rs index 0ef61cfa..4bcedade 100644 --- a/proxmox-rrd/src/cache/rrd_map.rs +++ b/proxmox-rrd/src/cache/rrd_map.rs @@ -42,7 +42,7 @@ impl RRDMap { ) -> Result<(), Error> { if let Some(rrd) = self.map.get_mut(rel_path) { if !new_only || time > rrd.last_update() { - rrd.update(time, value); + rrd.update(time, value)?; } } else { let mut path = self.config.basedir.clone(); @@ -61,7 +61,7 @@ impl RRDMap { }; if !new_only || time > rrd.last_update() { - rrd.update(time, value); + rrd.update(time, value)?; } self.map.insert(rel_path.to_string(), rrd); } diff --git a/proxmox-rrd/src/rrd.rs b/proxmox-rrd/src/rrd.rs index 440abe06..4bf4f01b 100644 --- a/proxmox-rrd/src/rrd.rs +++ b/proxmox-rrd/src/rrd.rs @@ -469,14 +469,10 @@ impl Database { /// Update the value (in memory) /// /// Note: This does not call [Self::save]. - pub fn update(&mut self, time: f64, value: f64) { - let value = match self.source.compute_new_value(time, value) { - Ok(value) => value, - Err(err) => { - log::error!("rrd update failed: {}", err); - return; - } - }; + pub fn update(&mut self, time: f64, value: f64) -> Result<(), Error> { + let value = self + .source + .compute_new_value(time, value)?; let last_update = self.source.last_update; self.source.last_update = time; @@ -485,6 +481,8 @@ impl Database { rra.delete_old_slots(time, last_update); rra.compute_new_value(time, last_update, value); } + + Ok(()) } /// Extract data from the archive -- 2.39.5 From t.lamprecht at proxmox.com Thu Nov 14 14:29:50 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Thu, 14 Nov 2024 14:29:50 +0100 Subject: [pbs-devel] [PATCH 2/2] rrd: clamp future last_update time on load In-Reply-To: <20241114132950.3536172-1-t.lamprecht@proxmox.com> References: <20241114132950.3536172-1-t.lamprecht@proxmox.com> Message-ID: <20241114132950.3536172-2-t.lamprecht@proxmox.com> We have already had cases reported about systems where the BIOS had a time rather far in the future and thus anything that requires some time ordering might fail if it was initialised before an NTP system managed to sync the clock again. RRD updates are one such thing, so as a stop-gap just clamp the last_update time on load. Signed-off-by: Thomas Lamprecht --- it might be nicer to clamp when saving the file, as that also has a higher chance of an NTP client having run and thus avoids an error in the other direction, i.e., when the system is booted with time in the past. So feel free to take this over and rework it for that case; just sending it out as I had a prototype around for some recent debug session on my machine. proxmox-rrd/src/rrd.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/proxmox-rrd/src/rrd.rs b/proxmox-rrd/src/rrd.rs index 4bf4f01b..73a0ebd4 100644 --- a/proxmox-rrd/src/rrd.rs +++ b/proxmox-rrd/src/rrd.rs @@ -378,6 +378,11 @@ impl Database { if rrd.source.last_update < 0.0 { bail!("rrd file has negative last_update time"); + } else if rrd.source.last_update > proxmox_time::epoch_f64() { + let mut rrd = rrd; + log::error!("rrd file has last_update time from the future, clamping to now!"); + rrd.source.last_update = proxmox_time::epoch_f64(); + return Ok(rrd); } Ok(rrd) -- 2.39.5
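A sketch of the clamp-on-save alternative mentioned in the sign-off note above: clamping just before persisting, so that a meanwhile-synced clock corrects files written under a wrong BIOS time. The `save_to` entry point is an invented name and the direct `source.last_update` access mirrors the diff; this is illustrative only, not the actual proxmox-rrd API:

```rust
// Assumed context: inside proxmox-rrd, with `Database` exposing
// `source.last_update: f64` as in the patch above.
use anyhow::Error;

fn clamp_and_save(rrd: &mut Database, path: &std::path::Path) -> Result<(), Error> {
    let now = proxmox_time::epoch_f64();
    if rrd.source.last_update > now {
        // same clamping as the load-time variant, just applied before writing
        log::warn!("clamping future last_update time to now before saving {path:?}");
        rrd.source.last_update = now;
    }
    rrd.save_to(path) // stands in for the real persistence call
}
```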
From t.lamprecht at proxmox.com Thu Nov 14 15:17:45 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Thu, 14 Nov 2024 15:17:45 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v2] fix #5233: don't require root for some tape operations In-Reply-To: <20241113113742.2278769-1-d.csapak@proxmox.com> References: <20241113113742.2278769-1-d.csapak@proxmox.com> Message-ID: <2891f884-2260-4866-8cf4-6a822243d5ee@proxmox.com> Am 13.11.24 um 12:37 schrieb Dominik Csapak: > instead, require 'Tape.Write' or 'Tape.Modify' on '/tape' path. > This makes it possible for a TapeOperator to destroy tapes and for a > TapeAdmin to update the tape status, instead of just root at pam. > > I opted for the path '/tape' since we don't have a dedicated acl > structure for single tapes, just '/tape/pool' (which does not apply > since not all tapes have to have a pool), '/tape/device' (which is > intended for drives/changers) and '/tape/jobs' (which is for jobs only). > > Also we use that path for e.g. move_tape already. > > Signed-off-by: Dominik Csapak > --- > changes from v1: > * rebase on master > * change permission required for update status to TAPE_MODIFY > > src/api2/tape/media.rs | 8 +++++++- > 1 file changed, 7 insertions(+), 1 deletion(-) > > applied, thanks! From t.lamprecht at proxmox.com Thu Nov 14 15:21:45 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Thu, 14 Nov 2024 15:21:45 +0100 Subject: [pbs-devel] applied: [PATCH proxmox v2] fix #5868: rest-server: connection: fix busy waiting on closed connections pre tls In-Reply-To: <20241113103937.1554474-1-d.csapak@proxmox.com> References: <20241113103937.1554474-1-d.csapak@proxmox.com> Message-ID: <145720f3-ca29-4b93-8e00-d08dc11576a3@proxmox.com> Am 13.11.24 um 11:39 schrieb Dominik Csapak: > when a connection is closed before we have enough data to determine > if it's tls or not, the socket stays in a readable state. > > Sadly, the tokio timeout we use here gets starved by the async_io > callback. > > To fix this, save the amount of bytes peek returned and if they did not > change between invocations of the callback, we assume that the > connection was closed and exit with an error. > > Signed-off-by: Dominik Csapak > --- > changes from v1: > * removed leftover unrelated test code > * fixed up the commit message with the bug # > > proxmox-rest-server/src/connection.rs | 10 +++++++++- > 1 file changed, 9 insertions(+), 1 deletion(-) > > applied this as stop-gap, thanks! Like I mentioned off-list, I changed the error to ConnectionAborted and reworded the commit message a bit.
From c.ebner at proxmox.com Thu Nov 14 15:41:09 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 15:41:09 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 0/5] fix #5853: ignore stale files Message-ID: <20241114144114.375987-1-c.ebner@proxmox.com> When files and their associated metadata get invalidated, I/O operations on network filesystems return ESTALE to indicate that the filehandle does not reference a valid file anymore. Currently, the proxmox-backup-client does not cover such cases; it will fail with a hard error when a stale file handle is encountered. Any concurrent operation invalidating file handles has the potential to lead to the backups failing if timed accordingly. For local filesystems this is not an issue, as the file remains accessible until the file handle is closed. Make the backup client more resilient by handling the ESTALE errors gracefully, warning the user about the vanished/invalidated files, while generating a valid and consistent backup archive nevertheless. Changes since version 1: - Avoid tuples in return values by downcasting anyhow::Error to Errno when the latter is required - Add report stale file handle helper - Refactor report vanished/changed file helpers Christian Ebner (5): client: pxar: refactor report vanished/changed helpers client: pxar: skip directories on stale file handle client: pxar: skip directory entries on stale file handle client: pxar: warn user and ignore stale file handles on file open fix #5853: client: pxar: exclude stale files on metadata/link read pbs-client/src/pxar/create.rs | 94 +++++++++++++++++++++++++---------- 1 file changed, 69 insertions(+), 25 deletions(-) -- 2.39.5
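The series below applies one pattern throughout: match `Errno::ESTALE`, warn, and skip the entry instead of propagating a hard error. A condensed, self-contained sketch of that pattern follows; `stat_or_skip` and its signature are invented for this example (assuming the `nix`, `anyhow` and `log` crates), not taken from create.rs:

```rust
use anyhow::{Context, Error};
use nix::errno::Errno;
use nix::sys::stat::FileStat;
use std::os::unix::io::RawFd;
use std::path::Path;

/// Stat a file, treating a stale (NFS) file handle as "skip this entry".
fn stat_or_skip(fd: RawFd, path: &Path) -> Result<Option<FileStat>, Error> {
    match nix::sys::stat::fstat(fd) {
        Ok(stat) => Ok(Some(stat)),
        Err(Errno::ESTALE) => {
            // warn, but let the caller continue with the next entry
            log::warn!("warning: stale file handle encountered while reading: {path:?}");
            Ok(None)
        }
        // any other errno still aborts with context, as before
        Err(err) => Err(err).with_context(|| format!("stat failed on {path:?}")),
    }
}
```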
From c.ebner at proxmox.com Thu Nov 14 15:41:12 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 15:41:12 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 3/5] client: pxar: skip directory entries on stale file handle In-Reply-To: <20241114144114.375987-1-c.ebner@proxmox.com> References: <20241114144114.375987-1-c.ebner@proxmox.com> Message-ID: <20241114144114.375987-4-c.ebner@proxmox.com> Skip over the entries when a stale file handle is encountered during generation of the entry list of a directory entry. This will lead to the directory not being backed up if the directory itself was invalidated, as reading all child entries will then also fail, or to the directory being backed up without the entries which have been invalidated. Signed-off-by: Christian Ebner --- changes since version 1: - use report_stale_file_handle helper method pbs-client/src/pxar/create.rs | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index 8196c49d8..a7521424f 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -641,15 +641,30 @@ impl Archiver { } Ok(_) => (), Err(err) if err.not_found() => continue, + Err(Errno::ESTALE) => { + self.report_stale_file_handle(Some(&full_path)); + continue; + } Err(err) => { return Err(err).with_context(|| format!("stat failed on {full_path:?}")) } } - let stat = stat_results - .map(Ok) - .unwrap_or_else(get_file_mode) - .with_context(|| format!("stat failed on {full_path:?}"))?; + let stat = match stat_results { + Some(mode) => mode, + None => match get_file_mode() { + Ok(mode) => mode, + Err(Errno::ESTALE) => { + self.report_stale_file_handle(Some(&full_path)); + continue; + } + Err(err) => { + return Err( + Error::from(err).context(format!("stat failed on {full_path:?}")) + ) + } + }, + }; self.entry_counter += 1; if self.entry_counter > self.entry_limit { -- 2.39.5 From c.ebner at proxmox.com Thu Nov 14 15:41:10 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 15:41:10 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 1/5] client: pxar: refactor report vanished/changed helpers In-Reply-To: <20241114144114.375987-1-c.ebner@proxmox.com> References: <20241114144114.375987-1-c.ebner@proxmox.com> Message-ID: <20241114144114.375987-2-c.ebner@proxmox.com> Switch from mutable reference to shared reference on `self` and drop unused return value. These helpers only write log messages; there is currently no need for a mutable reference to `self`, nor to return a `Result`.
Signed-off-by: Christian Ebner --- changes since version 1: - not present in previous version pbs-client/src/pxar/create.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index c0c492f8d..4d1883118 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -476,7 +476,7 @@ impl Archiver { Ok(fd) => Ok(Some(fd)), Err(Errno::ENOENT) => { if existed { - self.report_vanished_file()?; + self.report_vanished_file(); } Ok(None) } @@ -671,25 +671,22 @@ impl Archiver { Ok(file_list) } - fn report_vanished_file(&mut self) -> Result<(), Error> { + fn report_vanished_file(&self) { log::warn!("warning: file vanished while reading: {:?}", self.path); - Ok(()) } - fn report_file_shrunk_while_reading(&mut self) -> Result<(), Error> { + fn report_file_shrunk_while_reading(&self) { log::warn!( "warning: file size shrunk while reading: {:?}, file will be padded with zeros!", self.path, ); - Ok(()) } - fn report_file_grew_while_reading(&mut self) -> Result<(), Error> { + fn report_file_grew_while_reading(&self) { log::warn!( "warning: file size increased while reading: {:?}, file will be truncated!", self.path, ); - Ok(()) } async fn add_entry( @@ -1239,14 +1236,14 @@ impl Archiver { Err(err) => bail!(err), }; if got as u64 > remaining { - self.report_file_grew_while_reading()?; + self.report_file_grew_while_reading(); got = remaining as usize; } out.write_all(&self.file_copy_buffer[..got]).await?; remaining -= got as u64; } if remaining > 0 { - self.report_file_shrunk_while_reading()?; + self.report_file_shrunk_while_reading(); let to_zero = remaining.min(self.file_copy_buffer.len() as u64) as usize; vec::clear(&mut self.file_copy_buffer[..to_zero]); while remaining != 0 { -- 2.39.5 From c.ebner at proxmox.com Thu Nov 14 15:41:14 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 15:41:14 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 5/5] fix #5853: client: pxar: exclude stale files on metadata/link read In-Reply-To: <20241114144114.375987-1-c.ebner@proxmox.com> References: <20241114144114.375987-1-c.ebner@proxmox.com> Message-ID: <20241114144114.375987-6-c.ebner@proxmox.com> Skip and warn the user for files which returned a stale file handle error while reading the metadata associated with that file, or the target in case of a symbolic link. Instead of returning with a hard error, report the stale file handle and skip over encoding this file entry in the pxar archive.
Link to issue in bugtracker: https://bugzilla.proxmox.com/show_bug.cgi?id=5853 Link to thread in community forum: https://forum.proxmox.com/threads/156822/ Signed-off-by: Christian Ebner --- changes since version 1: - avoid return value tuples to signal stale file handles, downcast anyhow::Error to Errno when a check is required - use report_stale_file_handle helper method pbs-client/src/pxar/create.rs | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index 3a6e9b157..5d7957970 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -740,14 +740,23 @@ impl Archiver { None => return Ok(()), }; - let metadata = get_metadata( + let metadata = match get_metadata( fd.as_raw_fd(), stat, self.flags(), self.fs_magic, &mut self.fs_feature_flags, self.skip_e2big_xattr, - )?; + ) { + Ok(metadata) => metadata, + Err(err) => { + if let Some(Errno::ESTALE) = err.downcast_ref::<Errno>() { + self.report_stale_file_handle(None); + return Ok(()); + } + return Err(err); + } + }; if self.previous_payload_index.is_none() { return self @@ -1294,7 +1303,14 @@ impl Archiver { file_name: &Path, metadata: &Metadata, ) -> Result<(), Error> { - let dest = nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..])?; + let dest = match nix::fcntl::readlinkat(fd.as_raw_fd(), &b""[..]) { + Ok(dest) => dest, + Err(Errno::ESTALE) => { + self.report_stale_file_handle(None); + return Ok(()); + } + Err(err) => return Err(err.into()), + }; encoder.add_symlink(metadata, file_name, dest).await?; Ok(()) } -- 2.39.5 From c.ebner at proxmox.com Thu Nov 14 15:41:11 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 15:41:11 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/5] client: pxar: skip directories on stale file handle In-Reply-To: <20241114144114.375987-1-c.ebner@proxmox.com> References: <20241114144114.375987-1-c.ebner@proxmox.com> Message-ID: <20241114144114.375987-3-c.ebner@proxmox.com> Skip over the whole directory in case the file handle was invalidated and therefore the filesystem type check returns with ESTALE. Encode the directory start entry in the archive and the catalog only after the filesystem type check, so the directory can be fully skipped. At this point it is still possible to ignore the invalidated directory. If the directory is invalidated afterwards, it will be backed up only partially. Introduce a helper method to report entries for which a stale file handle was encountered, providing an optional path for cases where the `Archiver`'s state does not store the correct path.
Signed-off-by: Christian Ebner --- changes since version 1: - introduce and use report_stale_file_handle helper method pbs-client/src/pxar/create.rs | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index 4d1883118..8196c49d8 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -72,7 +72,7 @@ pub struct PxarPrevRef { pub archive_name: String, } -fn detect_fs_type(fd: RawFd) -> Result<i64, Error> { +fn detect_fs_type(fd: RawFd) -> Result<i64, Errno> { let mut fs_stat = std::mem::MaybeUninit::uninit(); let res = unsafe { libc::fstatfs(fd, fs_stat.as_mut_ptr()) }; Errno::result(res)?; @@ -671,6 +671,11 @@ impl Archiver { Ok(file_list) } + fn report_stale_file_handle(&self, path: Option<&PathBuf>) { + let path = path.unwrap_or(&self.path); + log::warn!("warning: stale file handle encountered while reading: {path:?}"); + } + fn report_vanished_file(&self) { log::warn!("warning: file vanished while reading: {:?}", self.path); } @@ -1160,20 +1165,20 @@ impl Archiver { ) -> Result<(), Error> { let dir_name = OsStr::from_bytes(c_dir_name.to_bytes()); - if !self.cache.caching_enabled() { - if let Some(ref catalog) = self.catalog { - catalog.lock().unwrap().start_directory(c_dir_name)?; - } - encoder.create_directory(dir_name, metadata).await?; - } - let old_fs_magic = self.fs_magic; let old_fs_feature_flags = self.fs_feature_flags; let old_st_dev = self.current_st_dev; let mut skip_contents = false; if old_st_dev != stat.st_dev { - self.fs_magic = detect_fs_type(dir.as_raw_fd())?; + match detect_fs_type(dir.as_raw_fd()) { + Ok(fs_magic) => self.fs_magic = fs_magic, + Err(Errno::ESTALE) => { + self.report_stale_file_handle(None); + return Ok(()); + } + Err(err) => return Err(err.into()), + } self.fs_feature_flags = Flags::from_magic(self.fs_magic); self.current_st_dev = stat.st_dev; @@ -1184,6 +1189,13 @@ impl Archiver { } } + if !self.cache.caching_enabled() { + if let Some(ref catalog) = self.catalog { + catalog.lock().unwrap().start_directory(c_dir_name)?; + } + encoder.create_directory(dir_name, metadata).await?; + } + let result = if skip_contents { log::info!("skipping mount point: {:?}", self.path); Ok(()) -- 2.39.5 From c.ebner at proxmox.com Thu Nov 14 15:41:13 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 15:41:13 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup 4/5] client: pxar: warn user and ignore stale file handles on file open In-Reply-To: <20241114144114.375987-1-c.ebner@proxmox.com> References: <20241114144114.375987-1-c.ebner@proxmox.com> Message-ID: <20241114144114.375987-5-c.ebner@proxmox.com> Do not fail hard if a file open fails because of a stale file handle. Warn the user and ignore the file, just like the client already does in case of missing privileges to access the file.
Signed-off-by: Christian Ebner --- changes since version 1: - use report_stale_file_handle helper method pbs-client/src/pxar/create.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pbs-client/src/pxar/create.rs b/pbs-client/src/pxar/create.rs index a7521424f..3a6e9b157 100644 --- a/pbs-client/src/pxar/create.rs +++ b/pbs-client/src/pxar/create.rs @@ -484,6 +484,10 @@ impl Archiver { log::warn!("failed to open file: {:?}: access denied", file_name); Ok(None) } + Err(Errno::ESTALE) => { + self.report_stale_file_handle(None); + Ok(None) + } Err(Errno::EPERM) if !noatime.is_empty() => { // Retry without O_NOATIME: noatime = OFlag::empty(); -- 2.39.5 From c.ebner at proxmox.com Thu Nov 14 15:43:24 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 15:43:24 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] fix #5853: ignore stale files In-Reply-To: <20241105140153.282980-1-c.ebner@proxmox.com> References: <20241105140153.282980-1-c.ebner@proxmox.com> Message-ID: <7cd75f3a-d664-4408-b0fe-21ad4db1b245@proxmox.com> superseded-by version 2: https://lore.proxmox.com/pbs-devel/20241114144114.375987-1-c.ebner at proxmox.com/T/ From c.ebner at proxmox.com Thu Nov 14 16:15:50 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 16:15:50 +0100 Subject: [pbs-devel] [PATCH v3 proxmox-backup 1/2] docs: add security implications of prune and change detection mode Message-ID: <20241114151551.407971-1-c.ebner@proxmox.com> Users should be made aware that the data stored in chunks outlives the backup snapshots on pruning and that backups created using the change-detection-mode set to metadata might reference chunks containing files which have vanished since the previous backup, but might still be accessible when access to the chunks' raw data is possible (client or server side). Reviewed-by: Gabriel Goller Signed-off-by: Christian Ebner --- changes since version 2: - s/Further/Moreover/ for second sentence starting with Further - fix formatting for metadata by using double backticks - Improve text flow based on suggestions docs/maintenance.rst | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/docs/maintenance.rst b/docs/maintenance.rst index 4bb135e4e..601756246 100644 --- a/docs/maintenance.rst +++ b/docs/maintenance.rst @@ -6,8 +6,34 @@ Maintenance Tasks Pruning ------- -Prune lets you specify which backup snapshots you want to keep. -The following retention options are available: +Prune lets you specify which backup snapshots you want to keep, removing others. +When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs, +log and notes) is removed. The chunks containing the actual backup data and +previously referenced by the pruned snapshot have to be removed by a garbage +collection run. + +.. Caution:: Take into consideration that sensitive information stored in a + given data chunk will outlive pruned snapshots and remain present in the + datastore as long as referenced by at least one backup snapshot. Further, + *even* if no snapshot references a given chunk, it will remain present until + removed by the garbage collection. + + Moreover, file-level backups created using the change detection mode + ``metadata`` can reference backup chunks containing files which have vanished + since the previous backup. These files might still be accessible by reading + the chunks' raw data (client or server side).
+ + To remove chunks containing sensitive data, prune any snapshot made while the + data was part of the backup input and run a garbage collection. Further, if + using file-based backups with change detection mode ``metadata``, + additionally prune all snapshots made since the sensitive data was no longer + part of the backup input and run a garbage collection. + + The no longer referenced chunks will then be marked for deletion on the next + garbage collection run and removed by a subsequent run after the grace + period. + +The following retention options are available for pruning: ``keep-last <N>`` Keep the last ``<N>`` backup snapshots. -- 2.39.5 From c.ebner at proxmox.com Thu Nov 14 16:15:51 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 14 Nov 2024 16:15:51 +0100 Subject: [pbs-devel] [PATCH v3 proxmox-backup 2/2] docs: deduplicate background details for garbage collection In-Reply-To: <20241114151551.407971-1-c.ebner@proxmox.com> References: <20241114151551.407971-1-c.ebner@proxmox.com> Message-ID: <20241114151551.407971-2-c.ebner@proxmox.com> Currently, common details regarding garbage collection are documented in the backup client and the maintenance task. Deduplicate this information by moving the details to the background section of the maintenance task and reference that section in the backup client part. Reviewed-by: Gabriel Goller Signed-off-by: Christian Ebner --- changes since version 2: - s/can consider/considers/ - s/safeley/safely/ docs/backup-client.rst | 28 ++++++++++++---------------- docs/maintenance.rst | 38 +++++++++++++++++++++++++++----------- 2 files changed, 39 insertions(+), 27 deletions(-) diff --git a/docs/backup-client.rst b/docs/backup-client.rst index e56e0625b..892be11d9 100644 --- a/docs/backup-client.rst +++ b/docs/backup-client.rst @@ -789,29 +789,25 @@ Garbage Collection ------------------ The ``prune`` command removes only the backup index files, not the data -from the datastore. This task is left to the garbage collection -command. It is recommended to carry out garbage collection on a regular basis. +from the datastore. Deletion of unused backup data from the datastore is done by +:ref:`garbage collection<_maintenance_gc>`. It is therefore recommended to +schedule garbage collection tasks on a regular basis. The working principle of +garbage collection is described in more detail in the related :ref:`background +section <gc_background>`. -The garbage collection works in two phases. In the first phase, all -data blocks that are still in use are marked. In the second phase, -unused data blocks are removed. +To start garbage collection from the client side, run the following command: + +.. code-block:: console + + # proxmox-backup-client garbage-collect .. note:: This command needs to read all existing backup index files and touches the complete chunk-store. This can take a long time depending on the number of chunks and the speed of the underlying disks. -.. note:: The garbage collection will only remove chunks that haven't been used - for at least one day (exactly 24h 5m). This grace period is necessary because - chunks in use are marked by touching the chunk which updates the ``atime`` - (access time) property. Filesystems are mounted with the ``relatime`` option - by default. This results in a better performance by only updating the - ``atime`` property if the last access has been at least 24 hours ago. The - downside is that touching a chunk within these 24 hours will not always - update its ``atime`` property. - - Chunks in the grace period will be logged at the end of the garbage - collection task as *Pending removals*. +The progress of the garbage collection will be displayed as shown in the example +below: .. code-block:: console diff --git a/docs/maintenance.rst b/docs/maintenance.rst index 601756246..a64769138 100644 --- a/docs/maintenance.rst +++ b/docs/maintenance.rst @@ -197,6 +197,8 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up periodically. For most setups a weekly schedule provides a good interval to start. +.. _gc_background: + GC Background ^^^^^^^^^^^^^ @@ -222,17 +224,31 @@ datastore or interfering with other backups. The garbage collection (GC) process is performed per datastore and is split into two phases: -- Phase one: Mark - All index files are read, and the access time of the referred chunk files is - updated. - -- Phase two: Sweep - The task iterates over all chunks, checks their file access time, and if it - is older than the cutoff time (i.e., the time when GC started, plus some - headroom for safety and Linux file system behavior), the task knows that the - chunk was neither referred to in any backup index nor part of any currently - running backup that has no index to scan for. As such, the chunk can be - safely deleted. +- Phase one (Mark): + + All index files are read, and the access time (``atime``) of the referenced + chunk files is updated. + +- Phase two (Sweep): + + The task iterates over all chunks and checks their file access time against a + cutoff time. The cutoff time is given by either the oldest backup writer + instance, if present, or 24 hours and 5 minutes after the start of garbage + collection. + + Garbage collection considers chunk files with access time older than the + cutoff time to be neither referenced by any backup snapshot's index, nor part + of any currently running backup job. Therefore, these chunks can safely be
- - Chunks in the grace period will be logged at the end of the garbage - collection task as *Pending removals*. +The progress of the garbage collection will be displayed as shown in the example +below: .. code-block:: console diff --git a/docs/maintenance.rst b/docs/maintenance.rst index 601756246..a64769138 100644 --- a/docs/maintenance.rst +++ b/docs/maintenance.rst @@ -197,6 +197,8 @@ It's recommended to setup a schedule to ensure that unused space is cleaned up periodically. For most setups a weekly schedule provides a good interval to start. +.. _gc_background: + GC Background ^^^^^^^^^^^^^ @@ -222,17 +224,31 @@ datastore or interfering with other backups. The garbage collection (GC) process is performed per datastore and is split into two phases: -- Phase one: Mark - All index files are read, and the access time of the referred chunk files is - updated. - -- Phase two: Sweep - The task iterates over all chunks, checks their file access time, and if it - is older than the cutoff time (i.e., the time when GC started, plus some - headroom for safety and Linux file system behavior), the task knows that the - chunk was neither referred to in any backup index nor part of any currently - running backup that has no index to scan for. As such, the chunk can be - safely deleted. +- Phase one (Mark): + + All index files are read, and the access time (``atime``) of the referenced + chunk files is updated. + +- Phase two (Sweep): + + The task iterates over all chunks and checks their file access time against a + cutoff time. The cutoff time is given by either the oldest backup writer + instance, if present, or 24 hours and 5 minutes after the start of garbage + collection. + + Garbage collection considers chunk files with access time older than the + cutoff time to be neither referenced by any backup snapshot's index, nor part + of any currently running backup job. Therefore, these chunks can safely be + deleted. + + Chunks within the grace period will not be deleted and logged at the end of + the garbage collection task as *Pending removals*. + +.. note:: The grace period for backup chunk removal is not arbitrary, but stems + from the fact that filesystems are typically mounted with the ``relatime`` + option by default. This results in better performance by only updating the + ``atime`` property if a file has been modified since the last access or the + last access has been at least 24 hours ago. Manually Starting GC ^^^^^^^^^^^^^^^^^^^^ -- 2.39.5 From t.lamprecht at proxmox.com Sun Nov 17 20:27:29 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Sun, 17 Nov 2024 20:27:29 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 03/26] pbs-api-types: add backing-device to DataStoreConfig In-Reply-To: <20241113150102.164820-4-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-4-h.laimer@proxmox.com> Message-ID: <2ca6ce0e-ecdb-4c46-ac1d-eb289dd7076a@proxmox.com> Am 13.11.24 um 16:00 schrieb Hannes Laimer: > Signed-off-by: Hannes Laimer > --- > changes since v12: > * clearify/improve description of `DATASTORE_DIR_NAME_SCHAME` > > pbs-api-types/src/datastore.rs | 31 ++++++++++++++++++++++++++++--- > 1 file changed, 28 insertions(+), 3 deletions(-) > > diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs > index a5704c93..f6c255d3 100644 > --- a/pbs-api-types/src/datastore.rs > +++ b/pbs-api-types/src/datastore.rs > @@ -42,7 +42,7 @@ const_regex! 
{ > > pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); > > -pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") > +pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or a relative on-device path for removable datastores.") > .min_length(1) > .max_length(4096) > .schema(); > @@ -160,6 +160,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = > .minimum(1) > .schema(); > > +/// Base directory where datastores are mounted > +pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; > + > #[api] > #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] > #[serde(rename_all = "lowercase")] > @@ -234,7 +237,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore > schema: DATASTORE_SCHEMA, > }, > path: { > - schema: DIR_NAME_SCHEMA, > + schema: DATASTORE_DIR_NAME_SCHEMA, > }, > "notify-user": { > optional: true, > @@ -273,6 +276,12 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore > format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), > type: String, > }, > + "backing-device": { > + description: "The UUID of the filesystem partition for removable datastores.", > + optional: true, > + format: &proxmox_schema::api_types::UUID_FORMAT, FWIW, I get an error about this regex not matching for a USB pen drive I'm testing. The POST data is: { "name": "samsung-stick-foo", "path": "foo", "backing-device": "64A5-F009", "gc-schedule": "daily", "prune-schedule": "daily", "comment": "", "notification-mode": "notification-system" } The data of the usb disk I selected { "3": { "devpath": "/dev/sdd", "disk-type": "hdd", "gpt": false, "model": "Flash_Drive_FIT", "name": "sdd", "partitions": [ { "devpath": "/dev/sdd1", "filesystem": "exfat", "gpt": false, "mounted": false, "name": "sdd1", "size": 128320719872, "used": "filesystem", "uuid": "64A5-F009" } ], "rpm": null, "serial": "0392523110004665", "size": 128320801792, "status": "unknown", "used": "partitions", "vendor": "Samsung", "wearout": null, "wwn": null } } note: this pen drive is brand new, got just unwrapped and passed through to my dev VM, and as such it's still coming with the formatting from the factory. Now, I first did not even expect that it shows up in the selector, but it did, so I'm wondering if it either should not be available or if it should work to use this disk too. No worries, I do not want an immediate fix or the like, just would like to know what the expected outcome here is, as I think quite some other users might also plug in their freshly unwrapped and proudly exfat/vfat formatted pen drives to see how this goes. That they have to do something might be fine, but a regex not matching error won't shove them in the right direction I think. From t.lamprecht at proxmox.com Sun Nov 17 20:32:54 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Sun, 17 Nov 2024 20:32:54 +0100 Subject: [pbs-devel] applied [PATCH] api: disk list: do not fail but just log error on gathering smart data Message-ID: <20241117193254.3900015-1-t.lamprecht@proxmox.com> I plugged in a USB pen drive and the whole disk list UI became completely unusable because smartctl fails to handle that device due to some `Unknown USB bridge [0x090c:0x1000 (0x1100)]` error.
That itself might be improvable, but most often I do not care at all about smart data, and certainly not enough to let a failure gathering it keep me from viewing my disks (or the smart data from disks where it still could be gathered, for that matter!) Signed-off-by: Thomas Lamprecht --- src/tools/disks/mod.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs index 9f47be36..6345fde7 100644 --- a/src/tools/disks/mod.rs +++ b/src/tools/disks/mod.rs @@ -1083,8 +1083,11 @@ fn get_disks( let parallel_handler = ParallelHandler::new("smartctl data", 4, move |device: (String, String)| { - let smart_data = get_smart_data(Path::new(&device.1), false)?; - tx.send((device.0, smart_data))?; + match get_smart_data(Path::new(&device.1), false) { + Ok(smart_data) => tx.send((device.0, smart_data))?, + // do not fail the whole disk output just because smartctl couldn't query one + Err(err) => log::error!("failed to gather smart data for {} - {err}", device.1), + } Ok(()) }); -- 2.39.5 From t.lamprecht at proxmox.com Sun Nov 17 20:34:04 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Sun, 17 Nov 2024 20:34:04 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v13 05/26] disks: add UUID to partition info In-Reply-To: <20241113150102.164820-6-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-6-h.laimer@proxmox.com> Message-ID: Am 13.11.24 um 16:00 schrieb Hannes Laimer: > Signed-off-by: Hannes Laimer > --- > src/tools/disks/mod.rs | 9 ++++++++- > 1 file changed, 8 insertions(+), 1 deletion(-) > > applied, thanks! From t.lamprecht at proxmox.com Sun Nov 17 20:34:06 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Sun, 17 Nov 2024 20:34:06 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v13 02/26] config: factor out method to get the absolute datastore path In-Reply-To: <20241113150102.164820-3-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-3-h.laimer@proxmox.com> Message-ID: Am 13.11.24 um 16:00 schrieb Hannes Laimer: > From: Dietmar Maurer > > removable datastores will have a PBS-managed mountpoint as path, direct > access to the field needs to be replaced with a helper that can account > for this. > > Signed-off-by: Hannes Laimer > --- > changes since v12: > * just commit msg > > pbs-api-types/src/datastore.rs | 5 +++++ > pbs-datastore/src/datastore.rs | 11 +++++++---- > src/api2/node/disks/directory.rs | 4 ++-- > src/server/metric_collection/mod.rs | 8 ++++++-- > 4 files changed, 20 insertions(+), 8 deletions(-) > > applied, thanks! From t.lamprecht at proxmox.com Sun Nov 17 20:34:05 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Sun, 17 Nov 2024 20:34:05 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v13 01/26] tools: add disks utility functions In-Reply-To: <20241113150102.164820-2-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-2-h.laimer@proxmox.com> Message-ID: Am 13.11.24 um 16:00 schrieb Hannes Laimer: > ... for mounting and unmounting > > Signed-off-by: Hannes Laimer > --- > changes since v12: > * use &Path everywhere, instead of &str > > src/tools/disks/mod.rs | 30 ++++++++++++++++++++++++++++++ > 1 file changed, 30 insertions(+) > > applied, thanks!
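Regarding the UUID_FORMAT mismatch Thomas reported above, which Hannes picks up in the following reply: vfat/exfat volumes carry only a short pseudo-UUID such as `64A5-F009`, which is not an RFC 9562 UUID, so a strict UUID schema can never match them. A minimal sketch of the kind of validity check that could filter such values, purely illustrative and not the pbs-api-types code:

```rust
/// Returns true only for RFC 9562 style UUIDs (8-4-4-4-12 hex groups),
/// rejecting vfat/exfat pseudo-UUIDs such as "64A5-F009".
fn is_real_uuid(id: &str) -> bool {
    let group_lens = [8usize, 4, 4, 4, 12];
    let groups: Vec<&str> = id.split('-').collect();
    groups.len() == group_lens.len()
        && groups
            .iter()
            .zip(group_lens)
            .all(|(g, len)| g.len() == len && g.chars().all(|c| c.is_ascii_hexdigit()))
}

fn main() {
    assert!(is_real_uuid("67e55044-10b1-426f-9247-bb680e5fe0c8"));
    assert!(!is_real_uuid("64A5-F009")); // exfat/vfat pseudo-UUID
}
```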
From h.laimer at proxmox.com Mon Nov 18 09:36:34 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 18 Nov 2024 09:36:34 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 03/26] pbs-api-types: add backing-device to DataStoreConfig In-Reply-To: <2ca6ce0e-ecdb-4c46-ac1d-eb289dd7076a@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-4-h.laimer@proxmox.com> <2ca6ce0e-ecdb-4c46-ac1d-eb289dd7076a@proxmox.com> Message-ID: On Sun Nov 17, 2024 at 8:27 PM CET, Thomas Lamprecht wrote: > Am 13.11.24 um 16:00 schrieb Hannes Laimer: >> Signed-off-by: Hannes Laimer >> --- >> changes since v12: >> * clearify/improve description of `DATASTORE_DIR_NAME_SCHAME` >> >> pbs-api-types/src/datastore.rs | 31 ++++++++++++++++++++++++++++--- >> 1 file changed, 28 insertions(+), 3 deletions(-) >> >> diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs >> index a5704c93..f6c255d3 100644 >> --- a/pbs-api-types/src/datastore.rs >> +++ b/pbs-api-types/src/datastore.rs >> @@ -42,7 +42,7 @@ const_regex! { >> >> pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); >> >> -pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") >> +pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or a relative on-device path for removable datastores.") >> .min_length(1) >> .max_length(4096) >> .schema(); >> @@ -160,6 +160,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = >> .minimum(1) >> .schema(); >> >> +/// Base directory where datastores are mounted >> +pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; >> + >> #[api] >> #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] >> #[serde(rename_all = "lowercase")] >> @@ -234,7 +237,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore >> schema: DATASTORE_SCHEMA, >> }, >> path: { >> - schema: DIR_NAME_SCHEMA, >> + schema: DATASTORE_DIR_NAME_SCHEMA, >> }, >> "notify-user": { >> optional: true, >> @@ -273,6 +276,12 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore >> format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), >> type: String, >> }, >> + "backing-device": { >> + description: "The UUID of the filesystem partition for removable datastores.", >> + optional: true, >> + format: &proxmox_schema::api_types::UUID_FORMAT, > > FWIW, I get an error about this regex not matching for a USB pen drive I'm testing. > > The POST data is: > > { > "name": "samsung-stick-foo", > "path": "foo", > "backing-device": "64A5-F009", > "gc-schedule": "daily", > "prune-schedule": "daily", > "comment": "", > "notification-mode": "notification-system" > } > > > The data of the usb disk I selected > > { > "3": { > "devpath": "/dev/sdd", > "disk-type": "hdd", > "gpt": false, > "model": "Flash_Drive_FIT", > "name": "sdd", > "partitions": [ > { > "devpath": "/dev/sdd1", > "filesystem": "exfat", > "gpt": false, > "mounted": false, > "name": "sdd1", > "size": 128320719872, > "used": "filesystem", > "uuid": "64A5-F009" > } > ], > "rpm": null, > "serial": "0392523110004665", > "size": 128320801792, > "status": "unknown", > "used": "partitions", > "vendor": "Samsung", > "wearout": null, > "wwn": null > } > } > > note: this pen drive is brand new, got just unwrapped and passed through to my dev > VM, and as such it's still coming with the formatting from factoring. 
> > Now, I first did not even expect that it shows up in the selector, but it did, so I'm > > wondering if it either should not be available or if it should work to use this disk > > too. > > > > No worries, I do not want an immediate fix or the like, just would like to know what > > the expected outcome here is - as I think quite some other users might also plug in their > > freshly unwrapped and proudly exfat/vfat formatted pen drives to see how this goes. > > That they have to do something might be fine, but a regex not matching error won't > > shove them in the right direction I think. Good point, we could either - not return UUIDs that are not really UUIDs[1] - filter them out in the UI. In both cases the supported filesystems should probably be mentioned in the error msg. Generally I think it'd be better to not return a UUID that is not really a UUID. So since, for example, `exfat` only has a 'pseudo-UUID' it would not be included in the returned data. [1] https://www.rfc-editor.org/rfc/rfc9562.html#name-uuid-format From c.ebner at proxmox.com Mon Nov 18 10:24:32 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 18 Nov 2024 10:24:32 +0100 Subject: [pbs-devel] [PATCH docs 0/3] extend documentation for change detection mode Message-ID: <20241118092435.81880-1-c.ebner@proxmox.com> Add sections explaining the change detection modes in more technical detail and reference these sections in the client usage section, which should cover more the how-to-use than the how-it-works. Christian Ebner (3): docs: explain the working principle of the change detection modes docs: reference technical change detection mode section for client docs: client: fix formatting by using double ticks docs/backup-client.rst | 38 +++++-------- docs/technical-overview.rst | 108 ++++++++++++++++++++++++++++++++++++ 2 files changed, 123 insertions(+), 23 deletions(-) -- 2.39.5 From c.ebner at proxmox.com Mon Nov 18 10:24:35 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 18 Nov 2024 10:24:35 +0100 Subject: [pbs-devel] [PATCH docs 3/3] docs: client: fix formatting by using double ticks In-Reply-To: <20241118092435.81880-1-c.ebner@proxmox.com> References: <20241118092435.81880-1-c.ebner@proxmox.com> Message-ID: <20241118092435.81880-4-c.ebner@proxmox.com> With single ticks the containing modes and archive formats are displayed in italics; to be consistent with other sections of the documentation, use inline blocks. Adapted line wrappings to the additional line length. Signed-off-by: Christian Ebner --- docs/backup-client.rst | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/docs/backup-client.rst b/docs/backup-client.rst index 78e856979..45df440c9 100644 --- a/docs/backup-client.rst +++ b/docs/backup-client.rst @@ -295,19 +295,20 @@ therefore deduplicated). If the backed up files are largely unchanged, re-reading and then detecting the corresponding chunks don't need to be uploaded after all is time consuming and undesired.
When using this mode, instead of the regular pxar archive, the backup snapshot -is stored into two separate files: the `mpxar` containing the archive's metadata -and the `ppxar` containing a concatenation of the file contents. This splitting -allows for efficient metadata lookups. When creating the backup archives, the -current file metadata is compared to the one looked up in the previous `mpxar` -archive. The operational details are explained more in depth in the -:ref:`technical documentation <change-detection-mode-metadata>`. - -Using the `change-detection-mode` set to `data` allows to create the same split -archive as when using the `metadata` mode, but without using a previous +is stored into two separate files: the ``mpxar`` containing the archive's +metadata and the ``ppxar`` containing a concatenation of the file contents. This +splitting allows for efficient metadata lookups. When creating the backup +archives, the current file metadata is compared to the one looked up in the +previous ``mpxar`` archive. The operational details are explained more in depth +in the :ref:`technical documentation <change-detection-mode-metadata>`. + +Using the ``change-detection-mode`` set to ``data`` allows to create the same +split archive as when using the ``metadata`` mode, but without using a previous reference and therefore reencoding all file payloads. For details of this mode please see the :ref:`technical documentation <change-detection-mode-data>`. -- 2.39.5 From c.ebner at proxmox.com Mon Nov 18 10:24:34 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 18 Nov 2024 10:24:34 +0100 Subject: [pbs-devel] [PATCH docs 2/3] docs: reference technical change detection mode section for client In-Reply-To: <20241118092435.81880-1-c.ebner@proxmox.com> References: <20241118092435.81880-1-c.ebner@proxmox.com> Message-ID: <20241118092435.81880-3-c.ebner@proxmox.com> Currently, the change detection modes are described in the client usage section, not intended for in-depth explanation on how these client options work, but rather with focus on how to use them. Therefore, add a reference to the more detailed technical section regarding the change detection modes and reduce duplicate explanations. Signed-off-by: Christian Ebner --- docs/backup-client.rst | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/docs/backup-client.rst b/docs/backup-client.rst index e56e0625b..78e856979 100644 --- a/docs/backup-client.rst +++ b/docs/backup-client.rst @@ -301,24 +301,15 @@ the client to avoid re-reading files with unchanged metadata whenever possible. When using this mode, instead of the regular pxar archive, the backup snapshot is stored into two separate files: the `mpxar` containing the archive's metadata and the `ppxar` containing a concatenation of the file contents. This splitting -allows for efficient metadata lookups. +allows for efficient metadata lookups. When creating the backup archives, the +current file metadata is compared to the one looked up in the previous `mpxar` +archive. The operational details are explained more in depth in the +:ref:`technical documentation <change-detection-mode-metadata>`. Using the `change-detection-mode` set to `data` allows to create the same split archive as when using the `metadata` mode, but without using a previous -reference and therefore reencoding all file payloads. -When creating the backup archives, the current file metadata is compared to the -one looked up in the previous `mpxar` archive.
-The metadata comparison includes file size, file type, ownership and permission -information, as well as acls and attributes and most importantly the file's -mtime, for details see the -:ref:`pxar metadata archive format `. - -If unchanged, the entry is cached for possible re-use of content chunks without -re-reading, by indexing the already present chunks containing the contents from -the previous backup snapshot. Since the file might only partially re-use chunks -(thereby introducing wasted space in the form of padding), the decision whether -to re-use or re-encode the currently cached entries is postponed to when enough -information is available, comparing the possible padding to a threshold value. +reference and therefore reencoding all file payloads. For details of this mode +please see the :ref:`technical documentation <change-detection-mode-data>`. .. _client_change_detection_mode_table: -- 2.39.5 From c.ebner at proxmox.com Mon Nov 18 10:24:33 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 18 Nov 2024 10:24:33 +0100 Subject: [pbs-devel] [PATCH docs 1/3] docs: explain the working principle of the change detection modes In-Reply-To: <20241118092435.81880-1-c.ebner@proxmox.com> References: <20241118092435.81880-1-c.ebner@proxmox.com> Message-ID: <20241118092435.81880-2-c.ebner@proxmox.com> Describe in more detail how the different change detection modes operate and give insights into the inner workings, especially for the more complex `metadata` mode, which involves lookahead caching and padding calculation for reused payload chunks. Suggested-by: Dietmar Maurer Signed-off-by: Christian Ebner --- docs/technical-overview.rst | 108 ++++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) diff --git a/docs/technical-overview.rst b/docs/technical-overview.rst index f79deff38..21793c5c5 100644 --- a/docs/technical-overview.rst +++ b/docs/technical-overview.rst @@ -134,6 +134,111 @@ This is done to speed up the client part of the backup, since it only needs to encrypt chunks that are actually getting uploaded. Chunks that exist already in the previous backup, do not need to be encrypted and uploaded. +Change Detection Mode for File-Based Backups +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The change detection mode controls how to detect and act for files which did not +change in-between subsequent backup runs as well as the archive file format used +to encode the directory entries. + +.. _change-detection-mode-legacy: + +Legacy Mode ++++++++++++ + +Backup snapshots of filesystems are created by recursively scanning the +directory entries. All entries to be included in the snapshot are read and +serialized by encoding them using the ``pxar`` +:ref:`archive format `. The resulting stream is chunked into +:ref:`dynamically sized chunks ` and uploaded to the +Proxmox Backup Server, deduplicating chunks based on their content digest for +space efficient storage. +File contents are read and chunked unconditionally, no check is performed to +detect unchanged files. + +.. _change-detection-mode-data: + +Data Mode ++++++++++ + +Like for ``legacy`` mode file contents are read and chunked unconditionally, no +check is performed to detect unchanged files. + +However, in contrast to ``legacy`` mode, which stores entries metadata and data +in a single self-contained ``pxar`` archive, the ``data`` mode encodes metadata +and file contents into two separate streams.
The resulting backup snapshots +therefore contain split archives, an archive in ``mpxar`` +:ref:`format ` containing the entries metadata and an archive +with ``ppxar`` :ref:`format ` , containing the actual file +contents, separated by payload headers for consistency checks. The metadata +archive stores a reference offset to the corresponding payload archive entry so +the file contents can be accessed. Both of these archives are chunked and +uploaded by the Proxmox backup client, resulting in separated indices and +independent chunks. + +The ``mpxar`` archive can be used to efficiently fetch the associated metadata +for archive entries without the overhead of payload data stored within the same +chunks. This is used for example for entry lookups to list the archive contents +or to navigate the mounted filesystem via the FUSE implementation. No dedicated +catalog is therefore created for archives encoded using this mode. + +.. _change-detection-mode-metadata: + +Metadata Mode ++++++++++++++ + +The ``metadata`` mode detects files whose file metadata did not change +in-between subsequent backup runs. The metadata comparison includes file size, +file type, ownership and permission information, as well as acls and attributes +and most importantly the file's mtime, for details see the +:ref:`pxar metadata archive format `. This mode will avoid +reading and rechunking the file contents whenever possible by reusing the file +content chunks of unchanged files from the previous backup snapshot. + +To compare the metadata, the previous snapshot's ``mpxar`` metadata archive is +downloaded at the start of the backup run and used as a reference. Further, the +index of the payload archive ``ppxar`` is fetched and used to look up the file +content chunks' digests, which will be used to reindex pre-existing chunks +without the need to reread and rechunk the file contents. + +During backup, the metadata and payload archives are encoded in the same manner +as for the ``data`` mode, but for the ``metadata`` mode each entry is +additionally looked up in the metadata reference archive for comparison first. +If the file did not change as compared to the reference, the file is considered +as unchanged and the Proxmox backup client enters a look-ahead caching mode. In +this mode, the client will keep reading and comparing the following entries in +the filesystem as long as they are reusable. Further, it keeps track of the +payload archive offset range these file contents are stored in. The additional +look-ahead caching is needed, as file boundaries are not required to be aligned +with chunk boundaries, therefore reused chunks can contain possibly wasted chunk +content (also called padding) if reused unconditionally. + +The look-ahead cache will greedily cache all unchanged entries up to the point +where either the cache size limit is reached, a file entry with changed +metadata is encountered, or the range of payload chunks considered for reuse is +not continuous. An example for the latter is a file which disappeared in-between +subsequent backup runs, leaving a hole in the range. At this point, the caching +mode is disabled and the client calculates the wasted padding size which would +be introduced by reusing the payload chunks for all the unchanged files cached +up to this point.
If the padding is acceptable (below a preset limit of 10% of +the actually reused chunk content), the files are reused by encoding them in the +metadata archive using updated offset references to the contents and reindexing +the pre-existing chunks in the new ``ppxar`` archive. If however the padding is +not acceptable, exceeding the limit, all cached entries are reencoded, not +reusing any of the pre-existing data. The metadata as cached will be encoded in +the metadata archive, no matter if cached file contents are to be reused or +reencoded. + +This combination of look-ahead caching and reuse of pre-existing payload archive +chunks for files with unchanged contents therefore speeds up the backup +process by avoiding rereading and rechunking file contents whenever possible. + +To reduce paddings and increase chunk reusability, during creation of the +archives in ``data`` mode and ``metadata`` mode the pxar encoder signals +encountered file boundaries as suggested chunk boundaries to the sliding window +chunker. The chunker then decides based on the internal state if the suggested +boundary is accepted or disregarded. + Caveats and Limitations ----------------------- @@ -184,6 +289,9 @@ read all files again for every backup, otherwise it would not be possible to generate a consistent, independent pxar archive where the original chunks can be reused. Note that in spite of this, only new or changed chunks will be uploaded. +In order to avoid these limitations, the Change Detection Mode ``metadata`` was +introduced. + Verification of Encrypted Chunks ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -- 2.39.5 From m.sandoval at proxmox.com Mon Nov 18 11:43:14 2024 From: m.sandoval at proxmox.com (Maximiliano Sandoval) Date: Mon, 18 Nov 2024 11:43:14 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 13/26] docs: add removable datastores section In-Reply-To: <20241113150102.164820-14-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-14-h.laimer@proxmox.com> Message-ID: Hannes Laimer writes: > +Removable Datastores > +^^^^^^^^^^^^^^^^^^^^ > +Removable datastores have a ``backing-device`` associated with them, they can be > +mounted and unmounted. Other than that they behave the same way a normal datastore > +would. > + > +They can be created on already correctly formatted partitions, which, as with normal > +datastores, should be either ``ext4`` or ``xfs``. It is also possible to create them > +on completely unused disks through "Administartion" > "Disks / Storage" > "Directory", there is a typo on Administartion. From a.lauterer at proxmox.com Mon Nov 18 11:49:59 2024 From: a.lauterer at proxmox.com (Aaron Lauterer) Date: Mon, 18 Nov 2024 11:49:59 +0100 Subject: [pbs-devel] [PATCH] ui tasks: use view task instead of open task Message-ID: <20241118104959.95159-1-a.lauterer@proxmox.com> This aligns the tooltips to how we have it in Proxmox VE. Using "view" instead of "open" should make it clear that this is a safe read-only action.
Signed-off-by: Aaron Lauterer --- Matching patches for PVE in the pve-devel list: https://lore.proxmox.com/pve-devel/20241118104530.91798-1-a.lauterer at proxmox.com/T/#t www/dashboard/LongestTasks.js | 2 +- www/dashboard/RunningTasks.js | 2 +- www/dashboard/TaskSummary.js | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/www/dashboard/LongestTasks.js b/www/dashboard/LongestTasks.js index 20cf1183..f405b98f 100644 --- a/www/dashboard/LongestTasks.js +++ b/www/dashboard/LongestTasks.js @@ -100,7 +100,7 @@ Ext.define('PBS.LongestTasks', { items: [ { iconCls: 'fa fa-chevron-right', - tooltip: gettext('Open Task'), + tooltip: gettext('View Task'), handler: 'openTaskActionColumn', }, ], diff --git a/www/dashboard/RunningTasks.js b/www/dashboard/RunningTasks.js index 4529d0d3..20a7c51e 100644 --- a/www/dashboard/RunningTasks.js +++ b/www/dashboard/RunningTasks.js @@ -95,7 +95,7 @@ Ext.define('PBS.RunningTasks', { items: [ { iconCls: 'fa fa-chevron-right', - tooltip: gettext('Open Task'), + tooltip: gettext('View Task'), handler: 'openTaskActionColumn', }, ], diff --git a/www/dashboard/TaskSummary.js b/www/dashboard/TaskSummary.js index 68f6f6ef..684a18f0 100644 --- a/www/dashboard/TaskSummary.js +++ b/www/dashboard/TaskSummary.js @@ -109,7 +109,7 @@ Ext.define('PBS.TaskSummary', { items: [ { iconCls: 'fa fa-chevron-right', - tooltip: gettext('Open Task'), + tooltip: gettext('View Task'), handler: function(g, rowIndex) { let rec = tasklist.getStore().getAt(rowIndex); tasklist.setVisible(false); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 18 13:16:03 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 18 Nov 2024 13:16:03 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] client: pxar: fix race in pxar backup stream Message-ID: <20241118121603.235524-1-c.ebner@proxmox.com> Fixes a race condition where the backup upload stream can miss an error returned by pxar::create_archive, because the error state is only set after the backup stream was already polled. On instantiation, `PxarBackupStream` spawns a future handling the pxar archive creation, which sends the encoded pxar archive stream (or streams in case of split archives) through a channel, received by the pxar backup stream on polling. In case this channel is closed as signaled by returning an error, the poll logic will propagate any error that occurred during pxar creation by taking it from the `PxarBackupStream`. As this error might not have been set just yet, this can lead to incorrectly terminating a backup snapshot with success, even though an error occurred. To fix this, signal the end of the archive creation to the pxar backup stream via a notification. In case of premature termination of the pxar backup stream, no additional measures have to be taken, as the abort handle already terminates the archive creation.
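To illustrate the intended ordering - a simplified, self-contained sketch of the synchronization pattern around tokio's `Notify` (illustration only, not the code of this patch; all names below are invented for the example):

    use std::sync::{Arc, Mutex};
    use tokio::sync::Notify;

    #[derive(Default)]
    struct ArchiverState {
        error: Mutex<Option<String>>,
        finished: Notify,
    }

    async fn archiver_task(state: Arc<ArchiverState>) {
        // ... archive creation happens here, on failure record the error first ...
        *state.error.lock().unwrap() = Some("create_archive failed".to_string());
        // signal completion only after the error state has been written
        state.finished.notify_one();
    }

    async fn stream_end(state: Arc<ArchiverState>) -> Result<(), String> {
        // a permit stored by notify_one() makes this return immediately,
        // even if the archiver finished before we started waiting
        state.finished.notified().await;
        match state.error.lock().unwrap().take() {
            Some(err) => Err(err),
            None => Ok(()),
        }
    }

Because the notification is sent strictly after the error slot is written, the consumer can no longer observe an empty error slot for a failed archive.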
Signed-off-by: Christian Ebner --- pbs-client/src/pxar_backup_stream.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pbs-client/src/pxar_backup_stream.rs b/pbs-client/src/pxar_backup_stream.rs index 4370da6cc..5399cc4f3 100644 --- a/pbs-client/src/pxar_backup_stream.rs +++ b/pbs-client/src/pxar_backup_stream.rs @@ -11,6 +11,7 @@ use futures::stream::Stream; use nix::dir::Dir; use nix::fcntl::OFlag; use nix::sys::stat::Mode; +use tokio::sync::Notify; use proxmox_async::blocking::TokioWriterAdapter; use proxmox_io::StdChannelWriter; @@ -30,6 +31,7 @@ pub struct PxarBackupStream { pub suggested_boundaries: Option<std::sync::mpsc::Receiver<u64>>, handle: Option<AbortHandle>, error: Arc<Mutex<Option<Error>>>, + notify: Arc<Notify>, } impl Drop for PxarBackupStream { @@ -79,6 +81,8 @@ impl PxarBackupStream { let error = Arc::new(Mutex::new(None)); let error2 = Arc::clone(&error); + let notify = Arc::new(Notify::new()); + let notify_clone = notify.clone(); let handler = async move { if let Err(err) = crate::pxar::create_archive( dir, @@ -100,6 +104,8 @@ impl PxarBackupStream { let mut error = error2.lock().unwrap(); *error = Some(err); } + // Notify upload stream that archiver is finished (with or without error) + notify_clone.notify_one(); }; let (handle, registration) = AbortHandle::new_pair(); @@ -111,6 +117,7 @@ impl PxarBackupStream { suggested_boundaries: None, handle: Some(handle.clone()), error: Arc::clone(&error), + notify: notify.clone(), }; let backup_payload_stream = payload_rx.map(|rx| Self { @@ -118,6 +125,7 @@ impl PxarBackupStream { suggested_boundaries: suggested_boundaries_rx, handle: Some(handle), error, + notify: notify.clone(), }); Ok((backup_stream, backup_payload_stream)) @@ -151,6 +159,8 @@ impl Stream for PxarBackupStream { match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) { Ok(data) => Poll::Ready(Some(data)), Err(_) => { + // Wait until archiver signals finished to catch eventual errors + proxmox_async::runtime::block_in_place(|| self.notify.notified()); let mut error = self.error.lock().unwrap(); if let Some(err) = error.take() { return Poll::Ready(Some(Err(err))); -- 2.39.5 From s.sterz at proxmox.com Mon Nov 18 16:04:28 2024 From: s.sterz at proxmox.com (Shannon Sterz) Date: Mon, 18 Nov 2024 16:04:28 +0100 Subject: [pbs-devel] [PATCH docs 3/3] docs: client: fix formatting by using double ticks In-Reply-To: <20241118092435.81880-4-c.ebner@proxmox.com> References: <20241118092435.81880-1-c.ebner@proxmox.com> <20241118092435.81880-4-c.ebner@proxmox.com> Message-ID: On Mon Nov 18, 2024 at 10:24 AM CET, Christian Ebner wrote: > With single ticks the containing modes and archive formats are > displayed in italics; to be consistent with other sections of the > documentation, use inline blocks. > > Adapted line wrappings to the additional line length. > > Signed-off-by: Christian Ebner > --- > docs/backup-client.rst | 25 +++++++++++++------------ > 1 file changed, 13 insertions(+), 12 deletions(-) > > diff --git a/docs/backup-client.rst b/docs/backup-client.rst > index 78e856979..45df440c9 100644 > --- a/docs/backup-client.rst > +++ b/docs/backup-client.rst > @@ -295,19 +295,20 @@ therefore deduplicated). If the backed up files are largely unchanged, > re-reading and then detecting the corresponding chunks don't need to be uploaded > after all is time consuming and undesired.
> > -The backup client's `change-detection-mode` can be switched from default to > -`metadata` based detection to reduce limitations as described above, instructing > -the client to avoid re-reading files with unchanged metadata whenever possible. > +The backup client's ``change-detection-mode`` can be switched from default to > +``metadata`` based detection to reduce limitations as described above, > +instructing the client to avoid re-reading files with unchanged metadata > +whenever possible. > When using this mode, instead of the regular pxar archive, the backup snapshot > -is stored into two separate files: the `mpxar` containing the archive's metadata > -and the `ppxar` containing a concatenation of the file contents. This splitting > -allows for efficient metadata lookups. When creating the backup archives, the > -current file metadata is compared to the one looked up in the previous `mpxar` > -archive. The operational details are explained more in depth in the > -:ref:`technical documentation <change-detection-mode-metadata>`. > - > -Using the `change-detection-mode` set to `data` allows to create the same split > -archive as when using the `metadata` mode, but without using a previous > +is stored into two separate files: the ``mpxar`` containing the archive's > +metadata and the ``ppxar`` containing a concatenation of the file contents. This > +splitting allows for efficient metadata lookups. When creating the backup > +archives, the current file metadata is compared to the one looked up in the > +previous ``mpxar`` archive. The operational details are explained more in depth > +in the :ref:`technical documentation <change-detection-mode-metadata>`. > + > +Using the ``change-detection-mode`` set to ``data`` allows to create the same > +split archive as when using the ``metadata`` mode, but without using a previous > reference and therefore reencoding all file payloads. For details of this mode > please see the :ref:`technical documentation <change-detection-mode-data>`. > read through all these doc patches, they sound good to me From t.lamprecht at proxmox.com Mon Nov 18 16:23:43 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 18 Nov 2024 16:23:43 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] client: pxar: fix race in pxar backup stream In-Reply-To: <20241118121603.235524-1-c.ebner@proxmox.com> References: <20241118121603.235524-1-c.ebner@proxmox.com> Message-ID: Am 18.11.24 um 13:16 schrieb Christian Ebner: > @@ -30,6 +31,7 @@ pub struct PxarBackupStream { > pub suggested_boundaries: Option<std::sync::mpsc::Receiver<u64>>, > handle: Option<AbortHandle>, > error: Arc<Mutex<Option<Error>>>, > + notify: Arc<Notify>, could be slightly nicer to name it such that one can (roughly) infer what this should notify about. From c.ebner at proxmox.com Mon Nov 18 16:47:42 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 18 Nov 2024 16:47:42 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup] client: pxar: fix race in pxar backup stream Message-ID: <20241118154742.376039-1-c.ebner@proxmox.com> Fixes a race condition where the backup upload stream can miss an error returned by pxar::create_archive, because the error state is only set after the backup stream was already polled. On instantiation, `PxarBackupStream` spawns a future handling the pxar archive creation, which sends the encoded pxar archive stream (or streams in case of split archives) through a channel, received by the pxar backup stream on polling. In case this channel is closed as signaled by returning an error, the poll logic will propagate any error that occurred during pxar creation by taking it from the `PxarBackupStream`.
As this error might not have been set just yet, this can lead to incorrectly terminating a backup snapshot with success, even though an error occurred. To fix this, signal the end of the archive creation to the pxar backup stream via a notification. In case of premature termination of the pxar backup stream, no additional measures have to be taken, as the abort handle already terminates the archive creation. Signed-off-by: Christian Ebner --- changes since version 1: - improved variable naming, distinguish notification sender and notified receiver pbs-client/src/pxar_backup_stream.rs | 12 ++++++++++ 1 file changed, 12 insertions(+) diff --git a/pbs-client/src/pxar_backup_stream.rs b/pbs-client/src/pxar_backup_stream.rs index 4370da6cc..6005a8d1e 100644 --- a/pbs-client/src/pxar_backup_stream.rs +++ b/pbs-client/src/pxar_backup_stream.rs @@ -11,6 +11,7 @@ use futures::stream::Stream; use nix::dir::Dir; use nix::fcntl::OFlag; use nix::sys::stat::Mode; +use tokio::sync::Notify; use proxmox_async::blocking::TokioWriterAdapter; use proxmox_io::StdChannelWriter; @@ -30,6 +31,7 @@ pub struct PxarBackupStream { pub suggested_boundaries: Option<std::sync::mpsc::Receiver<u64>>, handle: Option<AbortHandle>, error: Arc<Mutex<Option<Error>>>, + archive_finished_notification: Arc<Notify>, } impl Drop for PxarBackupStream { @@ -79,6 +81,8 @@ impl PxarBackupStream { let error = Arc::new(Mutex::new(None)); let error2 = Arc::clone(&error); + let pxar_backup_stream_notifier = Arc::new(Notify::new()); + let archive_finished_notification = pxar_backup_stream_notifier.clone(); let handler = async move { if let Err(err) = crate::pxar::create_archive( dir, @@ -100,6 +104,8 @@ impl PxarBackupStream { let mut error = error2.lock().unwrap(); *error = Some(err); } + // Notify upload stream that archiver is finished (with or without error) + pxar_backup_stream_notifier.notify_one(); }; let (handle, registration) = AbortHandle::new_pair(); @@ -111,6 +117,7 @@ impl PxarBackupStream { suggested_boundaries: None, handle: Some(handle.clone()), error: Arc::clone(&error), + archive_finished_notification: archive_finished_notification.clone(), }; let backup_payload_stream = payload_rx.map(|rx| Self { @@ -118,6 +125,7 @@ impl PxarBackupStream { suggested_boundaries: suggested_boundaries_rx, handle: Some(handle), error, + archive_finished_notification, }); Ok((backup_stream, backup_payload_stream)) @@ -151,6 +159,10 @@ impl Stream for PxarBackupStream { match proxmox_async::runtime::block_in_place(|| self.rx.as_ref().unwrap().recv()) { Ok(data) => Poll::Ready(Some(data)), Err(_) => { + // Wait until archiver signals finished to catch eventual errors + proxmox_async::runtime::block_in_place(|| { + self.archive_finished_notification.notified() + }); let mut error = self.error.lock().unwrap(); if let Some(err) = error.take() { return Poll::Ready(Some(Err(err))); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 18 20:35:52 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 18 Nov 2024 20:35:52 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup] client: pxar: fix race in pxar backup stream In-Reply-To: <20241118154742.376039-1-c.ebner@proxmox.com> References: <20241118154742.376039-1-c.ebner@proxmox.com> Message-ID: <3037ac22-05d4-4b8d-aca1-fc33adb48e83@proxmox.com> Please disregard this patch for now, this does not work as intended. Have to think this through once again.
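One candidate pitfall - an assumption on my side for now, not a finished analysis: `Notify::notified()` only constructs a future and does not wait by itself, so the future would actually have to be driven to completion, roughly like in this hypothetical sketch:

    // hypothetical sketch, not a tested fix: actually await the notification
    // instead of just constructing (and dropping) the `Notified` future
    proxmox_async::runtime::block_on(async {
        self.archive_finished_notification.notified().await;
    });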
From l.wagner at proxmox.com Tue Nov 19 09:05:33 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Tue, 19 Nov 2024 09:05:33 +0100 Subject: [pbs-devel] [pve-devel] [PATCH widget-toolkit v3 07/14] notification: add UI for adding/updating webhook targets In-Reply-To: References: <20241108144124.273550-1-l.wagner@proxmox.com> <20241108144124.273550-8-l.wagner@proxmox.com> Message-ID: On Mon Nov 11, 2024 at 11:09 PM CET, Thomas Lamprecht wrote: > Am 08.11.24 um 15:41 schrieb Lukas Wagner: > > The widgets for editing the headers/secrets were adapted from > > the 'Tag Edit' dialog from PVE's datacenter options. > > > > Apart from that, the new dialog is rather standard. I've decided > > to put the http method and url in a single row, mostly to > > save space and also to make it analogous to how an actual http request > > is structured (VERB URL, followed by headers, followed by the body). > > > > The secrets are a mechanism to store tokens/passwords in the > > protected notification config. Secrets are accessible via > > templating in the URL, headers and body via {{ secrets.NAME }}. > > Secrets can only be set/updated, but not retrieved/displayed. > > > > I re-checked this now with a bit more time and while it works fine I > think there can be still some UX enhancements: > > - Move the "Add" buttons below the grids, like we do for most other > such UIs (like e.g. in the "Match Rules" tab of the Matcher Add/Edit > window). You probably need to change the layout a bit to keep the > label on top; might want to ask Dominik for how to best achieve that. > Maybe include what is added in the button text, like "Add Header" or > "Add Secret" > > - always show an emptyText for the key/value fields to better convey > what is what, alternatively show colum headers, but that takes more > space. > > - improve validity checking for header/secret fields, without clicking > in them no invalid/required border is shown, but the overall form > state is invalid, thus disabling the window's Add button. This can > be confusing if no field is marked as invalid. > Thanks for the feedback, much appreciated. Seems like these issues were already taken care of while I was on sick leave, thanks Dominik! From g.goller at proxmox.com Tue Nov 19 17:17:25 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Tue, 19 Nov 2024 17:17:25 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] fix: allow datastore creation in directory with lost+found directory Message-ID: <20241119161725.519650-1-g.goller@proxmox.com> When creating a datastore without the "reuse-datastore" option and the datastore contains a `lost+found` directory (which is quite common), the creation fails. Add `lost+found` to the ignore list. 
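In other words, a datastore directory should count as empty if everything in it is either a hidden entry or `lost+found` - roughly like the following sketch (illustration only, the actual change is in the diff below):

    // sketch: treat hidden entries and the `lost+found` directory that
    // e.g. mkfs.ext4 creates as acceptable content of an "empty" path
    fn effectively_empty<'a>(mut entries: impl Iterator<Item = &'a str>) -> bool {
        entries.all(|name| name.starts_with('.') || name.starts_with("lost+found"))
    }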
Reported here: https://forum.proxmox.com/threads/bug-when-adding-new-storage-task-error-datastore-path-is-not-empty.157629/#post-721733 Fixes: 6e101ff75777 ("fix #5439: allow to reuse existing datastore") Signed-off-by: Gabriel Goller --- src/api2/config/datastore.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 374c302fcf28..f3bf652cda76 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -89,7 +89,9 @@ pub(crate) fn do_create_datastore( if let Ok(dir) = std::fs::read_dir(&path) { for file in dir { let name = file?.file_name(); - if !name.to_str().map_or(false, |name| name.starts_with('.')) { + if !name.to_str().map_or(false, |name| { + name.starts_with('.') || name.starts_with("lost+found") + }) { bail!("datastore path is not empty"); } } -- 2.39.5 From f.ebner at proxmox.com Wed Nov 20 11:19:58 2024 From: f.ebner at proxmox.com (Fiona Ebner) Date: Wed, 20 Nov 2024 11:19:58 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] fix: allow datastore creation in directory with lost+found directory In-Reply-To: <20241119161725.519650-1-g.goller@proxmox.com> References: <20241119161725.519650-1-g.goller@proxmox.com> Message-ID: <9ea533d6-7c5f-41db-9f60-e2a7fc09375b@proxmox.com> Am 19.11.24 um 17:17 schrieb Gabriel Goller: > When creating a datastore without the "reuse-datastore" option and the > datastore contains a `lost+found` directory (which is quite common), the > creation fails. Add `lost+found` to the ignore list. > > Reported here: https://forum.proxmox.com/threads/bug-when-adding-new-storage-task-error-datastore-path-is-not-empty.157629/#post-721733 > > Fixes: 6e101ff75777 ("fix #5439: allow to reuse existing datastore") > Signed-off-by: Gabriel Goller > --- > src/api2/config/datastore.rs | 4 +++- > 1 file changed, 3 insertions(+), 1 deletion(-) > > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index 374c302fcf28..f3bf652cda76 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -89,7 +89,9 @@ pub(crate) fn do_create_datastore( > if let Ok(dir) = std::fs::read_dir(&path) { > for file in dir { > let name = file?.file_name(); > - if !name.to_str().map_or(false, |name| name.starts_with('.')) { > + if !name.to_str().map_or(false, |name| { > + name.starts_with('.') || name.starts_with("lost+found") Nit: while it shouldn't make much difference in practice, why use starts_with()? Exact checking would seem more natural to me for this. From g.goller at proxmox.com Wed Nov 20 11:51:54 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Wed, 20 Nov 2024 11:51:54 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] fix: allow datastore creation in directory with lost+found directory In-Reply-To: <9ea533d6-7c5f-41db-9f60-e2a7fc09375b@proxmox.com> References: <20241119161725.519650-1-g.goller@proxmox.com> <9ea533d6-7c5f-41db-9f60-e2a7fc09375b@proxmox.com> Message-ID: <62qdxfi2wjug63bnp3e7qcxh2w4b5tk4u5lzm7lgdvf7nqx57j@iz5m3zy6tpcs> On 20.11.2024 11:19, Fiona Ebner wrote: >Am 19.11.24 um 17:17 schrieb Gabriel Goller: >> When creating a datastore without the "reuse-datastore" option and the >> datastore contains a `lost+found` directory (which is quite common), the >> creation fails. Add `lost+found` to the ignore list. 
>> >> Reported here: https://forum.proxmox.com/threads/bug-when-adding-new-storage-task-error-datastore-path-is-not-empty.157629/#post-721733 >> >> Fixes: 6e101ff75777 ("fix #5439: allow to reuse existing datastore") >> Signed-off-by: Gabriel Goller >> --- >> src/api2/config/datastore.rs | 4 +++- >> 1 file changed, 3 insertions(+), 1 deletion(-) >> >> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs >> index 374c302fcf28..f3bf652cda76 100644 >> --- a/src/api2/config/datastore.rs >> +++ b/src/api2/config/datastore.rs >> @@ -89,7 +89,9 @@ pub(crate) fn do_create_datastore( >> if let Ok(dir) = std::fs::read_dir(&path) { >> for file in dir { >> let name = file?.file_name(); >> - if !name.to_str().map_or(false, |name| name.starts_with('.')) { >> + if !name.to_str().map_or(false, |name| { >> + name.starts_with('.') || name.starts_with("lost+found") > >Nit: while it shouldn't make much difference in practice, why use >starts_with()? Exact checking would seem more natural to me for this. Actually no idea :) Will submit a v2 with Eq. From g.goller at proxmox.com Wed Nov 20 11:55:05 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Wed, 20 Nov 2024 11:55:05 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2] fix: allow datastore creation in directory with lost+found directory Message-ID: <20241120105505.238476-1-g.goller@proxmox.com> When creating a datastore without the "reuse-datastore" option and the datastore contains a `lost+found` directory (which is quite common), the creation fails. Add `lost+found` to the ignore list. Reported here: https://forum.proxmox.com/threads/bug-when-adding-new-storage-task-error-datastore-path-is-not-empty.157629/#post-721733 Fixes: 6e101ff75777 ("fix #5439: allow to reuse existing datastore") Signed-off-by: Gabriel Goller --- v2, thanks @Fiona: - exact match with == instead of `.starts_with` src/api2/config/datastore.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 374c302fcf28..9c36cb312a99 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -89,7 +89,10 @@ pub(crate) fn do_create_datastore( if let Ok(dir) = std::fs::read_dir(&path) { for file in dir { let name = file?.file_name(); - if !name.to_str().map_or(false, |name| name.starts_with('.')) { + if !name + .to_str() + .map_or(false, |name| name.starts_with('.') || name == "lost+found") + { bail!("datastore path is not empty"); } } -- 2.39.5 From f.gruenbichler at proxmox.com Wed Nov 20 14:11:04 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Wed, 20 Nov 2024 14:11:04 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <20241105104015.162094-2-g.goller@proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> <20241105104015.162094-2-g.goller@proxmox.com> Message-ID: <173210826421.198988.14774192201672116937@yuna.proxmox.com> a few small nits inline, looks good to me otherwise, but given the size of this and the size of the push series, I'd rather this be rebased on top of the other one ;) Quoting Gabriel Goller (2024-11-05 11:40:13) > This option allows us to "fix" corrupt snapshots (and/or their chunks) > by pulling them from another remote. When traversing the remote > snapshots, we check if it exists locally, and if it is, we check if the > last verification of it failed. 
If the local snapshot is broken and the > `resync-corrupt` option is turned on, we pull in the remote snapshot, > overwriting the local one. > > This is very useful and has been requested a lot, as there is currently > no way to "fix" corrupt chunks/snapshots even if the user has a healthy > version of it on their offsite instance. > > Originally-by: Shannon Sterz > Signed-off-by: Gabriel Goller > --- > pbs-api-types/src/jobs.rs | 10 +++++ > pbs-datastore/src/backup_info.rs | 12 +++++- > pbs-datastore/src/manifest.rs | 13 ++++++- > src/api2/config/sync.rs | 4 ++ > src/api2/pull.rs | 9 ++++- > src/bin/proxmox-backup-manager.rs | 4 +- > src/server/pull.rs | 62 +++++++++++++++++++++++-------- > 7 files changed, 93 insertions(+), 21 deletions(-) > > diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs > index 868702bc059e..58f739ad00b5 100644 > --- a/pbs-api-types/src/jobs.rs > +++ b/pbs-api-types/src/jobs.rs > @@ -498,6 +498,10 @@ pub const TRANSFER_LAST_SCHEMA: Schema = > .minimum(1) > .schema(); > > +pub const RESYNC_CORRUPT_SCHEMA: Schema = > + BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") > + .schema(); > + > #[api( > properties: { > id: { > @@ -552,6 +556,10 @@ pub const TRANSFER_LAST_SCHEMA: Schema = > schema: TRANSFER_LAST_SCHEMA, > optional: true, > }, > + "resync-corrupt": { > + schema: RESYNC_CORRUPT_SCHEMA, > + optional: true, > + } > } > )] > #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] > @@ -585,6 +593,8 @@ pub struct SyncJobConfig { > pub limit: RateLimitConfig, > #[serde(skip_serializing_if = "Option::is_none")] > pub transfer_last: Option<usize>, > + #[serde(skip_serializing_if = "Option::is_none")] > + pub resync_corrupt: Option<bool>, > } > > impl SyncJobConfig { > diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs > index 414ec878d01a..e6174322dad6 100644 > --- a/pbs-datastore/src/backup_info.rs > +++ b/pbs-datastore/src/backup_info.rs > @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error}; > use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; > > use pbs_api_types::{ > - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, > + Authid, BackupNamespace, BackupType, GroupFilter, VerifyState, BACKUP_DATE_REGEX, > + BACKUP_FILE_REGEX, > }; > use pbs_config::{open_backup_lockfile, BackupLockGuard}; > > @@ -583,6 +584,15 @@ impl BackupDir { > > Ok(()) > } > + > + /// Load the verify state from the manifest. > + pub fn verify_state(&self) -> Option<VerifyState> { should this be a Result<Option<VerifyState>> to allow differentiation between no verification state, and failure to parse or load the manifest? that would allow us to resync totally corrupted snapshots as well (although that might be considered out of scope based on the parameter description ;))
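e.g. something along these lines (untested sketch on top of this patch, re-using the existing `load_manifest` helper):

    /// Load the verify state from the manifest.
    pub fn verify_state(&self) -> Result<Option<VerifyState>, anyhow::Error> {
        // propagate a missing or unreadable manifest instead of mapping it to `None`
        let (manifest, _raw_size) = self.load_manifest()?;
        Ok(manifest.verify_state())
    }

callers could then decide to treat `Err(..)` like a failed verification when checking whether to resync.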
that would allow us to resync totally corrupted snapshots as well (although that might be considered out of scope based on the parameter description ;)) > + if let Ok(manifest) = self.load_manifest() { > + manifest.0.verify_state() > + } else { > + None > + } > + } > } > > impl AsRef for BackupDir { > diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs > index c3df014272a0..623c1499c0bb 100644 > --- a/pbs-datastore/src/manifest.rs > +++ b/pbs-datastore/src/manifest.rs > @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error}; > use serde::{Deserialize, Serialize}; > use serde_json::{json, Value}; > > -use pbs_api_types::{BackupType, CryptMode, Fingerprint}; > +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState, VerifyState}; > use pbs_tools::crypt_config::CryptConfig; > > pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; > @@ -242,6 +242,17 @@ impl BackupManifest { > let manifest: BackupManifest = serde_json::from_value(json)?; > Ok(manifest) > } > + > + /// Get the verify state of the snapshot > + /// > + /// Note: New snapshots, which have not been verified yet, do not have a status and this > + /// function will return `None`. > + pub fn verify_state(&self) -> Option { should this be a Result> to allow differentiation between no verification state, and failure to parse? also, it would be great if existing code retrieving this could be adapted to use these new helpers, which would require having the Result there as well.. > + let verify = self.unprotected["verify_state"].clone(); > + serde_json::from_value::(verify) > + .map(|svs| svs.state) > + .ok() > + } > } > > impl TryFrom for BackupManifest { > diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs > index 6fdc69a9e645..fa9db92f3d11 100644 > --- a/src/api2/config/sync.rs > +++ b/src/api2/config/sync.rs > @@ -368,6 +368,9 @@ pub fn update_sync_job( > if let Some(transfer_last) = update.transfer_last { > data.transfer_last = Some(transfer_last); > } > + if let Some(resync_corrupt) = update.resync_corrupt { > + data.resync_corrupt = Some(resync_corrupt); > + } > > if update.limit.rate_in.is_some() { > data.limit.rate_in = update.limit.rate_in; > @@ -527,6 +530,7 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator > ns: None, > owner: Some(write_auth_id.clone()), > comment: None, > + resync_corrupt: None, > remove_vanished: None, > max_depth: None, > group_filter: None, > diff --git a/src/api2/pull.rs b/src/api2/pull.rs > index e733c9839e3a..0d4be0e2d228 100644 > --- a/src/api2/pull.rs > +++ b/src/api2/pull.rs > @@ -10,7 +10,7 @@ use pbs_api_types::{ > Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, > GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, > PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, > - TRANSFER_LAST_SCHEMA, > + RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, > }; > use pbs_config::CachedUserInfo; > use proxmox_human_byte::HumanByte; > @@ -89,6 +89,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters { > sync_job.group_filter.clone(), > sync_job.limit.clone(), > sync_job.transfer_last, > + sync_job.resync_corrupt, > ) > } > } > @@ -240,6 +241,10 @@ pub fn do_sync_job( > schema: TRANSFER_LAST_SCHEMA, > optional: true, > }, > + "resync-corrupt": { > + schema: RESYNC_CORRUPT_SCHEMA, > + optional: true, > + }, > }, > }, > access: { > @@ -264,6 +269,7 @@ async fn pull( > group_filter: Option>, > limit: RateLimitConfig, > transfer_last: 
Option<usize>, > + resync_corrupt: Option<bool>, > rpcenv: &mut dyn RpcEnvironment, > ) -> Result<String, Error> { > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > @@ -301,6 +307,7 @@ async fn pull( > group_filter, > limit, > transfer_last, > + resync_corrupt, > )?; > > // fixme: set to_stdout to false? > diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs > index 420e96665662..38a1cf0f5881 100644 > --- a/src/bin/proxmox-backup-manager.rs > +++ b/src/bin/proxmox-backup-manager.rs > @@ -14,8 +14,8 @@ use pbs_api_types::percent_encoding::percent_encode_component; > use pbs_api_types::{ > BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, > GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, > - REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA, > - VERIFICATION_OUTDATED_AFTER_SCHEMA, > + REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, > + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, > }; > use pbs_client::{display_task_log, view_task_result}; > use pbs_config::sync; > diff --git a/src/server/pull.rs b/src/server/pull.rs > index d9584776ee7f..11a0a9d74cf3 100644 > --- a/src/server/pull.rs > +++ b/src/server/pull.rs > @@ -12,7 +12,8 @@ use tracing::info; > > use pbs_api_types::{ > print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, > - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, > + RateLimitConfig, Remote, VerifyState, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, > + PRIV_DATASTORE_BACKUP, > }; > use pbs_client::BackupRepository; > use pbs_config::CachedUserInfo; > @@ -55,6 +56,8 @@ pub(crate) struct PullParameters { > group_filter: Vec<GroupFilter>, > /// How many snapshots should be transferred at most (taking the newest N snapshots) > transfer_last: Option<usize>, > + /// Whether to re-sync corrupted snapshots > + resync_corrupt: bool, > } > > impl PullParameters { > @@ -72,12 +75,14 @@ impl PullParameters { > group_filter: Option<Vec<GroupFilter>>, > limit: RateLimitConfig, > transfer_last: Option<usize>, > + resync_corrupt: Option<bool>, > ) -> Result<Self, Error> { > if let Some(max_depth) = max_depth { > ns.check_max_depth(max_depth)?; > remote_ns.check_max_depth(max_depth)?; > }; > let remove_vanished = remove_vanished.unwrap_or(false); > + let resync_corrupt = resync_corrupt.unwrap_or(false); > > let source: Arc<dyn SyncSource> = if let Some(remote) = remote { > let (remote_config, _digest) = pbs_config::remote::config()?; > @@ -116,6 +121,7 @@ impl PullParameters { > max_depth, > group_filter, > transfer_last, > + resync_corrupt, > }) > } > } > @@ -323,7 +329,7 @@ async fn pull_single_archive<'a>( > /// > /// Pulling a snapshot consists of the following steps: > /// - (Re)download the manifest > -/// -- if it matches, only download log and treat snapshot as already synced > +/// -- if it matches and is not corrupt, only download log and treat snapshot as already synced > /// - Iterate over referenced files > /// -- if file already exists, verify contents > /// -- if not, pull it from the remote > @@ -332,6 +338,7 @@ async fn pull_snapshot<'a>( > reader: Arc<dyn SyncSourceReader + 'a>, > snapshot: &'a pbs_datastore::BackupDir, > downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, > + corrupt: bool, > ) -> Result<SyncStats, Error> { > let mut sync_stats = SyncStats::default(); > let mut manifest_name = snapshot.full_path(); > @@ -352,7 +359,7 @@ async fn pull_snapshot<'a>( > return Ok(sync_stats); > } > > - if manifest_name.exists() { > + if manifest_name.exists() && !corrupt { > let manifest_blob = proxmox_lang::try_block!({ > let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { > format_err!("unable to open local manifest {manifest_name:?} - {err}") > @@ -381,7 +388,7 @@ async fn pull_snapshot<'a>( > let mut path = snapshot.full_path(); > path.push(&item.filename); > > - if path.exists() { > + if !corrupt && path.exists() { > match ArchiveType::from_path(&item.filename)? { > ArchiveType::DynamicIndex => { > let index = DynamicIndexReader::open(&path)?; > @@ -443,6 +450,7 @@ async fn pull_snapshot_from<'a>( > reader: Arc<dyn SyncSourceReader + 'a>, > snapshot: &'a pbs_datastore::BackupDir, > downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, > + corrupt: bool,
manifest_blob = proxmox_lang::try_block!({ > let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { > format_err!("unable to open local manifest {manifest_name:?} - {err}") > @@ -381,7 +388,7 @@ async fn pull_snapshot<'a>( > let mut path = snapshot.full_path(); > path.push(&item.filename); > > - if path.exists() { > + if !corrupt && path.exists() { > match ArchiveType::from_path(&item.filename)? { > ArchiveType::DynamicIndex => { > let index = DynamicIndexReader::open(&path)?; > @@ -443,6 +450,7 @@ async fn pull_snapshot_from<'a>( > reader: Arc, > snapshot: &'a pbs_datastore::BackupDir, > downloaded_chunks: Arc>>, > + corrupt: bool, > ) -> Result { > let (_path, is_new, _snap_lock) = snapshot > .datastore() > @@ -451,7 +459,7 @@ async fn pull_snapshot_from<'a>( > let sync_stats = if is_new { is_new and corrupt are never both true.. > info!("sync snapshot {}", snapshot.dir()); > > - match pull_snapshot(reader, snapshot, downloaded_chunks).await { > + match pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await { so this should be always false ;) > Err(err) => { > if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( > snapshot.backup_ns(), > @@ -468,8 +476,15 @@ async fn pull_snapshot_from<'a>( > } > } > } else { > - info!("re-sync snapshot {}", snapshot.dir()); > - pull_snapshot(reader, snapshot, downloaded_chunks).await? > + if corrupt { > + info!( > + "re-sync snapshot {} due to bad verification result", nit: why not call it "corrupt", since that is what the parameter is called? > + snapshot.dir() > + ); > + } else { > + info!("re-sync snapshot {}", snapshot.dir()); > + } > + pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await? > }; > > Ok(sync_stats) > @@ -523,26 +538,40 @@ async fn pull_group( > .last_successful_backup(&target_ns, group)? 
> .unwrap_or(i64::MIN); > > - let list: Vec<BackupDir> = raw_list > + // Filter remote BackupDirs to include in pull > + // Also stores if the snapshot is corrupt (verification job failed) > + let list: Vec<(BackupDir, bool)> = raw_list > .into_iter() > .enumerate() > - .filter(|&(pos, ref dir)| { > + .filter_map(|(pos, dir)| { > source_snapshots.insert(dir.time); > + // If resync_corrupt is set, check if the corresponding local snapshot failed to > + // verification > + if params.resync_corrupt { > + let local_dir = params > + .target > + .store > + .backup_dir(target_ns.clone(), dir.clone()); > + if let Ok(local_dir) = local_dir { > + let verify_state = local_dir.verify_state(); > + if verify_state == Some(VerifyState::Failed) { > + return Some((dir, true)); > + } > + } > + } > // Note: the snapshot represented by `last_sync_time` might be missing its backup log > // or post-backup verification state if those were not yet available during the last > // sync run, always resync it > if last_sync_time > dir.time { > already_synced_skip_info.update(dir.time); > - return false; > + return None; > } > - > if pos < cutoff && last_sync_time != dir.time { > transfer_last_skip_info.update(dir.time); > - return false; > + return None; > } > - true > + Some((dir, false)) > }) > - .map(|(_, dir)| dir) > .collect(); > > if already_synced_skip_info.count > 0 { > @@ -561,7 +590,7 @@ > > let mut sync_stats = SyncStats::default(); > > - for (pos, from_snapshot) in list.into_iter().enumerate() { > + for (pos, (from_snapshot, corrupt)) in list.into_iter().enumerate() { > let to_snapshot = params > .target > .store > @@ -571,7 +600,8 @@ > .source > .reader(source_namespace, &from_snapshot) > .await?; > - let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await; > + let result = > + pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone(), corrupt).await; > > progress.done_snapshots = pos as u64 + 1; > info!("percentage done: {progress}"); > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Wed Nov 20 14:12:17 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Wed, 20 Nov 2024 14:12:17 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 2/3] fix #3786: ui/cli: add resync-corrupt option on sync-jobs In-Reply-To: <20241105104015.162094-3-g.goller@proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> <20241105104015.162094-3-g.goller@proxmox.com> Message-ID: <173210833764.198988.6832521006255309425@yuna.proxmox.com>
> > Originally-by: Shannon Sterz > Signed-off-by: Gabriel Goller > --- > src/bin/proxmox-backup-manager.rs | 9 +++++++++ > www/window/SyncJobEdit.js | 11 +++++++++++ > 2 files changed, 20 insertions(+) > > diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs > index 38a1cf0f5881..08728e9d7250 100644 > --- a/src/bin/proxmox-backup-manager.rs > +++ b/src/bin/proxmox-backup-manager.rs > @@ -339,6 +339,10 @@ fn task_mgmt_cli() -> CommandLineInterface { > schema: TRANSFER_LAST_SCHEMA, > optional: true, > }, > + "resync-corrupt": { > + schema: RESYNC_CORRUPT_SCHEMA, > + optional: true, > + }, > } > } > )] > @@ -355,6 +359,7 @@ async fn pull_datastore( > group_filter: Option>, > limit: RateLimitConfig, > transfer_last: Option, > + resync_corrupt: Option, > param: Value, > ) -> Result { > let output_format = get_output_format(¶m); > @@ -391,6 +396,10 @@ async fn pull_datastore( > args["transfer-last"] = json!(transfer_last) > } > > + if let Some(resync_corrupt) = resync_corrupt { > + args["resync-corrupt"] = Value::from(resync_corrupt); > + } > + > let mut limit_json = json!(limit); > let limit_map = limit_json > .as_object_mut() > diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js > index 6543995e8800..a3c497fc2185 100644 > --- a/www/window/SyncJobEdit.js > +++ b/www/window/SyncJobEdit.js > @@ -321,6 +321,17 @@ Ext.define('PBS.window.SyncJobEdit', { > deleteEmpty: '{!isCreate}', > }, > }, > + { > + fieldLabel: gettext('Resync corrupt snapshots'), > + xtype: 'proxmoxcheckbox', > + name: 'resync-corrupt', > + autoEl: { > + tag: 'div', > + 'data-qtip': gettext('Re-sync snapshots, whose verfification failed.'), > + }, > + uncheckedValue: false, > + value: false, > + }, > ], > }, > { > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From t.lamprecht at proxmox.com Wed Nov 20 17:27:06 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Wed, 20 Nov 2024 17:27:06 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types In-Reply-To: <173089427968.79072.3773251895934605531@yuna.proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com> Message-ID: Am 06.11.24 um 12:57 schrieb Fabian Gr?nbichler: > @Thomas: since there's a few questions below that have long-term implications, > I'd appreciate feedback.. > > Quoting Christian Ebner (2024-10-31 13:15:02) >> Add a dedicated api type for the `version` api endpoint and helper >> methods for supported feature comparison. >> This will be used to detect api incompatibility of older hosts, not >> supporting some features. >> >> Use the new api type to refactor the version endpoint and set it as >> return type. >> > > but, I am not sure if we even need this now, we could also just implement > helpers on ApiVersionInfo that give us the major, minor, release versions as > u64? especially if we do "does the server support XX" via explicit named > features, and don't even have a use case (yet) for accessing the version parts? > > the big question here is - do we want to expose this kind of thing? 
>
> so far, we've used the approach of making things opt-in or backwards
> compatible, or failing hard if a newer client tries to use a feature that is not
> supported by an older server (e.g., if a client tries to use namespaces with a
> server that doesn't support them, it will just error out on whichever request it
> makes).


For most new features this should be the way to go for now, as you write,
we basically use it on every product's API already.

Such a feature negotiation makes IMO mostly sense if I can use that to
fall back to some other protocol/endpoint/parameter set transparently while
still honoring what the user told us to do here.

>
> there are two ways to handle explicit versioning between client and server:
>
> 1.) client retrieves the version, and has a list of "feature A is supported
> since version X.Y.Z"
>
> 2.) client retrieves a list of supported features from the server (this patch
> (series))
>
> variant 1 has the advantage that we don't have to keep an ever-growing list of
> features around (or worry about "naming" and organizing them). variant 2 has the
> advantage that the server can explicitly tell what it supports without needing
> the client to adapt its version <-> feature mapping (i.e., if we or somebody else
> backports a feature). it also has the advantage that there is no risk of the
> version mapping being wrong (e.g., because there was unexpected delay in
> applying a patch series, or somebody made a mistake in the contained version
> number).
>
> variant 1 was what I actually had in mind when I originally proposed this, but I
> do like variant 2 as well!

Not sure if we really want to backport features, at least if using the word in
the more literal sense from a user POV. If so, it would rather be for a security
issue or grave bug fix, and it's hard to predict but IMO a bit unlikely that
such things need features at a PBS-aware level that the API and its parameters
cannot provide.

IMO it really depends on the specific use-case, and sure, if we would add such
feature negotiation endpoints and plumbing we might use it more, but mostly
because it's there already, and probably not because it's strictly necessary.

But if we ignore the need then yes, feature lists might be a bit nicer: they
decouple versioning and provide more semantic meaning on their own, which IME
reduces the error potential of holding them wrong.

Anyway, I did not check this or the v7 series out all too closely, so please bear
with me, but from a 1 km view: I'd find it a bit unlikely that this series,
which is adding new endpoints for new functionality, needs such feature
detection or negotiation; users simply need to upgrade anyway.
So, for the sake of simplicity, which often has a big value for long-term
maintenance, let's avoid this if not really required.

From c.ebner at proxmox.com Wed Nov 20 18:34:58 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 20 Nov 2024 18:34:58 +0100
Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types
In-Reply-To:
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com>
Message-ID: <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com>

On 11/20/24 17:27, Thomas Lamprecht wrote:
> Am 06.11.24 um 12:57 schrieb Fabian Grünbichler:
>> @Thomas: since there's a few questions below that have long-term implications,
>> I'd appreciate feedback..
>> Quoting Christian Ebner (2024-10-31 13:15:02)
>>> Add a dedicated api type for the `version` api endpoint and helper
>>> methods for supported feature comparison.
>>> This will be used to detect api incompatibility of older hosts, not
>>> supporting some features.
>>>
>>> Use the new api type to refactor the version endpoint and set it as
>>> return type.
>>>
>>
>> but, I am not sure if we even need this now, we could also just implement
>> helpers on ApiVersionInfo that give us the major, minor, release versions as
>> u64? especially if we do "does the server support XX" via explicit named
>> features, and don't even have a use case (yet) for accessing the version parts?
>>
>> the big question here is - do we want to expose this kind of thing?
>
>
> For most new features this should be the way to go for now, as you write,
> we basically use it on every product's API already.
>
> Such a feature negotiation makes IMO mostly sense if I can use that to
> fall back to some other protocol/endpoint/parameter set transparently while
> still honoring what the user told us to do here.

In this case we use the feature negotiation to expose an additional
parameter to the snapshot/group delete endpoints, so that it behaves
differently (no hard failure when protected snapshots are present, return
delete stats). Without the feature exposed, the previous behavior is
honored, which is still used for regular snapshot/group deletion.

>
>> there are two ways to handle explicit versioning between client and server:
>>
>> 1.) client retrieves the version, and has a list of "feature A is supported
>> since version X.Y.Z"
>>
>> 2.) client retrieves a list of supported features from the server (this patch
>> (series))
>>
>> variant 1 has the advantage that we don't have to keep an ever-growing list of
>> features around (or worry about "naming" and organizing them). variant 2 has the
>> advantage that the server can explicitly tell what it supports without needing
>> the client to adapt its version <-> feature mapping (i.e., if we or somebody else
>> backports a feature). it also has the advantage that there is no risk of the
>> version mapping being wrong (e.g., because there was unexpected delay in
>> applying a patch series, or somebody made a mistake in the contained version
>> number).
>>
>> variant 1 was what I actually had in mind when I originally proposed this, but I
>> do like variant 2 as well!
>
> Not sure if we really want to backport features, at least if using the word in
> the more literal sense from a user POV. If so, it would rather be for a security
> issue or grave bug fix, and it's hard to predict but IMO a bit unlikely that
> such things need features at a PBS-aware level that the API and its parameters
> cannot provide.
>
> IMO it really depends on the specific use-case, and sure, if we would add such
> feature negotiation endpoints and plumbing we might use it more, but mostly
> because it's there already, and probably not because it's strictly necessary.
>
> But if we ignore the need then yes, feature lists might be a bit nicer: they
> decouple versioning and provide more semantic meaning on their own, which IME
> reduces the error potential of holding them wrong.
I would argue in favor of the feature list here, as this makes it:
- easier to see from the context what is needed
- independent of version bumps

> Anyway, I did not check this or the v7 series out all too closely, so please bear
> with me, but from a 1 km view: I'd find it a bit unlikely that this series,
> which is adding new endpoints for new functionality, needs such feature
> detection or negotiation; users simply need to upgrade anyway.
> So, for the sake of simplicity, which often has a big value for long-term
> maintenance, let's avoid this if not really required.

From t.lamprecht at proxmox.com Thu Nov 21 10:23:58 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 21 Nov 2024 10:23:58 +0100
Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types
In-Reply-To: <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com>
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com> <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com>
Message-ID: <13127ac6-d634-4ba4-b48a-9866110e35e1@proxmox.com>

Am 20.11.24 um 18:34 schrieb Christian Ebner:
>> Such a feature negotiation makes IMO mostly sense if I can use that to
>> fall back to some other protocol/endpoint/parameter set transparently while
>> still honoring what the user told us to do here.
> In this case we use the feature negotiation to expose an additional
> parameter to the snapshot/group delete endpoints, so that it behaves
> differently (no hard failure when protected snapshots are present,

Hmm, I'm a bit torn, I can get where you come from, but this is a bit
bigger change in terms of how we handled these in the past, and naturally
a permanent commitment.

A "classic" alternative could be e.g. to expose it in the sync job and
switch the default value for new jobs with the next major release.

I have some concerns about some feature explosion over the midterm if used
at this level, which can lead to rather odd effects for users, e.g. if one
setup behaves very differently than another even though the same job settings
are used. Explicit settings and errors might not be _that_ convenient, but
they are very telling and easy.

That said, do not take this as blocking this outright, maybe someone else
can also share their opinion on this (or if you got further arguments for
why my concerns are not warranted I'm obv. happy to hear these too)

> return delete stats). Without the feature exposed, the previous behavior

The stats are always returned now?

Am 20.11.24 um 18:34 schrieb Christian Ebner:
>> But if we ignore the need then yes, feature lists might be a bit nicer: they
>> decouple versioning and provide more semantic meaning on their own, which IME
>> reduces the error potential of holding them wrong.
> I would argue in favor of the feature list here, as this makes it:
> - easier to see from the context what is needed
> - independent of version bumps

Albeit, for the PDM we will go for simple version matching to know what
APIs can be used, as we normally try to batch bigger changes at major
releases, and for bigger new features minor releases work fine too.
We can naturally do this for PBS and do not have to then use that paradigm
everywhere, so it's not coupled, as IMO maintaining feature lists over many
releases, and that across multiple products, is not something I want to do for
PDM, albeit it's a bit more gut feeling, backed by some experience, but still,
I certainly do not assume I'm right, this is far from black and white.

Something tangentially related:

In general, it might also be worth thinking about how the protection flag can
be better synced. FWICT it's now set if the source has it set, and then will
never get unset manually anymore? Remembering the source of the flag (i.e.,
sync from remote vs local api) could be an option to differentiate here when
it's OK to clear on sync transiently again (probably guarded as option in the
job). But here I'm a bit more distanced from the matter than you are, I'll need
to think a bit more about this all.

For now maybe order the whole API feature thing towards the end of the series
and we can still commit all earlier patches already and decide on this a
(short) time later.

From t.lamprecht at proxmox.com Thu Nov 21 10:27:28 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 21 Nov 2024 10:27:28 +0100
Subject: [pbs-devel] [PATCH v6 proxmox-backup 27/29] api: datastore/namespace: return backup groups delete stats on remove
In-Reply-To: <20241031121519.434337-28-c.ebner@proxmox.com>
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-28-c.ebner@proxmox.com>
Message-ID: <5cf14b9b-2e58-4084-87ac-b44f2e5d1d83@proxmox.com>

Am 31.10.24 um 13:15 schrieb Christian Ebner:
> Add and expose the backup group delete statistics by adding the
> return type to the corresponding REST API endpoints.
>
> Further, add an `error-on-protected` flag to the api endpoints,
> allowing to return without error when set to false. Default remains
> enabled.

This first sounded like it should be two patches, but they are related in the
sense that the other side needs both to differ between "backup went generally
fine" and "all was OK but could not delete some snapshots that are protected".

So please add that rationale, or a better one if I was off, to the commit
message. As while the "what" can be nice to have as a summary without reading
all the code, the "why" is very important to see how it overall fits into the
picture (of a series but also the project as a whole), at least for me that is.

From f.gruenbichler at proxmox.com Thu Nov 21 10:38:57 2024
From: f.gruenbichler at proxmox.com (Fabian Grünbichler)
Date: Thu, 21 Nov 2024 10:38:57 +0100 (CET)
Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types
In-Reply-To: <13127ac6-d634-4ba4-b48a-9866110e35e1@proxmox.com>
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com> <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com> <13127ac6-d634-4ba4-b48a-9866110e35e1@proxmox.com>
Message-ID: <1095829617.7310.1732181937570@webmail.proxmox.com>

> Thomas Lamprecht hat am 21.11.2024 10:23 CET geschrieben:
>
>
> Am 20.11.24 um 18:34 schrieb Christian Ebner:
> >> Such a feature negotiation makes IMO mostly sense if I can use that to
> >> fall back to some other protocol/endpoint/parameter set transparently while
> >> still honoring what the user told us to do here.
> > In this case we use the feature negotiation to expose an additional
> > parameter to the snapshot/group delete endpoints, so that it behaves
> > differently (no hard failure when protected snapshots are present,
>
> Hmm, I'm a bit torn, I can get where you come from, but this is a bit
> bigger change in terms of how we handled these in the past, and naturally
> a permanent commitment.
>
> A "classic" alternative could be e.g. to expose it in the sync job and
> switch the default value for new jobs with the next major release.
>
> I have some concerns about some feature explosion over the midterm if used
> at this level, which can lead to rather odd effects for users, e.g. if one
> setup behaves very differently than another even though the same job settings
> are used. Explicit settings and errors might not be _that_ convenient, but
> they are very telling and easy.
>
> That said, do not take this as blocking this outright, maybe someone else
> can also share their opinion on this (or if you got further arguments for
> why my concerns are not warranted I'm obv. happy to hear these too)
>
> > return delete stats). Without the feature exposed, the previous behavior
>
> The stats are always returned now?

no, the stats are returned if an opt-in flag is set, otherwise encountering
protected snapshots/groups makes removal fail. that way, old clients still get
the behaviour they are used to, but can opt into not treating such cases as a
fatal error and instead get structured data about the situation as a result.

sync wants to set the flag to get the better behaviour/information, if the
remote does support it already (there is no hard requirement for push support
other than namespaces). it's purely an ergonomics/UX improvement to only set
the flag if supported and handle the returned stats properly depending on that
state.

I prepared a follow-up commit that guards this based on the version and drops
the feature list altogether for now.

> Am 20.11.24 um 18:34 schrieb Christian Ebner:
> >> But if we ignore the need then yes, feature lists might be a bit nicer: they
> >> decouple versioning and provide more semantic meaning on their own, which IME
> >> reduces the error potential of holding them wrong.
> > I would argue in favor of the feature list here, as this makes it:
> > - easier to see from the context what is needed
> > - independent of version bumps
>
> Albeit, for the PDM we will go for simple version matching to know what
> APIs can be used, as we normally try to batch bigger changes at major
> releases, and for bigger new features minor releases work fine too.
> We can naturally do this for PBS and do not have to then use that paradigm
> everywhere, so it's not coupled, as IMO maintaining feature lists over many
> releases, and that across multiple products, is not something I want to do for
> PDM, albeit it's a bit more gut feeling, backed by some experience, but still,
> I certainly do not assume I'm right, this is far from black and white.
>
> Something tangentially related:
>
> In general, it might also be worth thinking about how the protection flag can
> be better synced. FWICT it's now set if the source has it set, and then will
> never get unset manually anymore? Remembering the source of the flag (i.e.,
> sync from remote vs local api) could be an option to differentiate here when
> it's OK to clear on sync transiently again (probably guarded as option in the
> job). But here I'm a bit more distanced from the matter than you are, I'll need
> to think a bit more about this all.
the protection flag (and notes, and changes to the unprotected part of the
manifest after the initial sync) are not (re-)synced at all (neither with pull,
nor with push); this is just about what to do if a snapshot/group/namespace has
vanished but protection prevents complete removal.

(re)syncing protection status, better handling of verification state in sync
context, and other similar things might be nice as an opt-in feature though.

> For now maybe order the whole API feature thing towards the end of the series
> and we can still commit all earlier patches already and decide on this a
> (short) time later.

see above.

From c.ebner at proxmox.com Thu Nov 21 10:58:40 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 21 Nov 2024 10:58:40 +0100
Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types
In-Reply-To:
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com> <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com> <13127ac6-d634-4ba4-b48a-9866110e35e1@proxmox.com>
Message-ID: <65ce8683-8e27-4d4e-a2f3-9d05960f2e72@proxmox.com>

On 11/21/24 10:23, Thomas Lamprecht wrote:
> Am 20.11.24 um 18:34 schrieb Christian Ebner:
>>> Such a feature negotiation makes IMO mostly sense if I can use that to
>>> fall back to some other protocol/endpoint/parameter set transparently while
>>> still honoring what the user told us to do here.
>> In this case we use the feature negotiation to expose an additional
>> parameter to the snapshot/group delete endpoints, so that it behaves
>> differently (no hard failure when protected snapshots are present,
>
> Hmm, I'm a bit torn, I can get where you come from, but this is a bit
> bigger change in terms of how we handled these in the past, and naturally
> a permanent commitment.
>
> A "classic" alternative could be e.g. to expose it in the sync job and
> switch the default value for new jobs with the next major release.
>
> I have some concerns about some feature explosion over the midterm if used
> at this level, which can lead to rather odd effects for users, e.g. if one
> setup behaves very differently than another even though the same job settings
> are used. Explicit settings and errors might not be _that_ convenient, but
> they are very telling and easy.
>
> That said, do not take this as blocking this outright, maybe someone else
> can also share their opinion on this (or if you got further arguments for
> why my concerns are not warranted I'm obv. happy to hear these too)
>
>> return delete stats). Without the feature exposed, the previous behavior
>
> The stats are always returned now?

Yes, sorry that was incorrectly stated by me. Only the failure mode is
handled differently, as already clarified by Fabian in his reply. But the
stats are returned unconditionally on success.

> Am 20.11.24 um 18:34 schrieb Christian Ebner:
>>> But if we ignore the need then yes, feature lists might be a bit nicer: they
>>> decouple versioning and provide more semantic meaning on their own, which IME
>>> reduces the error potential of holding them wrong.
>> I would argue in favor of the feature list here, as this makes it:
>> - easier to see from the context what is needed
>> - independent of version bumps
>
> Albeit, for the PDM we will go for simple version matching to know what
> APIs can be used, as we normally try to batch bigger changes at major
> releases, and for bigger new features minor releases work fine too.
> We can naturally do this for PBS and do not have to then use that paradigm
> everywhere, so it's not coupled, as IMO maintaining feature lists over many
> releases, and that across multiple products, is not something I want to do for
> PDM, albeit it's a bit more gut feeling, backed by some experience, but still,
> I certainly do not assume I'm right, this is far from black and white.

Well, that is something I did not consider at all! So with that viewpoint,
adding this to PBS specifically is surely not the best way. As discussed with
Fabian off list, version-based matching will be the best way forward here, and
dropping the incompatibility check once EOL is reached.

>
> Something tangentially related:
>
> In general, it might also be worth thinking about how the protection flag can
> be better synced. FWICT it's now set if the source has it set, and then will
> never get unset manually anymore? Remembering the source of the flag (i.e.,
> sync from remote vs local api) could be an option to differentiate here when
> it's OK to clear on sync transiently again (probably guarded as option in the
> job). But here I'm a bit more distanced from the matter than you are, I'll need
> to think a bit more about this all.
>
> For now maybe order the whole API feature thing towards the end of the series
> and we can still commit all earlier patches already and decide on this a
> (short) time later.

Not sure if I correctly interpreted your rationale here. As Fabian mentioned,
the additional parameter included in the api calls is not there to control how
we sync the flag, but rather how to act in case the sync jobs should prune
vanished snapshots/groups from the remote.

From c.ebner at proxmox.com Thu Nov 21 11:00:50 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 21 Nov 2024 11:00:50 +0100
Subject: [pbs-devel] [PATCH v6 proxmox-backup 27/29] api: datastore/namespace: return backup groups delete stats on remove
In-Reply-To: <5cf14b9b-2e58-4084-87ac-b44f2e5d1d83@proxmox.com>
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-28-c.ebner@proxmox.com> <5cf14b9b-2e58-4084-87ac-b44f2e5d1d83@proxmox.com>
Message-ID: <33ef0535-3331-4126-ae20-96ff31a11b37@proxmox.com>

On 11/21/24 10:27, Thomas Lamprecht wrote:
> Am 31.10.24 um 13:15 schrieb Christian Ebner:
>> Add and expose the backup group delete statistics by adding the
>> return type to the corresponding REST API endpoints.
>>
>> Further, add an `error-on-protected` flag to the api endpoints,
>> allowing to return without error when set to false. Default remains
>> enabled.
>
> This first sounded like it should be two patches, but they are related in the
> sense that the other side needs both to differ between "backup went generally
> fine" and "all was OK but could not delete some snapshots that are protected".
>
> So please add that rationale, or a better one if I was off, to the commit
> message. As while the "what" can be nice to have as a summary without reading
> all the code, the "why" is very important to see how it overall fits into the
> picture (of a series but also the project as a whole), at least for me that is.
Okay, will extend this commit message with more reasoning on why to add the
flag and who it is used by.

From g.goller at proxmox.com Thu Nov 21 11:04:07 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Thu, 21 Nov 2024 11:04:07 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v3 1/3] fix #3786: api: add resync-corrupt option to sync jobs
In-Reply-To: <173210826421.198988.14774192201672116937@yuna.proxmox.com>
References: <20241105104015.162094-1-g.goller@proxmox.com> <20241105104015.162094-2-g.goller@proxmox.com> <173210826421.198988.14774192201672116937@yuna.proxmox.com>
Message-ID:

On 20.11.2024 14:11, Fabian Grünbichler wrote:
>a few small nits inline, looks good to me otherwise, but given the size of this
>and the size of the push series, I'd rather this be rebased on top of the other
>one ;)

Sure, shouldn't be a lot of work. Should I send a rebased version on top
of the current push series as a v4?

>Quoting Gabriel Goller (2024-11-05 11:40:13)
>> diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
>> index 414ec878d01a..e6174322dad6 100644
>> --- a/pbs-datastore/src/backup_info.rs
>> +++ b/pbs-datastore/src/backup_info.rs
>> @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error};
>> use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
>>
>> use pbs_api_types::{
>> - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX,
>> + Authid, BackupNamespace, BackupType, GroupFilter, VerifyState, BACKUP_DATE_REGEX,
>> + BACKUP_FILE_REGEX,
>> };
>> use pbs_config::{open_backup_lockfile, BackupLockGuard};
>>
>> @@ -583,6 +584,15 @@ impl BackupDir {
>>
>> Ok(())
>> }
>> +
>> + /// Load the verify state from the manifest.
>> + pub fn verify_state(&self) -> Option<VerifyState> {
>
>should this be a Result<Option<VerifyState>> to allow differentiation between no
>verification state, and failure to parse or load the manifest? that would allow
>us to resync totally corrupted snapshots as well (although that might be
>considered out of scope based on the parameter description ;))

Yep it was already like this in the first version, no idea why I changed
it. Like this we can return the load_manifest error with the Result and
swallow the inner error with `.ok()` as it doesn't matter anymore.

pub fn verify_state(&self) -> Result<Option<VerifyState>, anyhow::Error> {
    let manifest = self.load_manifest()?;
    Ok(manifest.0.verify_state().ok().flatten().map(|svs| svs.state))
}

I think we also want to resync on errors when reading the manifest, I'll
include that in the next version!
Something like this maybe:

match local_dir.verify_state() {
    Ok(Some(state)) => {
        if state == VerifyState::Failed {
            return Some((dir, true));
        }
    },
    Ok(None) => {
        // This means there either was an error parsing the manifest, or the
        // verify_state item was not found. This could be a new backup.
    }
    Err(_) => {
        // There was an error loading the manifest, probably better if we
        // resync.
        return Some((dir, true));
    }
}

>> + if let Ok(manifest) = self.load_manifest() {
>> + manifest.0.verify_state()
>> + } else {
>> + None
>> + }
>> + }
>> }
>>
>> impl AsRef for BackupDir {
>> diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
>> index c3df014272a0..623c1499c0bb 100644
>> --- a/pbs-datastore/src/manifest.rs
>> +++ b/pbs-datastore/src/manifest.rs
>> @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error};
>> use serde::{Deserialize, Serialize};
>> use serde_json::{json, Value};
>>
>> -use pbs_api_types::{BackupType, CryptMode, Fingerprint};
>> +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState, VerifyState};
>> use pbs_tools::crypt_config::CryptConfig;
>>
>> pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
>> @@ -242,6 +242,17 @@ impl BackupManifest {
>> let manifest: BackupManifest = serde_json::from_value(json)?;
>> Ok(manifest)
>> }
>> +
>> + /// Get the verify state of the snapshot
>> + ///
>> + /// Note: New snapshots, which have not been verified yet, do not have a status and this
>> + /// function will return `None`.
>> + pub fn verify_state(&self) -> Option<SnapshotVerifyState> {
>
>should this be a Result<Option<SnapshotVerifyState>> to allow differentiation between no
>verification state, and failure to parse?

Hmm so I could return a Result<Option<SnapshotVerifyState>> by checking the
error of the serde_json::from_value call. I could check if the "verify_state"
value wasn't found in the manifest by calling `is_eof` [0] and if not, return
an Ok(None), otherwise return an Error. This will make it more complicated
for all the callers though; also, 99% of the callers will treat Err the same
as Ok(None) anyways. LMK what you think!

/// Get the verify state of the snapshot
///
/// Note: New snapshots, which have not been verified yet, do not have a status and this
/// function will return `Ok(None)`.
pub fn verify_state(&self) -> Result<Option<SnapshotVerifyState>, anyhow::Error> {
    let verify = self.unprotected["verify_state"].clone();
    match serde_json::from_value::<SnapshotVerifyState>(verify) {
        Err(err) => {
            // `verify_state` item has not been found
            if err.is_eof() {
                Ok(None)
            } else {
                Err(err.into())
            }
        },
        Ok(svs) => {
            Ok(Some(svs))
        }
    }
}

Else I could just return a Result<SnapshotVerifyState>.

[0]: https://docs.rs/serde_json/latest/serde_json/struct.Error.html#method.is_eof

>also, it would be great if existing code retrieving this could be adapted to
>use these new helpers, which would require having the Result there as well..

Yep, overlooked those, my bad.
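Adapting them should be mostly mechanical; I'd picture the call sites doing
something roughly like this (untested sketch, the exact locations, logging
and error handling may well differ):

// hypothetical call site: where only display matters, log parse/load
// errors and treat the snapshot the same as "not verified"
let verify_state = manifest.verify_state().unwrap_or_else(|err| {
    eprintln!("error parsing verification state: {err}");
    None
});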
>> Err(err) => { >> if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( >> snapshot.backup_ns(), >> @@ -468,8 +476,15 @@ async fn pull_snapshot_from<'a>( >> } >> } >> } else { >> - info!("re-sync snapshot {}", snapshot.dir()); >> - pull_snapshot(reader, snapshot, downloaded_chunks).await? >> + if corrupt { >> + info!( >> + "re-sync snapshot {} due to bad verification result", > >nit: why not call it "corrupt", since that is what the parameter is called? ack >> + snapshot.dir() >> + ); >> + } else { >> + info!("re-sync snapshot {}", snapshot.dir()); >> + } >> + pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await? >> }; >> >> Ok(sync_stats) From f.gruenbichler at proxmox.com Thu Nov 21 11:09:53 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?Q?Fabian_Gr=C3=BCnbichler?=) Date: Thu, 21 Nov 2024 11:09:53 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup v3 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: References: <20241105104015.162094-1-g.goller@proxmox.com> <20241105104015.162094-2-g.goller@proxmox.com> <173210826421.198988.14774192201672116937@yuna.proxmox.com> Message-ID: <2040736603.7366.1732183793621@webmail.proxmox.com> > Gabriel Goller hat am 21.11.2024 11:04 CET geschrieben: > > > On 20.11.2024 14:11, Fabian Gr?nbichler wrote: > >a few small nits inline, looks good to me otherwise, but given the size of this > >and the size of the push series, I'd rather this be rebased on top of the other > >one ;) > > Sure, shouldn't be a lot of work. Should I send a rebased version on top > of the current push series as a v4? yes, but please wait until it's applied (there have been a few changes queued on-top where I am not sure whether they might cause more conflicts ;)) > > >Quoting Gabriel Goller (2024-11-05 11:40:13) > >> diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs > >> index 414ec878d01a..e6174322dad6 100644 > >> --- a/pbs-datastore/src/backup_info.rs > >> +++ b/pbs-datastore/src/backup_info.rs > >> @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error}; > >> use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; > >> > >> use pbs_api_types::{ > >> - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, > >> + Authid, BackupNamespace, BackupType, GroupFilter, VerifyState, BACKUP_DATE_REGEX, > >> + BACKUP_FILE_REGEX, > >> }; > >> use pbs_config::{open_backup_lockfile, BackupLockGuard}; > >> > >> @@ -583,6 +584,15 @@ impl BackupDir { > >> > >> Ok(()) > >> } > >> + > >> + /// Load the verify state from the manifest. > >> + pub fn verify_state(&self) -> Option { > > > >should this be a Result> to allow differentiation between no > >verification state, and failure to parse or load the manifest? that would allow > >us to resync totally corrupted snapshots as well (although that might be > >considered out of scope based on the parameter description ;)) > > Yep it was already like this in the first version, no idea why I changed > it. Like this we can return the load_manifest error with the Result and > swallow the inner error with a `ok()` as it doesn't matter anymore. > > pub fn verify_state(&self) -> Result, anyhow::Error> { > let manifest = self.load_manifest()?; > Ok(manifest.0.verify_state().ok().flatten().map(|svs| svs.state)) > } > > > I think we also want to resync on errors when reading the manifest, I'll > include that in the next version! 
Something like this maybe:
>
> match local_dir.verify_state() {
> Ok(Some(state)) => {
> if state == VerifyState::Failed {
> return Some((dir, true));
> }
> },
> Ok(None) => {
> // This means there either was an error parsing the manifest, or the
> // verify_state item was not found. This could be a new backup.

IMHO this should only be reached if no verification state is in the manifest
(because no verification has happened yet), but the manifest was otherwise
completely parseable. this can be treated the same as an okay verify state,
since we can't know any better.

> }
> Err(_) => {
> // There was an error loading the manifest, probably better if we
> // resync.
> return Some((dir, true));
> }
> }
>
> >> + if let Ok(manifest) = self.load_manifest() {
> >> + manifest.0.verify_state()
> >> + } else {
> >> + None
> >> + }
> >> + }
> >> }
> >>
> >> impl AsRef for BackupDir {
> >> diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
> >> index c3df014272a0..623c1499c0bb 100644
> >> --- a/pbs-datastore/src/manifest.rs
> >> +++ b/pbs-datastore/src/manifest.rs
> >> @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error};
> >> use serde::{Deserialize, Serialize};
> >> use serde_json::{json, Value};
> >>
> >> -use pbs_api_types::{BackupType, CryptMode, Fingerprint};
> >> +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState, VerifyState};
> >> use pbs_tools::crypt_config::CryptConfig;
> >>
> >> pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
> >> @@ -242,6 +242,17 @@ impl BackupManifest {
> >> let manifest: BackupManifest = serde_json::from_value(json)?;
> >> Ok(manifest)
> >> }
> >> +
> >> + /// Get the verify state of the snapshot
> >> + ///
> >> + /// Note: New snapshots, which have not been verified yet, do not have a status and this
> >> + /// function will return `None`.
> >> + pub fn verify_state(&self) -> Option<SnapshotVerifyState> {
> >
> >should this be a Result<Option<SnapshotVerifyState>> to allow differentiation between no
> >verification state, and failure to parse?
>
> Hmm so I could return a Result<Option<SnapshotVerifyState>> by checking the
> error of the serde_json::from_value call. I could check if the "verify_state"
> value wasn't found in the manifest by calling `is_eof` [0] and if not, return
> an Ok(None), otherwise return an Error. This will make it more complicated
> for all the callers though; also, 99% of the callers will treat Err the same
> as Ok(None) anyways. LMK what you think!
>
> /// Get the verify state of the snapshot
> ///
> /// Note: New snapshots, which have not been verified yet, do not have a status and this
> /// function will return `Ok(None)`.
> pub fn verify_state(&self) -> Result<Option<SnapshotVerifyState>, anyhow::Error> {
> let verify = self.unprotected["verify_state"].clone();

can't you just check here whether we have a value and return None otherwise?

> match serde_json::from_value::<SnapshotVerifyState>(verify) {
> Err(err) => {

then this can just bubble up the error?

> // `verify_state` item has not been found
> if err.is_eof() {
> Ok(None)
> } else {
> Err(err.into())
> }
> },
> Ok(svs) => {
> Ok(Some(svs))
> }
> }
> }
>
>
> Else I could just return a Result<SnapshotVerifyState>.

I think differentiating between Ok(Some(state)), Ok(None) and Err(err) is
important here, so I'd rather not do that ;)

> [0]: https://docs.rs/serde_json/latest/serde_json/struct.Error.html#method.is_eof
>
> >also, it would be great if existing code retrieving this could be adapted to
> >use these new helpers, which would require having the Result there as well..
>
> Yep, overlooked those, my bad.
> > >> @@ -381,7 +388,7 @@ async fn pull_snapshot<'a>( > >> let mut path = snapshot.full_path(); > >> path.push(&item.filename); > >> > >> - if path.exists() { > >> + if !corrupt && path.exists() { > >> match ArchiveType::from_path(&item.filename)? { > >> ArchiveType::DynamicIndex => { > >> let index = DynamicIndexReader::open(&path)?; > >> @@ -443,6 +450,7 @@ async fn pull_snapshot_from<'a>( > >> reader: Arc, > >> snapshot: &'a pbs_datastore::BackupDir, > >> downloaded_chunks: Arc>>, > >> + corrupt: bool, > >> ) -> Result { > >> let (_path, is_new, _snap_lock) = snapshot > >> .datastore() > >> @@ -451,7 +459,7 @@ async fn pull_snapshot_from<'a>( > >> let sync_stats = if is_new { > > > >is_new and corrupt are never both true.. > > > >> info!("sync snapshot {}", snapshot.dir()); > >> > >> - match pull_snapshot(reader, snapshot, downloaded_chunks).await { > >> + match pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await { > > > >so this should be always false ;) > > Agree, wrote a comment and passed directly `false`. > > >> Err(err) => { > >> if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( > >> snapshot.backup_ns(), > >> @@ -468,8 +476,15 @@ async fn pull_snapshot_from<'a>( > >> } > >> } > >> } else { > >> - info!("re-sync snapshot {}", snapshot.dir()); > >> - pull_snapshot(reader, snapshot, downloaded_chunks).await? > >> + if corrupt { > >> + info!( > >> + "re-sync snapshot {} due to bad verification result", > > > >nit: why not call it "corrupt", since that is what the parameter is called? > > ack > > >> + snapshot.dir() > >> + ); > >> + } else { > >> + info!("re-sync snapshot {}", snapshot.dir()); > >> + } > >> + pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await? > >> }; > >> > >> Ok(sync_stats) From g.goller at proxmox.com Thu Nov 21 11:18:18 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 11:18:18 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 2/3] fix #3786: ui/cli: add resync-corrupt option on sync-jobs In-Reply-To: <173210833764.198988.6832521006255309425@yuna.proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> <20241105104015.162094-3-g.goller@proxmox.com> <173210833764.198988.6832521006255309425@yuna.proxmox.com> Message-ID: On 20.11.2024 14:12, Fabian Gr?nbichler wrote: >nit: might look better moved to the right side of the window, but no hard >feelings either way Every "advanced options" popout of ours (at least pbs) currently aligns to left, so I wouldn't change that :) From g.goller at proxmox.com Thu Nov 21 11:30:52 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 11:30:52 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 1/3] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <2040736603.7366.1732183793621@webmail.proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> <20241105104015.162094-2-g.goller@proxmox.com> <173210826421.198988.14774192201672116937@yuna.proxmox.com> <2040736603.7366.1732183793621@webmail.proxmox.com> Message-ID: On 21.11.2024 11:09, Fabian Gr?nbichler wrote: >> Gabriel Goller hat am 21.11.2024 11:04 CET geschrieben: >> >> >> On 20.11.2024 14:11, Fabian Gr?nbichler wrote: >> >a few small nits inline, looks good to me otherwise, but given the size of this >> >and the size of the push series, I'd rather this be rebased on top of the other >> >one ;) >> >> Sure, shouldn't be a lot of work. Should I send a rebased version on top >> of the current push series as a v4? 
> >yes, but please wait until it's applied (there have been a few changes queued on-top where I am not sure whether they might cause more conflicts ;)) Ok, will wait with the next version until that series is applied! >> >Quoting Gabriel Goller (2024-11-05 11:40:13) >> >> diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs >> >> index 414ec878d01a..e6174322dad6 100644 >> >> --- a/pbs-datastore/src/backup_info.rs >> >> +++ b/pbs-datastore/src/backup_info.rs >> >> @@ -8,7 +8,8 @@ use anyhow::{bail, format_err, Error}; >> >> use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; >> >> >> >> use pbs_api_types::{ >> >> - Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, >> >> + Authid, BackupNamespace, BackupType, GroupFilter, VerifyState, BACKUP_DATE_REGEX, >> >> + BACKUP_FILE_REGEX, >> >> }; >> >> use pbs_config::{open_backup_lockfile, BackupLockGuard}; >> >> >> >> @@ -583,6 +584,15 @@ impl BackupDir { >> >> >> >> Ok(()) >> >> } >> >> + >> >> + /// Load the verify state from the manifest. >> >> + pub fn verify_state(&self) -> Option { >> > >> >should this be a Result> to allow differentiation between no >> >verification state, and failure to parse or load the manifest? that would allow >> >us to resync totally corrupted snapshots as well (although that might be >> >considered out of scope based on the parameter description ;)) >> >> Yep it was already like this in the first version, no idea why I changed >> it. Like this we can return the load_manifest error with the Result and >> swallow the inner error with a `ok()` as it doesn't matter anymore. >> >> pub fn verify_state(&self) -> Result, anyhow::Error> { >> let manifest = self.load_manifest()?; >> Ok(manifest.0.verify_state().ok().flatten().map(|svs| svs.state)) >> } >> >> >> I think we also want to resync on errors when reading the manifest, I'll >> include that in the next version! Something like this maybe: >> >> match local_dir.verify_state() { >> Ok(Some(state)) => { >> if state == VerifyState::Failed { >> return Some((dir, true)); >> } >> }, >> Ok(None) => { >> // This means there either was an error parsing the manifest, or the >> // verify_state item was not found. This could be a new backup. > >IMHO this should only be reached if no verification state is in the manifest (because no verification has happened yet), but the manifest was otherwise completely parseable. this can be treated the same as an okay verify state, since we can't know any better. Oh, right. >> } >> Err(_) => { >> // There was an error loading the manifest, probably better if we >> // resync. 
>> return Some((dir, true));
>> }
>> }
>>
>> >> + if let Ok(manifest) = self.load_manifest() {
>> >> + manifest.0.verify_state()
>> >> + } else {
>> >> + None
>> >> + }
>> >> + }
>> >> }
>> >>
>> >> impl AsRef for BackupDir {
>> >> diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
>> >> index c3df014272a0..623c1499c0bb 100644
>> >> --- a/pbs-datastore/src/manifest.rs
>> >> +++ b/pbs-datastore/src/manifest.rs
>> >> @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error};
>> >> use serde::{Deserialize, Serialize};
>> >> use serde_json::{json, Value};
>> >>
>> >> -use pbs_api_types::{BackupType, CryptMode, Fingerprint};
>> >> +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState, VerifyState};
>> >> use pbs_tools::crypt_config::CryptConfig;
>> >>
>> >> pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
>> >> @@ -242,6 +242,17 @@ impl BackupManifest {
>> >> let manifest: BackupManifest = serde_json::from_value(json)?;
>> >> Ok(manifest)
>> >> }
>> >> +
>> >> + /// Get the verify state of the snapshot
>> >> + ///
>> >> + /// Note: New snapshots, which have not been verified yet, do not have a status and this
>> >> + /// function will return `None`.
>> >> + pub fn verify_state(&self) -> Option<SnapshotVerifyState> {
>> >
>> >should this be a Result<Option<SnapshotVerifyState>> to allow differentiation between no
>> >verification state, and failure to parse?
>>
>> Hmm so I could return a Result<Option<SnapshotVerifyState>> by checking the
>> error of the serde_json::from_value call. I could check if the "verify_state"
>> value wasn't found in the manifest by calling `is_eof` [0] and if not, return
>> an Ok(None), otherwise return an Error. This will make it more complicated
>> for all the callers though; also, 99% of the callers will treat Err the same
>> as Ok(None) anyways. LMK what you think!
>>
>> /// Get the verify state of the snapshot
>> ///
>> /// Note: New snapshots, which have not been verified yet, do not have a status and this
>> /// function will return `Ok(None)`.
>> pub fn verify_state(&self) -> Result<Option<SnapshotVerifyState>, anyhow::Error> {
>> let verify = self.unprotected["verify_state"].clone();
>
>can't you just check here whether we have a value and return None otherwise?

Yep, I can check with `value.is_null()`.

>> match serde_json::from_value::<SnapshotVerifyState>(verify) {
>> Err(err) => {
>
>then this can just bubble up the error?

ack

>> // `verify_state` item has not been found
>> if err.is_eof() {
>> Ok(None)
>> } else {
>> Err(err.into())
>> }
>> },
>> Ok(svs) => {
>> Ok(Some(svs))
>> }
>> }
>> }
>>
>>
>> Else I could just return a Result<SnapshotVerifyState>.
>
>I think differentiating between Ok(Some(state)), Ok(None) and Err(err) is
>important here, so I'd rather not do that ;)

ack. Thanks for the review!
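So the manifest helper for v4 will probably end up looking roughly like this
(untested sketch combining both suggestions, naming and details not final):

/// Get the verify state of the snapshot
///
/// Note: snapshots that have not been verified yet simply have no state
/// stored in the manifest, so they yield `Ok(None)`; parse errors bubble
/// up to the caller instead of being swallowed.
pub fn verify_state(&self) -> Result<Option<SnapshotVerifyState>, anyhow::Error> {
    let verify = self.unprotected["verify_state"].clone();
    if verify.is_null() {
        // not verified yet, callers treat this like an okay state
        return Ok(None);
    }
    Ok(Some(serde_json::from_value::<SnapshotVerifyState>(verify)?))
}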
From f.gruenbichler at proxmox.com Thu Nov 21 13:08:09 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 13:08:09 +0100 Subject: [pbs-devel] applied-series: [PATCH v7 proxmox-backup 00/31] fix #3044: push datastore to remote target In-Reply-To: <20241111154353.482734-1-c.ebner@proxmox.com> References: <20241111154353.482734-1-c.ebner@proxmox.com> Message-ID: <1732190636.gvupqrayxw.astroid@yuna.none> with some follow-ups, notably: - dropped the named features, checking just the api version instead - simplified the namespace filtering - added a new datastore_api_path helper - add a check whether the source namespace anchor exists - simplify the snapshot selection for remove_vanished - reduce the (expensive) queries to the remote snapshot lists - some code simplification and style cleanups the error handling/contexts could still benefit from some attention (mostly making clear where an error from the remote side is bubbled up, since the ACL paths are confusing if that context is not included), but that can be done as follow-up.. full diff of changes on top of the series as it is on-list: diff --git a/Cargo.toml b/Cargo.toml index 2fa1f04bf..6c9bf878c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace.package] -version = "3.2.10" +version = "3.2.11" authors = [ "Dietmar Maurer ", "Dominik Csapak ", diff --git a/debian/changelog b/debian/changelog index d200b063c..b69ac905d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,11 @@ +rust-proxmox-backup (3.2.11-1) bookworm; urgency=medium + + * fix #3044: server: implement push support for sync operations + + * push sync related refactors + + -- Proxmox Support Team Thu, 21 Nov 2024 12:03:50 +0100 + rust-proxmox-backup (3.2.10-1) bookworm; urgency=medium * api: disk list: do not fail but just log error on gathering smart data diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs index 7a4c6cb74..80f87e372 100644 --- a/pbs-api-types/src/version.rs +++ b/pbs-api-types/src/version.rs @@ -20,14 +20,6 @@ use proxmox_schema::api; description: "Version repository id", type: String, }, - "features": { - description: "List of supported features", - type: Array, - items: { - type: String, - description: "Feature id", - }, - }, } )] #[derive(serde::Deserialize, serde::Serialize)] @@ -35,8 +27,6 @@ pub struct ApiVersionInfo { pub version: String, pub release: String, pub repoid: String, - #[serde(default, skip_serializing_if = "Vec::is_empty")] - pub features: Vec, } pub type ApiVersionMajor = u64; @@ -48,7 +38,6 @@ pub struct ApiVersion { pub minor: ApiVersionMinor, pub release: ApiVersionRelease, pub repoid: String, - pub features: Vec, } impl TryFrom for ApiVersion { @@ -76,13 +65,6 @@ impl TryFrom for ApiVersion { minor, release, repoid: value.repoid.clone(), - features: value.features.clone(), }) } } - -impl ApiVersion { - pub fn supports_feature(&self, feature: &str) -> bool { - self.features.iter().any(|f| f == feature) - } -} diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 97d599e30..78eb73205 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -76,24 +76,31 @@ pub fn check_sync_job_modify_access( match sync_direction { SyncDirection::Pull => { let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); - if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 - || ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 - { + + // job visibility check + if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 { + return false; + 
} + + // creating backups on target check + if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 { return false; } if let Some(true) = job.remove_vanished { + // pruning backups on target check if ns_anchor_privs & PRIV_DATASTORE_PRUNE == 0 { return false; } } - // same permission as changing ownership after syncing + // same permission as changing ownership after syncing on the target side if !is_correct_owner(auth_id, job) && ns_anchor_privs & PRIV_DATASTORE_MODIFY == 0 { return false; } if let Some(remote) = &job.remote { + // remote read access check let remote_privs = user_info.lookup_privs(auth_id, &["remote", remote, &job.remote_store]); return remote_privs & PRIV_REMOTE_READ != 0; @@ -127,13 +134,13 @@ pub fn check_sync_job_modify_access( return true; } - // check user is not the owner of the sync job, but has datastore modify permissions, - // which implies permissions to change group ownership + // check datastore modify permission if user is not the owner of the sync job + // this implies permissions to change group ownership if !is_correct_owner(auth_id, job) && source_privs & PRIV_DATASTORE_MODIFY == 0 { return false; } - // user has Datastore.Modify, check also for Datastore.Backup to allow modify access + // no read on full datastore, so check backup access for owned backups source_privs & PRIV_DATASTORE_BACKUP != 0 } } diff --git a/src/api2/version.rs b/src/api2/version.rs index da2cb74b4..4d104f2d6 100644 --- a/src/api2/version.rs +++ b/src/api2/version.rs @@ -8,8 +8,6 @@ use proxmox_schema::api; use pbs_api_types::ApiVersionInfo; -const FEATURES: &[&str] = &["prune-delete-stats"]; - #[api( returns: { type: ApiVersionInfo, @@ -28,7 +26,6 @@ fn version( version: pbs_buildcfg::PROXMOX_PKG_VERSION.to_string(), release: pbs_buildcfg::PROXMOX_PKG_RELEASE.to_string(), repoid: pbs_buildcfg::PROXMOX_PKG_REPOID.to_string(), - features: FEATURES.iter().map(|feature| feature.to_string()).collect(), }) } diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index f91d5bf29..d887dc1d5 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -647,8 +647,10 @@ async fn run() -> Result<(), Error> { CliCommand::new(&API_METHOD_PUSH_DATASTORE) .arg_param(&["store", "remote", "remote-store"]) .completion_cb("store", pbs_config::datastore::complete_datastore_name) + .completion_cb("ns", complete_sync_local_datastore_namespace) .completion_cb("remote", pbs_config::remote::complete_remote_name) - .completion_cb("remote-store", complete_remote_datastore_name), + .completion_cb("remote-store", complete_remote_datastore_name) + .completion_cb("remote-ns", complete_remote_datastore_namespace), ) .insert( "verify", diff --git a/src/server/pull.rs b/src/server/pull.rs index e00187764..08b55956c 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -739,15 +739,9 @@ pub(crate) async fn pull_store(mut params: PullParameters) -> Result Authid { self.remote.config.auth_id.clone() } + + fn datastore_api_path(&self, endpoint: &str) -> String { + format!( + "api2/json/admin/datastore/{store}/{endpoint}", + store = self.repo.store() + ) + } } /// Parameters for a push operation @@ -92,11 +99,16 @@ impl PushParameters { remote_ns.check_max_depth(max_depth)?; }; let remove_vanished = remove_vanished.unwrap_or(false); + let store = DataStore::lookup_datastore(store, Some(Operation::Read))?; + + if !store.namespace_exists(&ns) { + bail!( + "Source namespace '{ns}' doesn't exist in datastore '{store}'!", + store = store.name() + ); + } - let source = 
Arc::new(LocalSource { - store: DataStore::lookup_datastore(store, Some(Operation::Read))?, - ns, - }); + let source = Arc::new(LocalSource { store, ns }); let (remote_config, _digest) = pbs_config::remote::config()?; let remote: Remote = remote_config.lookup("remote", remote_id)?; @@ -114,11 +126,15 @@ impl PushParameters { let data = result["data"].take(); let version_info: ApiVersionInfo = serde_json::from_value(data)?; let api_version = ApiVersion::try_from(version_info)?; - let supports_prune_delete_stats = api_version.supports_feature("prune-delete-stats"); + + // push assumes namespace support on the remote side, fail early if missing if api_version.major < 2 || (api_version.major == 2 && api_version.minor < 2) { bail!("unsupported remote api version, minimum v2.2 required"); } + let supports_prune_delete_stats = api_version.major > 3 + || (api_version.major == 3 && api_version.minor >= 2 && api_version.release >= 11); + let target = PushTarget { remote, repo, @@ -163,10 +179,7 @@ fn check_ns_remote_datastore_privs( // Fetch the list of namespaces found on target async fn fetch_target_namespaces(params: &PushParameters) -> Result, Error> { - let api_path = format!( - "api2/json/admin/datastore/{store}/namespace", - store = params.target.repo.store(), - ); + let api_path = params.target.datastore_api_path("namespace"); let mut result = params.target.client.get(&api_path, None).await?; let namespaces: Vec = serde_json::from_value(result["data"].take())?; let mut namespaces: Vec = namespaces @@ -188,12 +201,9 @@ async fn remove_target_namespace( } check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) - .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; + .map_err(|err| format_err!("Pruning remote datastore namespaces not allowed - {err}"))?; - let api_path = format!( - "api2/json/admin/datastore/{store}/namespace", - store = params.target.repo.store(), - ); + let api_path = params.target.datastore_api_path("namespace"); let mut args = serde_json::json!({ "ns": target_namespace.name(), @@ -222,25 +232,20 @@ async fn fetch_target_groups( params: &PushParameters, target_namespace: &BackupNamespace, ) -> Result<(Vec, HashSet), Error> { - let api_path = format!( - "api2/json/admin/datastore/{store}/groups", - store = params.target.repo.store(), - ); + let api_path = params.target.datastore_api_path("groups"); let args = Some(serde_json::json!({ "ns": target_namespace.name() })); let mut result = params.target.client.get(&api_path, args).await?; let groups: Vec = serde_json::from_value(result["data"].take())?; - let (mut owned, not_owned) = groups.iter().fold( + let (mut owned, not_owned) = groups.into_iter().fold( (Vec::new(), HashSet::new()), |(mut owned, mut not_owned), group| { - if let Some(ref owner) = group.owner { - if params.target.remote_user() == *owner { - owned.push(group.backup.clone()); - return (owned, not_owned); - } + if Some(params.target.remote_user()) == group.owner { + owned.push(group.backup); + } else { + not_owned.insert(group.backup); } - not_owned.insert(group.backup.clone()); (owned, not_owned) }, ); @@ -259,22 +264,15 @@ async fn remove_target_group( check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; - let api_path = format!( - "api2/json/admin/datastore/{store}/groups", - store = params.target.repo.store(), - ); + let api_path = params.target.datastore_api_path("groups"); 
- let mut args = serde_json::json!({ - "backup-id": backup_group.id, - "backup-type": backup_group.ty, - }); + let mut args = serde_json::json!(backup_group); + args["ns"] = serde_json::to_value(target_namespace.name())?; if params.target.supports_prune_delete_stats { args["error-on-protected"] = serde_json::to_value(false)?; } - args["ns"] = serde_json::to_value(target_namespace.name())?; - let mut result = params.target.client.delete(&api_path, Some(args)).await?; if params.target.supports_prune_delete_stats { @@ -289,10 +287,10 @@ async fn remove_target_group( // Check if the namespace is already present on the target, create it otherwise async fn check_or_create_target_namespace( params: &PushParameters, - target_namespaces: &[BackupNamespace], + existing_target_namespaces: &mut Vec, target_namespace: &BackupNamespace, ) -> Result<(), Error> { - if !target_namespace.is_root() && !target_namespaces.contains(target_namespace) { + if !target_namespace.is_root() && !existing_target_namespaces.contains(target_namespace) { // Namespace not present on target, create namespace. // Sub-namespaces have to be created by creating parent components first. @@ -303,25 +301,22 @@ async fn check_or_create_target_namespace( for component in target_namespace.components() { let current = BackupNamespace::from_parent_ns(&parent, component.to_string())?; // Skip over pre-existing parent namespaces on target - if target_namespaces.contains(¤t) { + if existing_target_namespaces.contains(¤t) { parent = current; continue; } - let api_path = format!( - "api2/json/admin/datastore/{store}/namespace", - store = params.target.repo.store(), - ); + let api_path = params.target.datastore_api_path("namespace"); let mut args = serde_json::json!({ "name": component.to_string() }); if !parent.is_root() { args["parent"] = serde_json::to_value(parent.clone())?; } - let target_store_and_ns = print_store_and_ns(params.target.repo.store(), ¤t); match params.target.client.post(&api_path, Some(args)).await { - Ok(_) => info!("Created new namespace on target: {target_store_and_ns}"), - Err(err) => bail!( - "Sync into {target_store_and_ns} failed - namespace creation failed: {err}" - ), + Ok(_) => info!("Created new namespace on target: {current}"), + Err(err) => { + bail!("Remote creation of namespace {current} failed, remote returned: {err}") + } } + existing_target_namespaces.push(current.clone()); parent = current; } } @@ -334,38 +329,40 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result Result { errors |= sync_errors; stats.add(sync_stats); @@ -390,10 +392,10 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result Result { errors = true; - info!("Encountered errors while syncing namespace {namespace} - {err}"); + info!("Encountered errors while syncing namespace {source_namespace} - {err}"); } } } @@ -411,23 +413,20 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result = existing_target_namespaces + .into_iter() + .filter(|target_namespace| { + params + .target + .ns + .contains(&target_namespace) + .map(|sub_depth| sub_depth <= max_depth) + .unwrap_or(false) + }) + .collect(); // Sort by namespace length and revert for sub-namespaces to be removed before parents target_sub_namespaces.sort_unstable_by_key(|a| a.name_len()); @@ -577,10 +576,7 @@ async fn fetch_target_snapshots( target_namespace: &BackupNamespace, group: &BackupGroup, ) -> Result, Error> { - let api_path = format!( - "api2/json/admin/datastore/{store}/snapshots", - store = params.target.repo.store(), - ); + 
let api_path = params.target.datastore_api_path("snapshots"); let mut args = serde_json::to_value(group)?; if !target_namespace.is_root() { args["ns"] = serde_json::to_value(target_namespace)?; @@ -591,16 +587,6 @@ async fn fetch_target_snapshots( Ok(snapshots) } -async fn fetch_previous_backup_time( - params: &PushParameters, - target_namespace: &BackupNamespace, - group: &BackupGroup, -) -> Result, Error> { - let mut snapshots = fetch_target_snapshots(params, target_namespace, group).await?; - snapshots.sort_unstable_by(|a, b| a.backup.time.cmp(&b.backup.time)); - Ok(snapshots.last().map(|snapshot| snapshot.backup.time)) -} - async fn forget_target_snapshot( params: &PushParameters, target_namespace: &BackupNamespace, @@ -609,10 +595,7 @@ async fn forget_target_snapshot( check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; - let api_path = format!( - "api2/json/admin/datastore/{store}/snapshots", - store = params.target.repo.store(), - ); + let api_path = params.target.datastore_api_path("snapshots"); let mut args = serde_json::to_value(snapshot)?; if !target_namespace.is_root() { args["ns"] = serde_json::to_value(target_namespace)?; @@ -650,8 +633,12 @@ pub(crate) async fn push_group( .unwrap_or_default(); let target_namespace = params.map_to_target(namespace)?; - let last_snapshot_time = fetch_previous_backup_time(params, &target_namespace, group) - .await? + let mut target_snapshots = fetch_target_snapshots(params, &target_namespace, group).await?; + target_snapshots.sort_unstable_by_key(|a| a.backup.time); + + let last_snapshot_time = target_snapshots + .last() + .map(|snapshot| snapshot.backup.time) .unwrap_or(i64::MIN); let mut source_snapshots = HashSet::new(); @@ -684,20 +671,9 @@ pub(crate) async fn push_group( progress.group_snapshots = snapshots.len() as u64; - let target_snapshots = fetch_target_snapshots(params, &target_namespace, group).await?; - let target_snapshots: Vec = target_snapshots - .into_iter() - .map(|snapshot| snapshot.backup) - .collect(); - let mut stats = SyncStats::default(); let mut fetch_previous_manifest = !target_snapshots.is_empty(); for (pos, source_snapshot) in snapshots.into_iter().enumerate() { - if target_snapshots.contains(&source_snapshot) { - progress.done_snapshots = pos as u64 + 1; - info!("percentage done: {progress}"); - continue; - } let result = push_snapshot(params, namespace, &source_snapshot, fetch_previous_manifest).await; fetch_previous_manifest = true; @@ -711,7 +687,6 @@ pub(crate) async fn push_group( } if params.remove_vanished { - let target_snapshots = fetch_target_snapshots(params, &target_namespace, group).await?; for snapshot in target_snapshots { if source_snapshots.contains(&snapshot.backup.time) { continue; @@ -814,7 +789,7 @@ pub(crate) async fn push_snapshot( }; // Avoid double upload penalty by remembering already seen chunks - let known_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 1024))); + let known_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64 * 1024))); for entry in source_manifest.files() { let mut path = backup_dir.full_path(); @@ -882,7 +857,7 @@ pub(crate) async fn push_snapshot( } } } else { - warn!("{path:?} does not exist, skipped."); + bail!("{path:?} does not exist, aborting upload."); } } diff --git a/src/server/sync.rs b/src/server/sync.rs index 4ce0777bf..a0157ab2d 100644 --- a/src/server/sync.rs +++ b/src/server/sync.rs @@ -21,7 +21,6 @@ use pbs_api_types::{ 
 SyncDirection, SyncJobConfig, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
 };
 use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
-use pbs_config::CachedUserInfo;
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::manifest::CLIENT_LOG_BLOB_NAME;
 use pbs_datastore::read_chunk::AsyncReadChunk;
@@ -215,8 +214,7 @@ impl SyncSourceReader for LocalSourceReader {
     }
 }
 
-pub type NamespaceFilter =
-    Box<dyn Fn((&BackupNamespace, &str, &Authid, &CachedUserInfo)) -> bool + Send>;
+pub type NamespaceFilter = Box<dyn Fn(&BackupNamespace) -> bool + Send>;
 
 #[async_trait::async_trait]
 /// `SyncSource` is a trait that provides an interface for synchronizing data/information from a
@@ -228,8 +226,6 @@ pub(crate) trait SyncSource: Send + Sync {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        auth_id: &Authid,
-        user_info: &CachedUserInfo,
         filter_callback: NamespaceFilter,
     ) -> Result<Vec<BackupNamespace>, Error>;
 
@@ -273,9 +269,7 @@ impl SyncSource for RemoteSource {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        auth_id: &Authid,
-        user_info: &CachedUserInfo,
-        mut filter_callback: NamespaceFilter,
+        filter_callback: NamespaceFilter,
     ) -> Result<Vec<BackupNamespace>, Error> {
         if self.ns.is_root() && max_depth.map_or(false, |depth| depth == 0) {
             return Ok(vec![self.ns.clone()]);
@@ -323,10 +317,7 @@ impl SyncSource for RemoteSource {
             .map(|list_item| list_item.ns)
             .collect();
 
-        let list = list
-            .into_iter()
-            .filter(|namespace| filter_callback((namespace, self.get_store(), auth_id, user_info)))
-            .collect();
+        let list = list.into_iter().filter(filter_callback).collect();
 
         Ok(list)
     }
@@ -421,9 +412,7 @@ impl SyncSource for LocalSource {
     async fn list_namespaces(
         &self,
         max_depth: &mut Option<usize>,
-        auth_id: &Authid,
-        user_info: &CachedUserInfo,
-        mut filter_callback: NamespaceFilter,
+        filter_callback: NamespaceFilter,
     ) -> Result<Vec<BackupNamespace>, Error> {
         let list: Result<Vec<BackupNamespace>, Error> = ListNamespacesRecursive::new_max_depth(
             self.store.clone(),
@@ -432,10 +421,7 @@
         )?
         .collect();
 
-        let list = list?
-            .into_iter()
-            .filter(|namespace| filter_callback((namespace, self.get_store(), auth_id, user_info)))
-            .collect();
+        let list = list?.into_iter().filter(filter_callback).collect();
 
         Ok(list)
     }

On November 11, 2024 4:43 pm, Christian Ebner wrote:
> This patch series implements the functionality to extend the current
> sync jobs in pull direction by an additional push direction, allowing
> to push contents of a local source datastore to a remote target.
> 
> The series implements this by using the REST API of the remote target
> for fetching, creating and/or deleting namespaces, groups and backups,
> and reuses the client's backup writer functionality to create snapshots
> by writing a manifest on the remote target and syncing the fixed index,
> dynamic index or blobs contained in the source manifest to the remote,
> also preserving encryption information.
> 
> Thanks to Fabian for further feedback on the previous patch series
> version.
> 
> Changes since version 6 of the patch series:
> - Fix permission check for sync job modify access, correctly check local
>   datastore access if the job is not owned by the sync user.
> - Pre-filter source namespaces, so namespaces which the sync user has no
>   access to cannot be leaked.
> - Avoid possibly removing unrelated target namespaces during remove
>   vanished by only removing sub-namespaces of the remote target namespace.
> - Fix issues with local/target namespace mapping, make clear which are
>   which by adapting variable names accordingly.
> - Adapt roles related to remote datastore access to mimic roles for
>   local datastore access.
> - Unconditionally pass namespace parameter and early check and fail if
>   remote does not support namespaces.
> - Fetch previous snapshots index to initialize known chunks correctly.
> - Adapt snapshot filter for excluding snapshots older than current last
>   snapshot already present on target.
> - Fix incorrect owner header label in sync job grid for push direction.
> - Use `BackupGroup`s `cmp::Ord` for sorting, for pull and push
> - Update some comments and docs.
> 
> Changes since version 5 of the patch series:
> - Split roles and permissions for separate remote datastore prune and remote
>   datastore modify roles.
> - Fetch target groups filtered by ownership, so as not to try to push or remove
>   unowned groups.
> - Add documentation, highlight the caveats of conflicting push jobs when using
>   shared remotes.
> - Check also for optional `PRIV_DATASTORE_BACKUP` as opposed to only
>   `PRIV_DATASTORE_READ` on the source datastore namespace, so that the user can
>   read the contents from there as well.
> - Drop `sync-direction` parameter from API endpoints where not needed, determine
>   it from the corresponding job's configuration instead.
> - Adapt layout of split job view in WebUI to use more general, less component
>   specific values
> - Introduce `remote_acl_path` helpers for `BackupNamespace` and `SyncJobConfig`.
> - Refactor upload counters to bundle and update counters by chunk variant.
> - Rework `version` endpoint and supported api feature check to be based on
>   `supported_features` rather than a hardcoded version, allowing for more
>   flexibility.
> - `PushParameters` now always have the remote version for the check stored
>   unconditionally.
> - Renamed `ignore-protected` to a less misinterpretable `error-on-protected` and
>   inverted the boolean logic.
> - Squashed and reordered patches, the delete stats are no longer follow-up patches
>   as they are now fully backwards compatible.
> 
> Changes since version 4 of the patch series:
> - Rebased onto current master
> 
> Most notable changes since version 3 of the patch series include:
> - Rework access control permission checks to resemble the pull-based
>   logic more closely.
>   In order to perform a full sync in push direction, including
>   permissions for pruning contents with remove vanished, an acl.cfg
>   looks like below:
>   ```
>   acl:1:/datastore/source-store:syncoperator at pbs:DatastoreReader
>   acl:1:/remote:syncoperator at pbs:RemoteAudit
>   acl:1:/remote/remote-target/target-store:syncoperator at pbs:RemoteDatastorePrune,RemoteSyncPushOperator
>   ```
> - Modify access to sync jobs now requires `DatastoreAudit` for both
>   pull and push sync jobs
> - Fix previously incorrect privs required for removing target
>   namespaces
> - Fix performance bottleneck by not reading known chunks from source,
>   by sending `MergedChunkInfo` instead of `ChunkInfo` over to the
>   upload stream
> - Factor upload statistic counters and structs out into their own
>   module and provide methods for easy conversion
> - Implement `map_to_target` helper for easier/more readable source to
>   target mapping for namespaces
> - Optimize namespace creation on target, only try creating non
>   pre-existing namespace components.
> - Avoid temp file for manifest and upload source manifest directly
> - Not failing on deletion for protected snapshots is now opt-in
> - Refactor api endpoint `version` in order to be able to fetch api
>   version for target
> - Reworked `SyncDirection` api type, use `api` macro to reduce code
> 
> Most notable changes since version 2 of the patch series include:
> - Add checks and extend roles and privs to allow for restricting a local
>   user's access to remote datastore operations. In order to perform a
>   full sync in push direction, including permissions for namespace
>   creation and deleting contents with remove vanished, an acl.cfg looks
>   like below:
>   ```
>   acl:1:/datastore/datastore:syncoperator at pbs:DatastoreAudit
>   acl:1:/remote:syncoperator at pbs:RemoteSyncOperator
>   acl:1:/remote/local/pushme:syncoperator at pbs:RemoteDatastoreModify,RemoteDatastorePrune,RemoteSyncPushOperator
>   ```
>   Based on further feedback, privs might get further grouped or an
>   additional role containing most of these can be created.
> - Drop patch introducing `no-timestamp-check` flag for backup client; as pointed
>   out by Fabian this is not needed, as only backups newer than the currently
>   last available will be pushed.
> - Fix read snapshots from source by using the correct namespace.
> - Rename PullParameters `owner` to the more fitting `local_user`.
> - Fix typos in remote sync push operator comment.
> - Fix comments not matching the functionality for the cli implementations.
> 
> Link to issue on bugtracker:
> https://bugzilla.proxmox.com/show_bug.cgi?id=3044
> 
> Christian Ebner (31):
>   sync: pull: optimize backup group sorting
>   sync: extend sync source's list namespaces method by filter callback
>   client: backup writer: refactor backup and upload stats counters
>   client: backup writer: factor out merged chunk stream upload
>   client: backup writer: allow push uploading index and chunks
>   config: acl: refactor acl path component check for datastore
>   config: acl: allow namespace components for remote datastores
>   api types: add remote acl path method for `BackupNamespace`
>   api types: implement remote acl path method for sync job
>   api types: define remote permissions and roles for push sync
>   datastore: move `BackupGroupDeleteStats` to api types
>   api types: implement api type for `BackupGroupDeleteStats`
>   datastore: increment deleted group counter when removing group
>   api/api-types: refactor api endpoint version, add api types
>   fix #3044: server: implement push support for sync operations
>   api types/config: add `sync-push` config type for push sync jobs
>   api: push: implement endpoint for sync in push direction
>   api: sync: move sync job invocation to server sync module
>   api: config: Require PRIV_DATASTORE_AUDIT to modify sync job
>   api: config: factor out sync job owner check
>   api: sync jobs: expose optional `sync-direction` parameter
>   api: admin: avoid duplicate name for list sync jobs api method
>   bin: manager: add datastore push cli command
>   ui: group filter: allow to set namespace for local datastore
>   ui: sync edit: source group filters based on sync direction
>   ui: add view with separate grids for pull and push sync jobs
>   ui: sync job: adapt edit window to be used for pull and push
>   ui: sync view: set proxy on view instead of model
>   api: datastore/namespace: return backup groups delete stats on remove
>   api: version: add 'prune-delete-stats' as supported feature
>   docs: add section for sync jobs in push direction
> 
>  docs/managing-remotes.rst | 40 +
>  pbs-api-types/src/acl.rs | 38 +
>  pbs-api-types/src/datastore.rs | 76 +-
>  pbs-api-types/src/jobs.rs | 46 ++
>  pbs-api-types/src/lib.rs | 3 +
>  pbs-api-types/src/version.rs | 88 +++
>  pbs-client/src/backup_stats.rs | 119 +++
>  pbs-client/src/backup_writer.rs | 318 +++++---
>  pbs-client/src/inject_reused_chunks.rs | 14 +-
>  pbs-client/src/lib.rs | 4 +
>  pbs-config/src/acl.rs | 11 +-
>  pbs-config/src/sync.rs | 16 +-
>  pbs-datastore/src/backup_info.rs | 34 +-
>  pbs-datastore/src/datastore.rs | 27 +-
>  src/api2/admin/datastore.rs | 29 +-
>  src/api2/admin/namespace.rs | 31 +-
>  src/api2/admin/sync.rs | 43 +-
>  src/api2/config/datastore.rs | 15 +-
>  src/api2/config/notifications/mod.rs | 21 +-
>  src/api2/config/sync.rs | 296 ++++++--
>  src/api2/mod.rs | 2 +
>  src/api2/pull.rs | 108 ---
>  src/api2/push.rs | 175 +++++
>  src/api2/version.rs | 42 +-
>  src/bin/proxmox-backup-manager.rs | 216 ++++--
>  src/bin/proxmox-backup-proxy.rs | 24 +-
>  src/server/mod.rs | 2 +
>  src/server/pull.rs | 33 +-
>  src/server/push.rs | 994 +++++++++++++++++++++++++
>  src/server/sync.rs | 179 ++++-
>  www/Makefile | 1 +
>  www/config/SyncPullPushView.js | 61 ++
>  www/config/SyncView.js | 29 +-
>  www/datastore/DataStoreList.js | 2 +-
>  www/datastore/Panel.js | 2 +-
>  www/form/GroupFilter.js | 21 +-
>  www/window/SyncJobEdit.js | 49 +-
>  37 files changed, 2694 insertions(+), 515 deletions(-)
>  create mode 100644 pbs-api-types/src/version.rs
>  create mode 100644 pbs-client/src/backup_stats.rs
>  create mode 100644 src/api2/push.rs
>  create mode 100644 src/server/push.rs
>  create mode 100644 www/config/SyncPullPushView.js
> 
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 
> 

From f.gruenbichler at proxmox.com  Thu Nov 21 13:25:15 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Thu, 21 Nov 2024 13:25:15 +0100
Subject: [pbs-devel] applied-series: [PATCH v2 proxmox-backup 0/5] fix #5853: ignore stale files
In-Reply-To: <20241114144114.375987-1-c.ebner@proxmox.com>
References: <20241114144114.375987-1-c.ebner@proxmox.com>
Message-ID: <1732191900.4xn1zu55fm.astroid@yuna.none>

thanks! this version looks much cleaner now :)

On November 14, 2024 3:41 pm, Christian Ebner wrote:
> When files and their associated metadata get invalidated, I/O
> operations on network filesystems return ESTALE to indicate that the
> filehandle does not reference a valid file anymore.
> 
> Currently, the proxmox-backup-client does not cover such cases; it
> will fail with a hard error when a stale file handle is encountered.
> Any concurrent operation invalidating file handles has the potential
> to lead to the backups failing if timed accordingly. For local
> filesystems this is not an issue, as the file remains accessible
> until the file handle is closed.
> 
> Make the backup client more resilient by handling the ESTALE errors
> gracefully, warning the user about the vanished/invalidated files,
> while generating a valid and consistent backup archive nevertheless.
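(As a minimal sketch of the approach described above, assuming, as the changelog below states, that the original `nix::errno::Errno` is preserved inside the `anyhow::Error` chain and can be recovered by downcasting; the helper name is made up here for illustration:

```
use anyhow::Error;
use nix::errno::Errno;

/// Check whether an error ultimately stems from a stale file handle (ESTALE),
/// relying on the original `Errno` being kept in the anyhow error chain.
fn is_stale_file_handle(err: &Error) -> bool {
    matches!(err.downcast_ref::<Errno>(), Some(Errno::ESTALE))
}
```

A caller in the archiver can then log a warning and skip the affected file or directory entry instead of aborting the whole backup run.)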
> 
> Changes since version 1:
> - Avoid tuples in return values by downcasting anyhow::Error to Errno
>   when the latter is required
> - Add report stale file handle helper
> - Refactor report vanished/changed file helpers
> 
> Christian Ebner (5):
>   client: pxar: refactor report vanished/changed helpers
>   client: pxar: skip directories on stale file handle
>   client: pxar: skip directory entries on stale file handle
>   client: pxar: warn user and ignore stale file handles on file open
>   fix #5853: client: pxar: exclude stale files on metadata/link read
> 
>  pbs-client/src/pxar/create.rs | 94 +++++++++++++++++++++++++----------
>  1 file changed, 69 insertions(+), 25 deletions(-)
> 
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 
> 

From c.ebner at proxmox.com  Thu Nov 21 13:26:40 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 21 Nov 2024 13:26:40 +0100
Subject: [pbs-devel] applied-series: [PATCH v7 proxmox-backup 00/31] fix #3044: push datastore to remote target
In-Reply-To: <1732190636.gvupqrayxw.astroid@yuna.none>
References: <20241111154353.482734-1-c.ebner@proxmox.com>
 <1732190636.gvupqrayxw.astroid@yuna.none>
Message-ID: <3463fa78-61bd-49aa-96bb-36f376981767@proxmox.com>

On 11/21/24 13:08, Fabian Grünbichler wrote:
> with some follow-ups, notably:
> 
> - dropped the named features, checking just the api version instead
> - simplified the namespace filtering
> - added a new datastore_api_path helper
> - add a check whether the source namespace anchor exists
> - simplify the snapshot selection for remove_vanished
> - reduce the (expensive) queries to the remote snapshot lists
> - some code simplification and style cleanups
> 
> the error handling/contexts could still benefit from some attention
> (mostly making clear where an error from the remote side is bubbled up,
> since the ACL paths are confusing if that context is not included), but
> that can be done as follow-up..

Acked, thanks a lot for all your input on this series!

Will look at the suggested improved error handling right away.


From f.gruenbichler at proxmox.com  Thu Nov 21 14:19:33 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Thu, 21 Nov 2024 14:19:33 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v13 06/26] datastore: add helper for checking if a datastore is mounted
In-Reply-To: <20241113150102.164820-7-h.laimer@proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com>
 <20241113150102.164820-7-h.laimer@proxmox.com>
Message-ID: <1732193726.vmgtl8o1rq.astroid@yuna.none>

On November 13, 2024 4:00 pm, Hannes Laimer wrote:
> ... at a specific location. This is removable datastore specific so it
> takes both a uuid and mount location.
> > Co-authored-by: Wolfgang Bumiller > Signed-off-by: Hannes Laimer > --- > changes since v12: > * clearify documentation > * make function more removable datastore specific to remove ambiguity > about what it does and what it is meant for > * only use for removable datastore > > pbs-api-types/src/maintenance.rs | 2 + > pbs-datastore/src/datastore.rs | 73 +++++++++++++++++++++++++++++ > pbs-datastore/src/lib.rs | 2 +- > src/server/metric_collection/mod.rs | 10 ++++ > 4 files changed, 86 insertions(+), 1 deletion(-) > > diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs > index fd4d3416..9f51292e 100644 > --- a/pbs-api-types/src/maintenance.rs > +++ b/pbs-api-types/src/maintenance.rs > @@ -82,6 +82,8 @@ impl MaintenanceMode { > /// task finishes, so all open files are closed. > pub fn is_offline(&self) -> bool { > self.ty == MaintenanceType::Offline > + || self.ty == MaintenanceType::Unmount > + || self.ty == MaintenanceType::Delete > } > this part here doesn't really belong into this commit (it's not part of the helper mentioned above). the comment and the (new) contents also don't match - if the MaintenanceType is Delete (i.e., the datastore contents are currently being deleted) then all open files can't be already closed? same for Unmount - if it is currently being unmounted, there might still be references open.. I think this should rather be explicit -> at the end of unmounting/deletion, remove from cache? or this helper should be renamed and the comment adapted, if it is actually not "is the datastore offline" but "is it a removal candidate once all tasks have exited".. > pub fn check(&self, operation: Option) -> Result<(), Error> { > diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs > index fb37bd5a..cadf9245 100644 > --- a/pbs-datastore/src/datastore.rs > +++ b/pbs-datastore/src/datastore.rs > @@ -1,5 +1,6 @@ > use std::collections::{HashMap, HashSet}; > use std::io::{self, Write}; > +use std::os::unix::ffi::OsStrExt; > use std::os::unix::io::AsRawFd; > use std::path::{Path, PathBuf}; > use std::sync::{Arc, LazyLock, Mutex}; > @@ -14,6 +15,7 @@ use proxmox_schema::ApiType; > use proxmox_sys::error::SysError; > use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions}; > use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard}; > +use proxmox_sys::linux::procfs::MountInfo; > use proxmox_sys::process_locker::ProcessLockSharedGuard; > use proxmox_worker_task::WorkerTaskContext; > > @@ -46,6 +48,55 @@ pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> > Ok(()) > } > > +/// Check if a device with a given UUID is currently mounted at store_mount_point by > +/// comparing the `st_rdev` values of `/dev/disk/by-uuid/` and the source device in > +/// /proc/self/mountinfo. > +/// > +/// If we can't check if it is mounted, we treat that as not mounted, > +/// returning false. 
> +/// > +/// Reasons it could fail other than not being mounted where expected: > +/// - could not read /proc/self/mountinfo > +/// - could not stat /dev/disk/by-uuid/ > +/// - /dev/disk/by-uuid/ is not a block device > +/// > +/// Since these are very much out of our control, there is no real value in distinguishing > +/// between them, so for this function they all are treated as 'device not mounted' > +pub fn is_datastore_mounted_at(store_mount_point: String, device_uuid: String) -> bool { > + use nix::sys::stat::SFlag; > + > + let store_mount_point = Path::new(&store_mount_point); > + > + let dev_node = match nix::sys::stat::stat(format!("/dev/disk/by-uuid/{device_uuid}").as_str()) { > + Ok(stat) if SFlag::from_bits_truncate(stat.st_mode) == SFlag::S_IFBLK => stat.st_rdev, > + _ => return false, > + }; > + > + let Ok(mount_info) = MountInfo::read() else { > + return false; > + }; > + > + for (_, entry) in mount_info { > + let Some(source) = entry.mount_source else { > + continue; > + }; > + > + if entry.mount_point != store_mount_point || !source.as_bytes().starts_with(b"/") { > + continue; > + } > + > + if let Ok(stat) = nix::sys::stat::stat(source.as_os_str()) { > + let sflag = SFlag::from_bits_truncate(stat.st_mode); > + > + if sflag == SFlag::S_IFBLK && stat.st_rdev == dev_node { > + return true; > + } > + } > + } > + > + false > +} > + > /// Datastore Management > /// > /// A Datastore can store severals backups, and provides the > @@ -154,6 +205,18 @@ impl DataStore { > bail!("datastore '{name}' is in {error}"); > } > } > + let mount_status = config > + .get_mount_point() > + .zip(config.backing_device.as_ref()) > + .map(|(mount_point, device_uuid)| { > + is_datastore_mounted_at(mount_point, device_uuid.to_string()) > + }); > + > + if mount_status == Some(false) { > + let mut datastore_cache = DATASTORE_MAP.lock().unwrap(); > + datastore_cache.remove(&config.name); > + bail!("Removable Datastore is not mounted"); note the message here > + } > > let mut datastore_cache = DATASTORE_MAP.lock().unwrap(); > let entry = datastore_cache.get(name); > @@ -258,6 +321,16 @@ impl DataStore { > ) -> Result, Error> { > let name = config.name.clone(); > > + let mount_status = config > + .get_mount_point() > + .zip(config.backing_device.as_ref()) > + .map(|(mount_point, device_uuid)| { > + is_datastore_mounted_at(mount_point, device_uuid.to_string()) > + }); > + if mount_status == Some(false) { > + bail!("Datastore is not available") and here - shouldn't they be the same? could also be combined into a helper (e.g., ensure_removable_datastore_is_mounted), if desired, there is a third call site with the exact same code but yet another error message in src/api2/admin/datastore.rs (and three more slight variations where it's not (always) fatal that it is not mounted, but with the same zip+map code before). 
> + } > + > let tuning: DatastoreTuning = serde_json::from_value( > DatastoreTuning::API_SCHEMA > .parse_property_string(config.tuning.as_deref().unwrap_or(""))?, > diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs > index 202b0955..34113261 100644 > --- a/pbs-datastore/src/lib.rs > +++ b/pbs-datastore/src/lib.rs > @@ -204,7 +204,7 @@ pub use manifest::BackupManifest; > pub use store_progress::StoreProgress; > > mod datastore; > -pub use datastore::{check_backup_owner, DataStore}; > +pub use datastore::{check_backup_owner, is_datastore_mounted_at, DataStore}; > > mod hierarchy; > pub use hierarchy::{ > diff --git a/src/server/metric_collection/mod.rs b/src/server/metric_collection/mod.rs > index b95dba20..edba512c 100644 > --- a/src/server/metric_collection/mod.rs > +++ b/src/server/metric_collection/mod.rs > @@ -176,6 +176,16 @@ fn collect_disk_stats_sync() -> (DiskStat, Vec) { > continue; > } > > + let mount_status = config > + .get_mount_point() > + .zip(config.backing_device.as_ref()) > + .map(|(mount_point, device_uuid)| { > + pbs_datastore::is_datastore_mounted_at(mount_point, device_uuid.to_string()) > + }); > + if mount_status == Some(false) { > + continue; > + } > + this one of those variations.. since all call sites to is_datastore_mounted_at start with the config, it might make more sense to implement it there (or rather, using that as parameter, since we don't want all that stat code in an api type ;)) and avoid all the repetitive code? then a simple get_datastore_mount_status(&config) -> Option(bool), and maybe ensure_datastore_is_mounted(&config) -> Result<(), Error> would solve the same issue, but be nicer to read? > datastores.push(gather_disk_stats( > disk_manager.clone(), > Path::new(&config.absolute_path()), > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From g.goller at proxmox.com Thu Nov 21 14:34:37 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 14:34:37 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v3 0/3] fix #3786: resync corrupt chunks in sync-job In-Reply-To: <20241105104015.162094-1-g.goller@proxmox.com> References: <20241105104015.162094-1-g.goller@proxmox.com> Message-ID: Submitted v4! From f.gruenbichler at proxmox.com Thu Nov 21 14:35:00 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 14:35:00 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 07/26] api: admin: add (un)mount endpoint for removable datastores In-Reply-To: <20241113150102.164820-8-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-8-h.laimer@proxmox.com> Message-ID: <1732195236.hffln90atb.astroid@yuna.none> On November 13, 2024 4:00 pm, Hannes Laimer wrote: > Removable datastores can be mounted unless > - they are already > - their device is not present > For unmounting the maintenance mode is set to `unmount`, > which prohibits the starting of any new tasks envolving any > IO, this mode is unset either > - on completion of the unmount > - on abort of the unmount tasks > If the unmounting itself should fail, the maintenance mode stays in > place and requires manual intervention by unsetting it in the config > file directly. This is intentional, as unmounting should not fail, > and if it should the situation should be looked at. 
> > Signed-off-by: Hannes Laimer > --- > changes since v12: > * allow multiple stores on one device > * add best effort attempt to unmount after failed creation > > src/api2/admin/datastore.rs | 267 ++++++++++++++++++++++++++++++++++-- > 1 file changed, 257 insertions(+), 10 deletions(-) > > diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs > index b73ad0ff..a12262e7 100644 > --- a/src/api2/admin/datastore.rs > +++ b/src/api2/admin/datastore.rs > @@ -3,7 +3,7 @@ > use std::collections::HashSet; > use std::ffi::OsStr; > use std::os::unix::ffi::OsStrExt; > -use std::path::PathBuf; > +use std::path::{Path, PathBuf}; > use std::sync::Arc; > > use anyhow::{bail, format_err, Error}; > @@ -13,7 +13,7 @@ use hyper::{header, Body, Response, StatusCode}; > use serde::Deserialize; > use serde_json::{json, Value}; > use tokio_stream::wrappers::ReceiverStream; > -use tracing::{info, warn}; > +use tracing::{debug, info, warn}; > > use proxmox_async::blocking::WrappedReaderStream; > use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; > @@ -29,6 +29,7 @@ use proxmox_sys::fs::{ > file_read_firstline, file_read_optional_string, replace_file, CreateOptions, > }; > use proxmox_time::CalendarEvent; > +use proxmox_worker_task::WorkerTaskContext; > > use pxar::accessor::aio::Accessor; > use pxar::EntryKind; > @@ -36,12 +37,12 @@ use pxar::EntryKind; > use pbs_api_types::{ > print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType, > Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, > - GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, Operation, > - PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, > - BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, > - DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, > - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, > - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, > + GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, MaintenanceMode, > + MaintenanceType, Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, > + BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, > + BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, > + NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, > + PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, > VERIFICATION_OUTDATED_AFTER_SCHEMA, > }; > use pbs_client::pxar::{create_tar, create_zip}; > @@ -57,8 +58,8 @@ use pbs_datastore::index::IndexFile; > use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; > use pbs_datastore::prune::compute_prune_info; > use pbs_datastore::{ > - check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, > - StoreProgress, CATALOG_NAME, > + check_backup_owner, is_datastore_mounted_at, task_tracking, BackupDir, BackupGroup, DataStore, > + LocalChunkReader, StoreProgress, CATALOG_NAME, > }; > use pbs_tools::json::required_string_param; > use proxmox_rest_server::{formatter, WorkerTask}; > @@ -2384,6 +2385,250 @@ pub async fn set_backup_owner( > .await? > } > > +/// Here we > +/// > +/// 1. mount the removable device to `/mount/` > +/// 2. 
bind mount `/mount//` to `/mnt/datastore/` > +/// 3. unmount `/mount/` > +/// > +/// leaving us with the datastore being mounted directly with its name under /mnt/datastore/... > +/// > +/// The reason for the randomized device mounting paths is to avoid two tasks trying to mount to > +/// the same path, this is *very* unlikely since the device is only mounted really shortly, but > +/// technically possible. > +pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> { > + if let (Some(uuid), Some(mount_point)) = ( > + datastore.backing_device.as_ref(), > + datastore.get_mount_point(), > + ) { another variation, see previous patch comments.. > + if pbs_datastore::is_datastore_mounted_at(mount_point.clone(), uuid.to_string()) { > + bail!("device is already mounted at '{}'", mount_point); > + } > + let tmp_mount_path = format!( > + "{}/{:x}", > + pbs_buildcfg::rundir!("/mount"), > + proxmox_uuid::Uuid::generate() > + ); > + > + let default_options = proxmox_sys::fs::CreateOptions::new(); > + proxmox_sys::fs::create_path( > + &tmp_mount_path, > + Some(default_options.clone()), > + Some(default_options.clone()), > + )?; > + > + debug!("mounting '{uuid}' to '{}'", tmp_mount_path); IMHO this could be info!, we are in a task context here with very little output, and if something went wrong, the extra info can only help.. maybe add in a "temporarily" at the beginning.. > + crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path))?; because else, if this fails, the user has no idea what's going on unless they happen to run in debug mode.. > + > + let full_store_path = format!( > + "{tmp_mount_path}/{}", > + datastore.path.trim_start_matches('/') > + ); > + let backup_user = pbs_config::backup_user()?; > + let options = CreateOptions::new() > + .owner(backup_user.uid) > + .group(backup_user.gid); > + > + proxmox_sys::fs::create_path( > + &mount_point, > + Some(default_options.clone()), > + Some(options.clone()), > + )?; should we add some context to the error here? > + > + // can't be created before it is mounted, so we have to do it here > + proxmox_sys::fs::create_path( > + &full_store_path, > + Some(default_options.clone()), > + Some(options.clone()), > + )?; and here? > + > + info!( > + "mounting '{}'({}) to '{}'", > + datastore.name, datastore.path, mount_point > + ); if the message above becomes info, then this should probably say something like "bind mounting '{full_store_path}' to '{mount_point}'" > + if let Err(err) = > + crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point)) > + { > + debug!("unmounting '{}'", tmp_mount_path); > + let _ = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)); should we log errors her? > + let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)); and here? if those fail, we might need additional cleanup? > + return Err(format_err!( > + "Datastore '{}' cound not be mounted: {}.", > + datastore.name, > + err > + )); > + } > + > + debug!("unmounting '{}'", tmp_mount_path); if the first message becomes info, this should too (and maybe add in that the path being unmounted was temporary). 
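For the error-context suggestions in this hunk, anyhow's `Context` trait would be one way to do it. A sketch, not part of the patch, reusing the `mount_point`, `default_options` and `options` bindings from the quoted code:

```
use anyhow::Context;

// same call as above, but the task log now shows *which* create_path failed
proxmox_sys::fs::create_path(
    &mount_point,
    Some(default_options.clone()),
    Some(options.clone()),
)
.with_context(|| format!("failed to create mount point '{mount_point}'"))?;
```

That way the raw OS error still bubbles up, but with enough context to tell the several `create_path` calls apart.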
> + crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path))?; > + std::fs::remove_dir(std::path::Path::new(&tmp_mount_path))?; error context here might be nice as well > + > + Ok(()) > + } else { > + Err(format_err!( > + "Datastore '{}' cannot be mounted because it is not removable.", > + datastore.name > + )) > + } > +} > + > +#[api( > + protected: true, > + input: { > + properties: { > + store: { > + schema: DATASTORE_SCHEMA, > + }, > + } > + }, > + returns: { > + schema: UPID_SCHEMA, > + }, > + access: { > + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), > + }, > +)] > +/// Mount removable datastore. > +pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { > + let (section_config, _digest) = pbs_config::datastore::config()?; > + let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; > + > + if datastore.backing_device.is_none() { > + bail!("datastore '{store}' is not removable"); > + } > + > + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > + > + let upid = WorkerTask::new_thread( > + "mount-device", > + Some(store), > + auth_id.to_string(), > + to_stdout, > + move |_worker| do_mount_device(datastore), > + )?; > + > + Ok(json!(upid)) > +} > + > +fn unset_unmount_maintenance(store: &str) -> Result<(), Error> { > + let _lock = pbs_config::datastore::lock_config()?; > + let (mut section_config, _digest) = pbs_config::datastore::config()?; > + let mut store_config: DataStoreConfig = section_config.lookup("datastore", store)?; > + if store_config > + .get_maintenance_mode() > + .map_or(true, |m| m.ty != MaintenanceType::Unmount) > + { > + bail!("Maintenance mode should have been 'Unmount'") > + } > + store_config.maintenance_mode = None; > + section_config.set_data(store, "datastore", &store_config)?; > + pbs_config::datastore::save_config(§ion_config)?; > + Ok(()) > +} > + > +fn do_unmount_device( > + datastore: DataStoreConfig, > + worker: Option<&dyn WorkerTaskContext>, > +) -> Result<(), Error> { > + let mut active_operations = task_tracking::get_active_operations(&datastore.name)?; > + let mut old_status = String::new(); > + while active_operations.read + active_operations.write > 0 { > + if let Some(worker) = worker { > + if worker.abort_requested() { > + unset_unmount_maintenance(&datastore.name)?; this error should be caught and converted to a warning > + bail!("aborted, due to user request"); else this much more important information might not be printed > + } this check should also be done below the loop, else this is racy.. > + let status = format!( > + "cannot unmount yet, still {} read and {} write operations active", this reads a bit strange language-wise, maybe it can be rephrased? unmounting not possible yet, there are still .. > + active_operations.read, active_operations.write > + ); > + if status != old_status { > + info!("{status}"); > + old_status = status; > + } > + } > + std::thread::sleep(std::time::Duration::from_secs(1)); > + active_operations = task_tracking::get_active_operations(&datastore.name)?; > + } > + if let Some(mount_point) = datastore.get_mount_point() { > + crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?; > + unset_unmount_maintenance(&datastore.name)?; so if I clear the maintenance mode, it will get unmounted anyway, and only then tell me that the maintenance mode is unexpected? this should re-lock and read the config before unmounting.. 
that likely means you actually want the helper above to give you the lock and check the state, and then have a second helper to unset it and write the config out (if we had proper locked configs as types this would be easier :().. > + } > + Ok(()) > +} > + > +#[api( > + protected: true, > + input: { > + properties: { > + store: { schema: DATASTORE_SCHEMA }, > + }, > + }, > + returns: { > + schema: UPID_SCHEMA, > + }, > + access: { > + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true), > + } > +)] > +/// Unmount a removable device that is associated with the datastore > +pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { > + let _lock = pbs_config::datastore::lock_config()?; > + let (mut section_config, _digest) = pbs_config::datastore::config()?; > + let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; > + > + if datastore.backing_device.is_none() { > + bail!("datastore '{store}' is not removable"); > + } > + > + let mount_status = datastore > + .get_mount_point() > + .zip(datastore.backing_device.as_ref()) > + .map(|(mount_point, device_uuid)| { > + is_datastore_mounted_at(mount_point, device_uuid.to_string()) > + }); another variant ;) > + > + if mount_status == Some(false) { > + bail!("datastore '{store}' is not mounted"); > + } > + > + datastore.set_maintenance_mode(Some(MaintenanceMode { > + ty: MaintenanceType::Unmount, > + message: None, > + }))?; > + section_config.set_data(&store, "datastore", &datastore)?; > + pbs_config::datastore::save_config(§ion_config)?; > + > + drop(_lock); > + > + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > + > + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) > + { > + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); > + let _ = proxmox_daemon::command_socket::send_raw( > + sock, > + &format!( > + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", > + &store > + ), > + ) > + .await; > + } > + > + let upid = WorkerTask::new_thread( > + "unmount-device", > + Some(store), > + auth_id.to_string(), > + to_stdout, > + move |worker| do_unmount_device(datastore, Some(&worker)), > + )?; > + > + Ok(json!(upid)) > +} > + > #[sortable] > const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > ( > @@ -2422,6 +2667,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > .get(&API_METHOD_LIST_GROUPS) > .delete(&API_METHOD_DELETE_GROUP), > ), > + ("mount", &Router::new().post(&API_METHOD_MOUNT)), > ( > "namespace", > // FIXME: move into datastore:: sub-module?! 
> @@ -2456,6 +2702,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > .delete(&API_METHOD_DELETE_SNAPSHOT), > ), > ("status", &Router::new().get(&API_METHOD_STATUS)), > + ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)), > ( > "upload-backup-log", > &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG), > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From g.goller at proxmox.com Thu Nov 21 14:35:05 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 14:35:05 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v4 0/4] fix #3786: resync corrupt chunks in sync-job Message-ID: <20241121133509.289419-1-g.goller@proxmox.com> Add an option `resync-corrupt` that resyncs corrupt snapshots when running sync-job. This option checks if the local snapshot failed the last verification and if it did, overwrites the local snapshot with the remote one. This is quite useful, as we currently don't have an option to "fix" broken chunks/snapshots in any way, even if a healthy version is on another (e.g. offsite) instance. Important things to note are also: this has a slight performance penalty, as all the manifests have to be looked through, and a verification job has to be run beforehand, otherwise we do not know if the snapshot is healthy. Note: This series was originally written by Shannon! I just picked it up, rebased, and fixed the obvious comments on the last series. Changelog v4 (thanks @Fabian): - make verify_state bubble up errors - call verify_state helper everywhere we need the verify_state - resync broken manifests (so resync when load_manifest fails) Changelog v3 (thanks @Fabian): - filter out snapshots earlier in the pull_group function - move verify_state to BackupManifest and fixed invocations - reverted verify_state Option -> Result state (It doesn't matter if we get an error, we get that quite often f.e. 
in new backups) - removed some unnecessary log lines - removed some unnecessary imports and modifications - rebase to current master Changelog v2 (thanks @Thomas): - order git trailers - adjusted schema description to include broken indexes - change verify_state to return a Result<_,_> - print error if verify_state is not able to read the state - update docs on pull_snapshot function - simplify logic by combining flags - move log line out of loop to only print once that we resync the snapshot Changelog since RFC (Shannon's work): - rename option from deep-sync to resync-corrupt - rebase on latest master (and change implementation details, as a lot has changed around sync-jobs) proxmox-backup: Gabriel Goller (4): snapshot: add helper function to retrieve verify_state fix #3786: api: add resync-corrupt option to sync jobs fix #3786: ui/cli: add resync-corrupt option on sync-jobs fix #3786: docs: add resync-corrupt option to sync-job docs/managing-remotes.rst | 6 +++ pbs-api-types/src/jobs.rs | 10 +++++ pbs-datastore/src/backup_info.rs | 15 ++++++- pbs-datastore/src/manifest.rs | 14 +++++- src/api2/admin/datastore.rs | 16 +++---- src/api2/backup/mod.rs | 13 +++--- src/api2/config/sync.rs | 4 ++ src/api2/pull.rs | 9 +++- src/backup/verify.rs | 7 ++- src/bin/proxmox-backup-manager.rs | 16 ++++++- src/server/pull.rs | 72 ++++++++++++++++++++++++------- www/window/SyncJobEdit.js | 11 +++++ 12 files changed, 151 insertions(+), 42 deletions(-) Summary over all repositories: 12 files changed, 151 insertions(+), 42 deletions(-) -- Generated by git-murpp 0.7.1 From g.goller at proxmox.com Thu Nov 21 14:35:07 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 14:35:07 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v4 2/4] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <20241121133509.289419-1-g.goller@proxmox.com> References: <20241121133509.289419-1-g.goller@proxmox.com> Message-ID: <20241121133509.289419-3-g.goller@proxmox.com> This option allows us to "fix" corrupt snapshots (and/or their chunks) by pulling them from another remote. When traversing the remote snapshots, we check if it exists locally, and if it is, we check if the last verification of it failed. If the local snapshot is broken and the `resync-corrupt` option is turned on, we pull in the remote snapshot, overwriting the local one. This is very useful and has been requested a lot, as there is currently no way to "fix" corrupt chunks/snapshots even if the user has a healthy version of it on their offsite instance. 
Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller --- pbs-api-types/src/jobs.rs | 10 ++++++ src/api2/config/sync.rs | 4 +++ src/api2/pull.rs | 9 ++++- src/server/pull.rs | 72 ++++++++++++++++++++++++++++++--------- 4 files changed, 78 insertions(+), 17 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e8056beb00cb..52520811b560 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -536,6 +536,10 @@ impl SyncDirection { } } +pub const RESYNC_CORRUPT_SCHEMA: Schema = + BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") + .schema(); + #[api( properties: { id: { @@ -590,6 +594,10 @@ impl SyncDirection { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + } } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -623,6 +631,8 @@ pub struct SyncJobConfig { pub limit: RateLimitConfig, #[serde(skip_serializing_if = "Option::is_none")] pub transfer_last: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub resync_corrupt: Option, } impl SyncJobConfig { diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 78eb7320566b..7ff6cae029d1 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -471,6 +471,9 @@ pub fn update_sync_job( if let Some(transfer_last) = update.transfer_last { data.transfer_last = Some(transfer_last); } + if let Some(resync_corrupt) = update.resync_corrupt { + data.resync_corrupt = Some(resync_corrupt); + } if update.limit.rate_in.is_some() { data.limit.rate_in = update.limit.rate_in; @@ -629,6 +632,7 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator ns: None, owner: Some(write_auth_id.clone()), comment: None, + resync_corrupt: None, remove_vanished: None, max_depth: None, group_filter: None, diff --git a/src/api2/pull.rs b/src/api2/pull.rs index d039dab59c65..d8ed1a7347b5 100644 --- a/src/api2/pull.rs +++ b/src/api2/pull.rs @@ -10,7 +10,7 @@ use pbs_api_types::{ Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, - TRANSFER_LAST_SCHEMA, + RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, }; use pbs_config::CachedUserInfo; use proxmox_rest_server::WorkerTask; @@ -87,6 +87,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters { sync_job.group_filter.clone(), sync_job.limit.clone(), sync_job.transfer_last, + sync_job.resync_corrupt, ) } } @@ -132,6 +133,10 @@ impl TryFrom<&SyncJobConfig> for PullParameters { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + }, }, }, access: { @@ -156,6 +161,7 @@ async fn pull( group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -193,6 +199,7 @@ async fn pull( group_filter, limit, transfer_last, + resync_corrupt, )?; // fixme: set to_stdout to false? 
diff --git a/src/server/pull.rs b/src/server/pull.rs index 08b55956ce52..40d872d2487c 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -12,7 +12,8 @@ use tracing::info; use pbs_api_types::{ print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + RateLimitConfig, Remote, VerifyState, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, + PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -55,6 +56,8 @@ pub(crate) struct PullParameters { group_filter: Vec, /// How many snapshots should be transferred at most (taking the newest N snapshots) transfer_last: Option, + /// Whether to re-sync corrupted snapshots + resync_corrupt: bool, } impl PullParameters { @@ -72,12 +75,14 @@ impl PullParameters { group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, ) -> Result { if let Some(max_depth) = max_depth { ns.check_max_depth(max_depth)?; remote_ns.check_max_depth(max_depth)?; }; let remove_vanished = remove_vanished.unwrap_or(false); + let resync_corrupt = resync_corrupt.unwrap_or(false); let source: Arc = if let Some(remote) = remote { let (remote_config, _digest) = pbs_config::remote::config()?; @@ -116,6 +121,7 @@ impl PullParameters { max_depth, group_filter, transfer_last, + resync_corrupt, }) } } @@ -323,7 +329,7 @@ async fn pull_single_archive<'a>( /// /// Pulling a snapshot consists of the following steps: /// - (Re)download the manifest -/// -- if it matches, only download log and treat snapshot as already synced +/// -- if it matches and is not corrupt, only download log and treat snapshot as already synced /// - Iterate over referenced files /// -- if file already exists, verify contents /// -- if not, pull it from the remote @@ -332,6 +338,7 @@ async fn pull_snapshot<'a>( reader: Arc, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc>>, + corrupt: bool, ) -> Result { let mut sync_stats = SyncStats::default(); let mut manifest_name = snapshot.full_path(); @@ -352,7 +359,7 @@ async fn pull_snapshot<'a>( return Ok(sync_stats); } - if manifest_name.exists() { + if manifest_name.exists() && !corrupt { let manifest_blob = proxmox_lang::try_block!({ let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { format_err!("unable to open local manifest {manifest_name:?} - {err}") @@ -381,7 +388,7 @@ async fn pull_snapshot<'a>( let mut path = snapshot.full_path(); path.push(&item.filename); - if path.exists() { + if !corrupt && path.exists() { match ArchiveType::from_path(&item.filename)? 
{ ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path)?; @@ -443,6 +450,7 @@ async fn pull_snapshot_from<'a>( reader: Arc, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc>>, + corrupt: bool, ) -> Result { let (_path, is_new, _snap_lock) = snapshot .datastore() @@ -451,7 +459,8 @@ async fn pull_snapshot_from<'a>( let sync_stats = if is_new { info!("sync snapshot {}", snapshot.dir()); - match pull_snapshot(reader, snapshot, downloaded_chunks).await { + // this snapshot is new, so it can never be corrupt + match pull_snapshot(reader, snapshot, downloaded_chunks, false).await { Err(err) => { if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( snapshot.backup_ns(), @@ -468,8 +477,12 @@ async fn pull_snapshot_from<'a>( } } } else { - info!("re-sync snapshot {}", snapshot.dir()); - pull_snapshot(reader, snapshot, downloaded_chunks).await? + if corrupt { + info!("re-sync snapshot {} due to corruption", snapshot.dir()); + } else { + info!("re-sync snapshot {}", snapshot.dir()); + } + pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await? }; Ok(sync_stats) @@ -523,26 +536,52 @@ async fn pull_group( .last_successful_backup(&target_ns, group)? .unwrap_or(i64::MIN); - let list: Vec = raw_list + // Filter remote BackupDirs to include in pull + // Also stores if the snapshot is corrupt (verification job failed) + let list: Vec<(BackupDir, bool)> = raw_list .into_iter() .enumerate() - .filter(|&(pos, ref dir)| { + .filter_map(|(pos, dir)| { source_snapshots.insert(dir.time); + // If resync_corrupt is set, check if the corresponding local snapshot failed to + // verification + if params.resync_corrupt { + let local_dir = params + .target + .store + .backup_dir(target_ns.clone(), dir.clone()); + if let Ok(local_dir) = local_dir { + match local_dir.verify_state() { + Ok(Some(state)) => { + if state == VerifyState::Failed { + return Some((dir, true)); + } + } + Ok(None) => { + // The verify_state item was not found in the manifest, this means the + // snapshot is new. + } + Err(_) => { + // There was an error loading the manifest, probably better if we + // resync. 
+ return Some((dir, true)); + } + } + } + } // Note: the snapshot represented by `last_sync_time` might be missing its backup log // or post-backup verification state if those were not yet available during the last // sync run, always resync it if last_sync_time > dir.time { already_synced_skip_info.update(dir.time); - return false; + return None; } - if pos < cutoff && last_sync_time != dir.time { transfer_last_skip_info.update(dir.time); - return false; + return None; } - true + Some((dir, false)) }) - .map(|(_, dir)| dir) .collect(); if already_synced_skip_info.count > 0 { @@ -561,7 +600,7 @@ async fn pull_group( let mut sync_stats = SyncStats::default(); - for (pos, from_snapshot) in list.into_iter().enumerate() { + for (pos, (from_snapshot, corrupt)) in list.into_iter().enumerate() { let to_snapshot = params .target .store @@ -571,7 +610,8 @@ async fn pull_group( .source .reader(source_namespace, &from_snapshot) .await?; - let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await; + let result = + pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone(), corrupt).await; progress.done_snapshots = pos as u64 + 1; info!("percentage done: {progress}"); -- 2.39.5 From g.goller at proxmox.com Thu Nov 21 14:35:09 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 14:35:09 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v4 4/4] fix #3786: docs: add resync-corrupt option to sync-job In-Reply-To: <20241121133509.289419-1-g.goller@proxmox.com> References: <20241121133509.289419-1-g.goller@proxmox.com> Message-ID: <20241121133509.289419-5-g.goller@proxmox.com> Add short section explaining the `resync-corrupt` option on the sync-job. Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller --- docs/managing-remotes.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/managing-remotes.rst b/docs/managing-remotes.rst index a7fd5143d236..4a78a9310fa5 100644 --- a/docs/managing-remotes.rst +++ b/docs/managing-remotes.rst @@ -135,6 +135,12 @@ For mixing include and exclude filter, following rules apply: .. note:: The ``protected`` flag of remote backup snapshots will not be synced. +Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have +failed to verify during the last :ref:`maintenance_verification`. Hence, a verification +job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware +that a 'resync-corrupt'-job needs to check the manifests of all snapshots in a datastore +and might take much longer than regular sync jobs. + Namespace Support ^^^^^^^^^^^^^^^^^ -- 2.39.5 From g.goller at proxmox.com Thu Nov 21 14:35:08 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 14:35:08 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v4 3/4] fix #3786: ui/cli: add resync-corrupt option on sync-jobs In-Reply-To: <20241121133509.289419-1-g.goller@proxmox.com> References: <20241121133509.289419-1-g.goller@proxmox.com> Message-ID: <20241121133509.289419-4-g.goller@proxmox.com> Add the `resync-corrupt` option to the ui and the `proxmox-backup-manager` cli. It is listed in the `Advanced` section, because it slows the sync-job down and is useless if no verification job was run beforehand. 
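For illustration, the new flag can then also be passed on a manual pull roughly like this (remote and datastore names are made up):

```
# also re-fetch snapshots whose last verification failed, hypothetical names
proxmox-backup-manager pull my-remote remote-store local-store --resync-corrupt true
```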
Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller --- src/bin/proxmox-backup-manager.rs | 16 ++++++++++++++-- www/window/SyncJobEdit.js | 11 +++++++++++ 2 files changed, 25 insertions(+), 2 deletions(-) diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index d887dc1d50a1..02ca0d028225 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -14,8 +14,8 @@ use pbs_api_types::percent_encoding::percent_encode_component; use pbs_api_types::{ BackupNamespace, GroupFilter, RateLimitConfig, SyncDirection, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, - REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA, - VERIFICATION_OUTDATED_AFTER_SCHEMA, + REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::{display_task_log, view_task_result}; use pbs_config::sync; @@ -307,6 +307,7 @@ async fn sync_datastore( group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, param: Value, sync_direction: SyncDirection, ) -> Result { @@ -343,6 +344,10 @@ async fn sync_datastore( args["transfer-last"] = json!(transfer_last) } + if let Some(resync) = resync_corrupt { + args["resync-corrupt"] = Value::from(resync); + } + let mut limit_json = json!(limit); let limit_map = limit_json .as_object_mut() @@ -405,6 +410,10 @@ async fn sync_datastore( schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + }, } } )] @@ -421,6 +430,7 @@ async fn pull_datastore( group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, param: Value, ) -> Result { sync_datastore( @@ -434,6 +444,7 @@ async fn pull_datastore( group_filter, limit, transfer_last, + resync_corrupt, param, SyncDirection::Pull, ) @@ -513,6 +524,7 @@ async fn push_datastore( group_filter, limit, transfer_last, + None, param, SyncDirection::Push, ) diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js index 0e648e7b3e50..03f61bee6494 100644 --- a/www/window/SyncJobEdit.js +++ b/www/window/SyncJobEdit.js @@ -358,6 +358,17 @@ Ext.define('PBS.window.SyncJobEdit', { deleteEmpty: '{!isCreate}', }, }, + { + fieldLabel: gettext('Resync corrupt snapshots'), + xtype: 'proxmoxcheckbox', + name: 'resync-corrupt', + autoEl: { + tag: 'div', + 'data-qtip': gettext('Re-sync snapshots whose verification failed.'), + }, + uncheckedValue: false, + value: false, + }, ], }, { -- 2.39.5 From g.goller at proxmox.com Thu Nov 21 14:35:06 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 14:35:06 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v4 1/4] snapshot: add helper function to retrieve verify_state In-Reply-To: <20241121133509.289419-1-g.goller@proxmox.com> References: <20241121133509.289419-1-g.goller@proxmox.com> Message-ID: <20241121133509.289419-2-g.goller@proxmox.com> Add helper functions to retrieve the verify_state from the manifest of a snapshot. Replaced all the manual "verify_state" parsing with the helper function.
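To illustrate, call sites now boil down to a single match on the helper (sketch only, not part of the diff below):

```
// illustrative sketch: how a caller can consume the new helper
match snapshot.verify_state() {
    Ok(Some(VerifyState::Ok)) => { /* last verification succeeded */ }
    Ok(Some(VerifyState::Failed)) => { /* last verification failed */ }
    Ok(None) => { /* snapshot has not been verified yet */ }
    Err(_) => { /* manifest could not be loaded or parsed */ }
}
```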
Suggested-by: Fabian Gr?nbichler Signed-off-by: Gabriel Goller --- pbs-datastore/src/backup_info.rs | 15 +++++++++++++-- pbs-datastore/src/manifest.rs | 14 +++++++++++++- src/api2/admin/datastore.rs | 16 +++++++--------- src/api2/backup/mod.rs | 13 ++++++------- src/backup/verify.rs | 7 +++---- 5 files changed, 42 insertions(+), 23 deletions(-) diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 62d12b1183df..2d8e0a6d92da 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -8,8 +8,8 @@ use anyhow::{bail, format_err, Error}; use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; use pbs_api_types::{ - Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, - BACKUP_FILE_REGEX, + Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState, + BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, }; use pbs_config::{open_backup_lockfile, BackupLockGuard}; @@ -555,6 +555,17 @@ impl BackupDir { Ok(()) } + + /// Load the verify state from the manifest. + pub fn verify_state(&self) -> Result, anyhow::Error> { + let manifest = self.load_manifest()?; + Ok(manifest + .0 + .verify_state() + .ok() + .flatten() + .map(|svs| svs.state)) + } } impl AsRef for BackupDir { diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index c3df014272a0..3013fab97221 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; -use pbs_api_types::{BackupType, CryptMode, Fingerprint}; +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState}; use pbs_tools::crypt_config::CryptConfig; pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; @@ -242,6 +242,18 @@ impl BackupManifest { let manifest: BackupManifest = serde_json::from_value(json)?; Ok(manifest) } + + /// Get the verify state of the snapshot + /// + /// Note: New snapshots, which have not been verified yet, do not have a status and this + /// function will return `Ok(None)`. 
+ pub fn verify_state(&self) -> Result, anyhow::Error> { + let verify = self.unprotected["verify_state"].clone(); + if verify.is_null() { + return Ok(None); + } + Ok(Some(serde_json::from_value::(verify)?)) + } } impl TryFrom for BackupManifest { diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 99b579f02c50..3624dba41199 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -537,15 +537,13 @@ unsafe fn list_snapshots_blocking( } }; - let verification = manifest.unprotected["verify_state"].clone(); - let verification: Option = - match serde_json::from_value(verification) { - Ok(verify) => verify, - Err(err) => { - eprintln!("error parsing verification state : '{}'", err); - None - } - }; + let verification: Option = match manifest.verify_state() { + Ok(verify) => verify, + Err(err) => { + eprintln!("error parsing verification state : '{}'", err); + None + } + }; let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum()); diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index ea0d0292ec58..605c75e2dfa9 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -19,9 +19,9 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, - BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, - BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, + Authid, BackupNamespace, BackupType, Operation, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, + BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, + CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; @@ -159,13 +159,12 @@ fn upgrade_to_backup_protocol( let info = backup_group.last_backup(true).unwrap_or(None); if let Some(info) = info { let (manifest, _) = info.backup_dir.load_manifest()?; - let verify = manifest.unprotected["verify_state"].clone(); - match serde_json::from_value::(verify) { - Ok(verify) => match verify.state { + match manifest.verify_state() { + Ok(Some(verify)) => match verify.state { VerifyState::Ok => Some(info), VerifyState::Failed => None, }, - Err(_) => { + Ok(None) | Err(_) => { // no verify state found, treat as valid Some(info) } diff --git a/src/backup/verify.rs b/src/backup/verify.rs index 6ef7e8eb3ebb..20c605c4dde6 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -553,10 +553,9 @@ pub fn verify_filter( return true; } - let raw_verify_state = manifest.unprotected["verify_state"].clone(); - match serde_json::from_value::(raw_verify_state) { - Err(_) => true, // no last verification, always include - Ok(last_verify) => { + match manifest.verify_state() { + Ok(None) | Err(_) => true, // no last verification, always include + Ok(Some(last_verify)) => { match outdated_after { None => false, // never re-verify if ignored and no max age Some(max_age) => { -- 2.39.5 From f.gruenbichler at proxmox.com Thu Nov 21 15:22:47 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 15:22:47 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 08/26] api: removable datastore creation In-Reply-To: <20241113150102.164820-9-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-9-h.laimer@proxmox.com> Message-ID: <1732197970.jvav0hqb6h.astroid@yuna.none> 
On November 13, 2024 4:00 pm, Hannes Laimer wrote: > Devices can contain multiple datastores, the only limitation is that > they are not allowed to be nested. > If the specified path already contains a datastore, `reuse datastore` has > to be set so it'll be added without creating a chunkstore. > > Signed-off-by: Hannes Laimer > --- > changes since v12: > * use recently added 'reuse datastore' > * allow creation even if device is already used by datastore, just no > nesting > > src/api2/config/datastore.rs | 50 +++++++++++++++++++++++++++++++----- > 1 file changed, 44 insertions(+), 6 deletions(-) > > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index 374c302f..9140a7a4 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -20,7 +20,8 @@ use pbs_config::BackupLockGuard; > use pbs_datastore::chunk_store::ChunkStore; > > use crate::api2::admin::{ > - prune::list_prune_jobs, sync::list_sync_jobs, verify::list_verification_jobs, > + datastore::do_mount_device, prune::list_prune_jobs, sync::list_sync_jobs, > + verify::list_verification_jobs, > }; > use crate::api2::config::prune::{delete_prune_job, do_create_prune_job}; > use crate::api2::config::sync::delete_sync_job; > @@ -31,6 +32,7 @@ use pbs_config::CachedUserInfo; > use proxmox_rest_server::WorkerTask; > > use crate::server::jobstate; > +use crate::tools::disks::unmount_by_mountpoint; > > #[api( > input: { > @@ -72,7 +74,11 @@ pub(crate) fn do_create_datastore( > datastore: DataStoreConfig, > reuse_datastore: bool, > ) -> Result<(), Error> { > - let path: PathBuf = datastore.path.clone().into(); > + let path: PathBuf = datastore.absolute_path().into(); > + let need_unmount = datastore.get_mount_point().is_some() && { nit: would be easier to read as let need_unmount = datastore.get_mount_point().is_some(); if need_unmount { do_mount_device(..)?; } > + do_mount_device(datastore.clone())?; > + true > + }; > > if path.parent().is_none() { > bail!("cannot create datastore in root path"); this can fail (well, not really for a removable datastore), but also some parsing code between this > @@ -84,24 +90,32 @@ and this, and this repeats below as well.. it might be better to wrap most of the body after the mounting, check for any error, then do the cleanup/unmounting in one place? > )?; > > if reuse_datastore { > - ChunkStore::verify_chunkstore(&path)?; > + if let Err(e) = ChunkStore::verify_chunkstore(&path) { > + let _ = need_unmount && unmount_by_mountpoint(&path).is_ok(); > + return Err(e); > + } then this > } else { > if let Ok(dir) = std::fs::read_dir(&path) { > for file in dir { > let name = file?.file_name(); > if !name.to_str().map_or(false, |name| name.starts_with('.')) { > + let _ = need_unmount && unmount_by_mountpoint(&path).is_ok(); > bail!("datastore path is not empty"); and this > } > } > } > let backup_user = pbs_config::backup_user()?; > - let _store = ChunkStore::create( > + let res = ChunkStore::create( > &datastore.name, > - path, > + path.clone(), > backup_user.uid, > backup_user.gid, > tuning.sync_level.unwrap_or_default(), > - )?; > + ); > + if let Err(e) = res { > + let _ = need_unmount && unmount_by_mountpoint(&path).is_ok(); and this could all just return/bubble up the error, and the cleanup logic lives on call level higher..
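e.g. something along these lines (rough sketch, `do_create_datastore_inner` is a made-up name for the factored-out body):

```
// sketch: mount first, run all fallible steps in one helper, unmount once on error
let need_unmount = datastore.get_mount_point().is_some();
if need_unmount {
    do_mount_device(datastore.clone())?;
}
let result = do_create_datastore_inner(lock, config, datastore, reuse_datastore);
if result.is_err() && need_unmount {
    if let Err(err) = unmount_by_mountpoint(&path) {
        warn!("failed to clean up mountpoint {path:?} - {err}");
    }
}
result
```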
> + return Err(e); > + } > } > > config.set_data(&datastore.name, "datastore", &datastore)?; > @@ -145,6 +159,30 @@ pub fn create_datastore( > param_bail!("name", "datastore '{}' already exists.", config.name); > } > > + if !config.path.starts_with("/") { > + param_bail!("path", "expected an abolute path, '{}' is not", config.path); > + } but the schema is now updated to allow relative paths for removable datastores? doesn't this need another condition to only apply for removable datastores? I guess this was only tested via the create_datastore_disk code path, which calls do_create_datastore directly, and not this API endpoint.. > + > + if let Some(uuid) = &config.backing_device { but this here should apply to all datastores? it causes GC confusion also for regular ones if they get nested.. and since this only affects attempts to create datastores, it should be okay to make it fatal? > + for (store_name, (_, store_config)) in §ion_config.sections { > + if let (Some(store_uuid), Some(store_path)) = ( > + store_config["backing-device"].as_str(), > + store_config["path"].as_str(), > + ) { > + // We don't allow two datastores to be nested in each other, so if > + // ds1: /a/b -> can't create new one at /, /a or /a/b/..., /a/c is fine > + if store_uuid == uuid > + && (store_path.starts_with(&config.path) || config.path.starts_with(store_path)) > + { > + param_bail!( > + "path", > + "can't nest datastores, '{store_name}' already in '{store_path}'", "nested datastores not allowed: " is a bit easier/nicer to read I think > + ); > + } > + }; > + } > + } > + > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Thu Nov 21 15:27:05 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 15:27:05 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 09/26] pbs-api-types: add mount_status field to DataStoreListItem In-Reply-To: <20241113150102.164820-10-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-10-h.laimer@proxmox.com> Message-ID: <1732199058.a4aqwwxo18.astroid@yuna.none> On November 13, 2024 4:00 pm, Hannes Laimer wrote: > Only removable datastores have a mount status, so normal ones will have > `None`, and for removable ones it is either mounted (`Some(true)`) or > not mounted (`Some(false)`). > > Signed-off-by: Hannes Laimer > --- > changes since v12: > * replace is_availabl+removable field combo, with single mount_status > field > > pbs-api-types/src/datastore.rs | 9 ++++++++- > src/api2/admin/datastore.rs | 22 ++++++++++++++-------- > src/api2/status/mod.rs | 29 +++++++++++++++++++++++++---- > 3 files changed, 47 insertions(+), 13 deletions(-) > > diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs > index 888f5d5b..e111d692 100644 > --- a/pbs-api-types/src/datastore.rs > +++ b/pbs-api-types/src/datastore.rs > @@ -454,6 +454,9 @@ impl DataStoreConfig { > pub struct DataStoreListItem { > pub store: String, > pub comment: Option, > + /// Is datastore mounted, None for not-removable datastores > + #[serde(skip_serializing_if = "Option::is_none")] > + pub mount_status: Option, Option is okay for internal usage, but in an api type, wouldn't a proper enum be nicer? 
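e.g. something like this (rough sketch, type name and derives are just illustrative):

```
// sketch of a dedicated api enum instead of Option<bool>
#[api]
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq, Eq)]
#[serde(rename_all = "kebab-case")]
pub enum DataStoreMountStatus {
    /// Not a removable datastore, so there is no mount status
    NonRemovable,
    /// Removable and currently mounted
    Mounted,
    /// Removable, but currently not mounted
    NotMounted,
}
```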
NonRemovable, Mounted, NotMounted > /// If the datastore is in maintenance mode, information about it > #[serde(skip_serializing_if = "Option::is_none")] > pub maintenance: Option, > @@ -1453,6 +1456,9 @@ pub struct DataStoreStatusListItem { > /// The available bytes of the underlying storage. (-1 on error) > #[serde(skip_serializing_if = "Option::is_none")] > pub avail: Option, > + /// The datastore is mounted, None for not-removable datastores > + #[serde(skip_serializing_if = "Option::is_none")] > + pub mount_status: Option, Option is okay for internal usage, but in an api type, wouldn't a proper enum be nicer? also would allow differentiating datastore types more easily in client code (if just for display purposes) NonRemovable, Mounted, NotMounted > /// A list of usages of the past (last Month). > #[serde(skip_serializing_if = "Option::is_none")] > pub history: Option>>, > @@ -1477,12 +1483,13 @@ pub struct DataStoreStatusListItem { > } > > impl DataStoreStatusListItem { > - pub fn empty(store: &str, err: Option) -> Self { > + pub fn empty(store: &str, err: Option, mount_status: Option) -> Self { > DataStoreStatusListItem { > store: store.to_owned(), > total: None, > used: None, > avail: None, > + mount_status, > history: None, > history_start: None, > history_delta: None, > diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs > index a12262e7..a9d9040f 100644 > --- a/src/api2/admin/datastore.rs > +++ b/src/api2/admin/datastore.rs > @@ -1310,8 +1310,8 @@ pub fn get_datastore_list( > > let mut list = Vec::new(); > > - for (store, (_, data)) in &config.sections { > - let acl_path = &["datastore", store]; > + for (store, (_, data)) in config.sections { > + let acl_path = &["datastore", &store]; > let user_privs = user_info.lookup_privs(&auth_id, acl_path); > let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; > > @@ -1322,15 +1322,21 @@ pub fn get_datastore_list( > } > } > > + let store_config: DataStoreConfig = serde_json::from_value(data)?; > + > + let mount_status = store_config > + .get_mount_point() > + .zip(store_config.backing_device.as_ref()) > + .map(|(mount_point, device_uuid)| { > + is_datastore_mounted_at(mount_point, device_uuid.to_string()) > + }); another variant of this helper ;) > + > if allowed || allow_id { > list.push(DataStoreListItem { > store: store.clone(), > - comment: if !allowed { > - None > - } else { > - data["comment"].as_str().map(String::from) > - }, > - maintenance: data["maintenance-mode"].as_str().map(String::from), > + comment: store_config.comment.filter(|_| allowed), > + mount_status, > + maintenance: store_config.maintenance_mode, > }); > } > } > diff --git a/src/api2/status/mod.rs b/src/api2/status/mod.rs > index 113aa985..508331fe 100644 > --- a/src/api2/status/mod.rs > +++ b/src/api2/status/mod.rs > @@ -10,11 +10,12 @@ use proxmox_schema::api; > use proxmox_sortable_macro::sortable; > > use pbs_api_types::{ > - Authid, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, > + Authid, DataStoreConfig, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, > + PRIV_DATASTORE_BACKUP, > }; > > use pbs_config::CachedUserInfo; > -use pbs_datastore::DataStore; > +use pbs_datastore::{is_datastore_mounted_at, DataStore}; > > use crate::server::metric_collection::rrd::extract_rrd_data; > use crate::tools::statistics::linear_regression; > @@ -51,10 +52,25 @@ pub async fn datastore_status( > for (store, (_, _)) in &config.sections { > let user_privs = 
user_info.lookup_privs(&auth_id, &["datastore", store]); > let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; > + > + let store_config = config.lookup::("datastore", store)?; > + > + let mount_status = store_config > + .get_mount_point() > + .zip(store_config.backing_device.as_ref()) > + .map(|(mount_point, device_uuid)| { > + is_datastore_mounted_at(mount_point, device_uuid.to_string()) > + }); > + > + if let Some(false) = mount_status { > + list.push(DataStoreStatusListItem::empty(store, None, mount_status)); > + continue; > + } > + > if !allowed { > if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) { > if can_access_any_namespace(datastore, &auth_id, &user_info) { > - list.push(DataStoreStatusListItem::empty(store, None)); > + list.push(DataStoreStatusListItem::empty(store, None, mount_status)); > } > } > continue; > @@ -63,7 +79,11 @@ pub async fn datastore_status( > let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) { > Ok(datastore) => datastore, > Err(err) => { > - list.push(DataStoreStatusListItem::empty(store, Some(err.to_string()))); > + list.push(DataStoreStatusListItem::empty( > + store, > + Some(err.to_string()), > + mount_status, > + )); > continue; > } > }; > @@ -74,6 +94,7 @@ pub async fn datastore_status( > total: Some(status.total), > used: Some(status.used), > avail: Some(status.available), > + mount_status, > history: None, > history_start: None, > history_delta: None, > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Thu Nov 21 15:34:42 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 15:34:42 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 11/26] add auto-mounting for removable datastores In-Reply-To: <20241113150102.164820-12-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-12-h.laimer@proxmox.com> Message-ID: <1732199277.irqvh6rzyh.astroid@yuna.none> On November 13, 2024 4:00 pm, Hannes Laimer wrote: > If a device houses multiple datastore, none of them will be mounted > automatically. If a device only contains a single datastore it will be > mounted automatically. The reason for not mounting multiple datastore > automatically is that we don't know which is actually wanted, and since > mounting all means also all have to be unmounted manually, it made sense > to have the user choose which to mount. > > Signed-off-by: Hannes Laimer did you reject the variant with a custom helper binary instead of an internal command? 
;) > --- > changes since v12: > * make service not dynamic > * don't logs UUIDs that don't contains known datastores > > debian/proxmox-backup-server.install | 1 + > debian/proxmox-backup-server.udev | 3 + > etc/Makefile | 3 +- > etc/removable-device-attach at .service | 8 +++ > src/bin/proxmox_backup_manager/datastore.rs | 62 ++++++++++++++++++++- > 5 files changed, 75 insertions(+), 2 deletions(-) > create mode 100644 etc/removable-device-attach at .service > > diff --git a/debian/proxmox-backup-server.install b/debian/proxmox-backup-server.install > index 79757ead..ff581e3d 100644 > --- a/debian/proxmox-backup-server.install > +++ b/debian/proxmox-backup-server.install > @@ -4,6 +4,7 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/ > etc/proxmox-backup-daily-update.timer /lib/systemd/system/ > etc/proxmox-backup-proxy.service /lib/systemd/system/ > etc/proxmox-backup.service /lib/systemd/system/ > +etc/removable-device-attach at .service /lib/systemd/system/ > usr/bin/pmt > usr/bin/pmtx > usr/bin/proxmox-tape > diff --git a/debian/proxmox-backup-server.udev b/debian/proxmox-backup-server.udev > index afdfb2bc..e21b8bc7 100644 > --- a/debian/proxmox-backup-server.udev > +++ b/debian/proxmox-backup-server.udev > @@ -16,3 +16,6 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER > SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg" > > LABEL="persistent_storage_tape_end" > + > +# triggers the mounting of a removable device > +ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}" > \ No newline at end of file > diff --git a/etc/Makefile b/etc/Makefile > index 42f639f6..b206b9ca 100644 > --- a/etc/Makefile > +++ b/etc/Makefile > @@ -2,12 +2,13 @@ include ../defines.mk > > UNITS := \ > proxmox-backup-daily-update.timer \ > + removable-device-attach at .service \ nit: the last line shouldn't have a trailing \ > > DYNAMIC_UNITS := \ > proxmox-backup-banner.service \ > proxmox-backup-daily-update.service \ > proxmox-backup.service \ > - proxmox-backup-proxy.service > + proxmox-backup-proxy.service \ same here > > all: $(UNITS) $(DYNAMIC_UNITS) pbs-enterprise.list > > diff --git a/etc/removable-device-attach at .service b/etc/removable-device-attach at .service > new file mode 100644 > index 00000000..e10d1ea3 > --- /dev/null > +++ b/etc/removable-device-attach at .service > @@ -0,0 +1,8 @@ > +[Unit] > +Description=Try to mount the removable device of a datastore with uuid '%i'. 
> +After=proxmox-backup-proxy.service > +Requires=proxmox-backup-proxy.service > + > +[Service] > +Type=simple > +ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i > diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs > index f2795b39..05f35279 100644 > --- a/src/bin/proxmox_backup_manager/datastore.rs > +++ b/src/bin/proxmox_backup_manager/datastore.rs > @@ -1,4 +1,4 @@ > -use anyhow::{format_err, Error}; > +use anyhow::{bail, format_err, Error}; > use serde_json::Value; > > use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; > @@ -195,6 +195,62 @@ async fn delete_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> > Ok(()) > } > > +#[api( > + protected: true, > + input: { > + properties: { > + uuid: { > + type: String, > + description: "The UUID of the device that should be mounted", > + }, > + "output-format": { > + schema: OUTPUT_FORMAT, > + optional: true, > + }, > + }, > + }, > +)] > +/// Try mounting a removable datastore given the UUID. > +async fn uuid_mount(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result { > + let uuid = param["uuid"] > + .as_str() > + .ok_or_else(|| format_err!("uuid has to be specified"))?; > + > + let info = &api2::config::datastore::API_METHOD_LIST_DATASTORES; > + let data: Value = match info.handler { > + ApiHandler::Sync(handler) => (handler)(serde_json::json!({}), info, rpcenv)?, > + _ => unreachable!(), > + }; couldn't this just load the datastore.cfg ? > + > + let matching_stores = data.as_array().map_or(Vec::new(), |list| { > + list.iter() > + .filter_map(Value::as_object) > + .filter(|store| store.get("backing-device").map_or(false, |d| d.eq(&uuid))) > + .collect() > + }); then this could use regular methods ;) > + > + if matching_stores.len() != 1 { > + return Ok(Value::Null); > + } nit: see below.. > + > + let store_name = matching_stores > + .get(0) > + .and_then(|s| s.get("name").and_then(Value::as_str)); > + if let Some(store_name) = store_name { > + let info = &api2::admin::datastore::API_METHOD_MOUNT; > + let mount_param = serde_json::json!({ > + "store": store_name, > + }); > + let result = match info.handler { > + ApiHandler::Sync(handler) => (handler)(mount_param, info, rpcenv)?, > + _ => unreachable!(), > + }; > + crate::wait_for_local_worker(result.as_str().unwrap()).await?; this could use the mount wrapper we already have here in manager.. > + return Ok(Value::Null); > + } > + bail!("'{uuid}' is not associated with any datastore") nit: this bail is dead code? I'd just restructure it, easier to read.. 
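spelled out, something like this (untested sketch, reusing the handler invocation from the patch):

```
// sketch: single exit point, mount only on an unambiguous 1:1 match
if matching_stores.len() == 1 {
    if let Some(store_name) = matching_stores
        .first()
        .and_then(|s| s.get("name").and_then(Value::as_str))
    {
        let info = &api2::admin::datastore::API_METHOD_MOUNT;
        let mount_param = serde_json::json!({ "store": store_name });
        let result = match info.handler {
            ApiHandler::Sync(handler) => (handler)(mount_param, info, rpcenv)?,
            _ => unreachable!(),
        };
        crate::wait_for_local_worker(result.as_str().unwrap()).await?;
    }
}
Ok(Value::Null)
```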
// only auto-mount if 1:1 mapping between datastore and device if len == 1 { mount it } return Ok(Value::Null)) > +} > + > pub fn datastore_commands() -> CommandLineInterface { > let cmd_def = CliCommandMap::new() > .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) > @@ -240,6 +296,10 @@ pub fn datastore_commands() -> CommandLineInterface { > pbs_config::datastore::complete_calendar_event, > ), > ) > + .insert( > + "uuid-mount", > + CliCommand::new(&API_METHOD_UUID_MOUNT).arg_param(&["uuid"]), > + ) > .insert( > "remove", > CliCommand::new(&API_METHOD_DELETE_DATASTORE) > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Thu Nov 21 15:39:35 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 15:39:35 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 12/26] datastore: handle deletion of removable datastore properly In-Reply-To: <20241113150102.164820-13-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-13-h.laimer@proxmox.com> Message-ID: <1732199707.qs3vqm2wwz.astroid@yuna.none> On November 13, 2024 4:00 pm, Hannes Laimer wrote: > Data deletion is only possible if the datastore is mounted, won't attempt > mounting it for the purpose of deleting data is made. this commit message is missing some word (or has a few too many?) > > Signed-off-by: Hannes Laimer > --- > pbs-datastore/src/datastore.rs | 4 +++- > src/api2/config/datastore.rs | 37 +++++++++++++++++++++++++++++++++- > 2 files changed, 39 insertions(+), 2 deletions(-) > > diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs > index cadf9245..83e4dcb0 100644 > --- a/pbs-datastore/src/datastore.rs > +++ b/pbs-datastore/src/datastore.rs > @@ -1525,7 +1525,9 @@ impl DataStore { > // weird, but ok > } > Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => { > - warn!("Cannot delete datastore directory (is it a mount point?).") > + if datastore_config.backing_device.is_none() { > + warn!("Cannot delete datastore directory (is it a mount point?).") > + } > } > Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => { > warn!("Datastore directory not empty, not deleting.") > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index 9140a7a4..60bff9e2 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -1,4 +1,4 @@ > -use std::path::PathBuf; > +use std::path::{Path, PathBuf}; > > use ::serde::{Deserialize, Serialize}; > use anyhow::{bail, Error}; > @@ -29,6 +29,7 @@ use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_bac > use crate::api2::config::verify::delete_verification_job; > use pbs_config::CachedUserInfo; > > +use pbs_datastore::is_datastore_mounted_at; > use proxmox_rest_server::WorkerTask; > > use crate::server::jobstate; > @@ -557,6 +558,21 @@ pub async fn delete_datastore( > http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name); > } > > + let store_config: DataStoreConfig = config.lookup("datastore", &name)?; > + let mount_status = store_config > + .get_mount_point() > + .zip(store_config.backing_device.as_ref()) > + .map(|(mount_point, device_uuid)| { > + is_datastore_mounted_at(mount_point, device_uuid.to_string()) > + }); another instance of this ;) > + > + if destroy_data && mount_status == Some(false) { > + 
http_bail!( > + BAD_REQUEST, > + "cannot destroy data on '{name}' unless the datastore is mounted" > + ); > + } > + > if !keep_job_configs { > for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { > delete_verification_job(job.config.id, None, rpcenv)? > @@ -583,6 +599,19 @@ pub async fn delete_datastore( > > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > + let name_copy = name.clone(); nit: why/leftover? > + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) > + { > + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); > + let _ = proxmox_daemon::command_socket::send_raw( > + sock, > + &format!( > + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", > + name_copy > + ), > + ) > + .await; > + }; > > let upid = WorkerTask::new_thread( > "delete-datastore", > @@ -595,6 +624,12 @@ pub async fn delete_datastore( > // ignore errors > let _ = jobstate::remove_state_file("prune", &name); > let _ = jobstate::remove_state_file("garbage_collection", &name); > + if destroy_data { > + if let Some(mount_point) = store_config.get_mount_point() { > + let _ = unmount_by_mountpoint(Path::new(&mount_point)); > + let _ = std::fs::remove_dir(&mount_point); errors here should be logged I think? ignoring them is okay (IMHO the same applies above for the state files..) > + } > + } > > if let Err(err) = > proxmox_async::runtime::block_on(crate::server::notify_datastore_removed()) > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From h.laimer at proxmox.com Thu Nov 21 15:41:23 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Thu, 21 Nov 2024 15:41:23 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 09/26] pbs-api-types: add mount_status field to DataStoreListItem In-Reply-To: <1732199058.a4aqwwxo18.astroid@yuna.none> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-10-h.laimer@proxmox.com> <1732199058.a4aqwwxo18.astroid@yuna.none> Message-ID: On Thu Nov 21, 2024 at 3:27 PM CET, Fabian Gr?nbichler wrote: > On November 13, 2024 4:00 pm, Hannes Laimer wrote: >> Only removable datastores have a mount status, so normal ones will have >> `None`, and for removable ones it is either mounted (`Some(true)`) or >> not mounted (`Some(false)`). >> >> Signed-off-by: Hannes Laimer >> --- >> changes since v12: >> * replace is_availabl+removable field combo, with single mount_status >> field >> >> pbs-api-types/src/datastore.rs | 9 ++++++++- >> src/api2/admin/datastore.rs | 22 ++++++++++++++-------- >> src/api2/status/mod.rs | 29 +++++++++++++++++++++++++---- >> 3 files changed, 47 insertions(+), 13 deletions(-) >> >> diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs >> index 888f5d5b..e111d692 100644 >> --- a/pbs-api-types/src/datastore.rs >> +++ b/pbs-api-types/src/datastore.rs >> @@ -454,6 +454,9 @@ impl DataStoreConfig { >> pub struct DataStoreListItem { >> pub store: String, >> pub comment: Option, >> + /// Is datastore mounted, None for not-removable datastores >> + #[serde(skip_serializing_if = "Option::is_none")] >> + pub mount_status: Option, > > Option is okay for internal usage, but in an api type, wouldn't a > proper enum be nicer? 
> > NonRemovable, Mounted, NotMounted > I had, but `NonRemovable` kind of bothered me since it is not really a mount status. But what you are saying makes sense. >> /// If the datastore is in maintenance mode, information about it >> #[serde(skip_serializing_if = "Option::is_none")] >> pub maintenance: Option, >> @@ -1453,6 +1456,9 @@ pub struct DataStoreStatusListItem { >> /// The available bytes of the underlying storage. (-1 on error) >> #[serde(skip_serializing_if = "Option::is_none")] >> pub avail: Option, >> + /// The datastore is mounted, None for not-removable datastores >> + #[serde(skip_serializing_if = "Option::is_none")] >> + pub mount_status: Option, > > Option is okay for internal usage, but in an api type, wouldn't a > proper enum be nicer? also would allow differentiating datastore types > more easily in client code (if just for display purposes) > > NonRemovable, Mounted, NotMounted > >> /// A list of usages of the past (last Month). >> #[serde(skip_serializing_if = "Option::is_none")] >> pub history: Option>>, >> @@ -1477,12 +1483,13 @@ pub struct DataStoreStatusListItem { >> } >> >> impl DataStoreStatusListItem { >> - pub fn empty(store: &str, err: Option) -> Self { >> + pub fn empty(store: &str, err: Option, mount_status: Option) -> Self { >> DataStoreStatusListItem { >> store: store.to_owned(), >> total: None, >> used: None, >> avail: None, >> + mount_status, >> history: None, >> history_start: None, >> history_delta: None, >> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs >> index a12262e7..a9d9040f 100644 >> --- a/src/api2/admin/datastore.rs >> +++ b/src/api2/admin/datastore.rs >> @@ -1310,8 +1310,8 @@ pub fn get_datastore_list( >> >> let mut list = Vec::new(); >> >> - for (store, (_, data)) in &config.sections { >> - let acl_path = &["datastore", store]; >> + for (store, (_, data)) in config.sections { >> + let acl_path = &["datastore", &store]; >> let user_privs = user_info.lookup_privs(&auth_id, acl_path); >> let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; >> >> @@ -1322,15 +1322,21 @@ pub fn get_datastore_list( >> } >> } >> >> + let store_config: DataStoreConfig = serde_json::from_value(data)?; >> + >> + let mount_status = store_config >> + .get_mount_point() >> + .zip(store_config.backing_device.as_ref()) >> + .map(|(mount_point, device_uuid)| { >> + is_datastore_mounted_at(mount_point, device_uuid.to_string()) >> + }); > > another variant of this helper ;) > >> + >> if allowed || allow_id { >> list.push(DataStoreListItem { >> store: store.clone(), >> - comment: if !allowed { >> - None >> - } else { >> - data["comment"].as_str().map(String::from) >> - }, >> - maintenance: data["maintenance-mode"].as_str().map(String::from), >> + comment: store_config.comment.filter(|_| allowed), >> + mount_status, >> + maintenance: store_config.maintenance_mode, >> }); >> } >> } >> diff --git a/src/api2/status/mod.rs b/src/api2/status/mod.rs >> index 113aa985..508331fe 100644 >> --- a/src/api2/status/mod.rs >> +++ b/src/api2/status/mod.rs >> @@ -10,11 +10,12 @@ use proxmox_schema::api; >> use proxmox_sortable_macro::sortable; >> >> use pbs_api_types::{ >> - Authid, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, >> + Authid, DataStoreConfig, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, >> + PRIV_DATASTORE_BACKUP, >> }; >> >> use pbs_config::CachedUserInfo; >> -use pbs_datastore::DataStore; >> +use pbs_datastore::{is_datastore_mounted_at, DataStore}; >> >> use 
crate::server::metric_collection::rrd::extract_rrd_data; >> use crate::tools::statistics::linear_regression; >> @@ -51,10 +52,25 @@ pub async fn datastore_status( >> for (store, (_, _)) in &config.sections { >> let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]); >> let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; >> + >> + let store_config = config.lookup::("datastore", store)?; >> + >> + let mount_status = store_config >> + .get_mount_point() >> + .zip(store_config.backing_device.as_ref()) >> + .map(|(mount_point, device_uuid)| { >> + is_datastore_mounted_at(mount_point, device_uuid.to_string()) >> + }); >> + >> + if let Some(false) = mount_status { >> + list.push(DataStoreStatusListItem::empty(store, None, mount_status)); >> + continue; >> + } >> + >> if !allowed { >> if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) { >> if can_access_any_namespace(datastore, &auth_id, &user_info) { >> - list.push(DataStoreStatusListItem::empty(store, None)); >> + list.push(DataStoreStatusListItem::empty(store, None, mount_status)); >> } >> } >> continue; >> @@ -63,7 +79,11 @@ pub async fn datastore_status( >> let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) { >> Ok(datastore) => datastore, >> Err(err) => { >> - list.push(DataStoreStatusListItem::empty(store, Some(err.to_string()))); >> + list.push(DataStoreStatusListItem::empty( >> + store, >> + Some(err.to_string()), >> + mount_status, >> + )); >> continue; >> } >> }; >> @@ -74,6 +94,7 @@ pub async fn datastore_status( >> total: Some(status.total), >> used: Some(status.used), >> avail: Some(status.available), >> + mount_status, >> history: None, >> history_start: None, >> history_delta: None, >> -- >> 2.39.5 >> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >> >> >> > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel From s.ivanov at proxmox.com Thu Nov 21 15:47:52 2024 From: s.ivanov at proxmox.com (Stoiko Ivanov) Date: Thu, 21 Nov 2024 15:47:52 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] docs: fix wrong product name in certificate docs Message-ID: <20241121144752.449846-1-s.ivanov@proxmox.com> this got reported via e-mail - seems this one occurrence was forgotten. grepped through the docs (and the whole repo) for 'Mail' and 'Gateway', and it seems this was the only one. Fixes: cbd7db1d ("docs: certificates") Signed-off-by: Stoiko Ivanov --- docs/certificate-management.rst | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/docs/certificate-management.rst b/docs/certificate-management.rst index 1c884630..bc3c28b4 100644 --- a/docs/certificate-management.rst +++ b/docs/certificate-management.rst @@ -44,10 +44,8 @@ web-interface/API or using the ``proxmox-backup-manager`` CLI tool. Upload Custom Certificate ~~~~~~~~~~~~~~~~~~~~~~~~~ -If you already have a certificate which you want to use for a Proxmox -Mail Gateway host, you can simply upload that certificate over the web -interface. - +If you already have a certificate which you want to use for a `Proxmox Backup`_ +host, you can simply upload that certificate over the web interface. .. 
image:: images/screenshots/pbs-gui-certs-upload-custom.png :target: _images/pbs-gui-certs-upload-custom.png -- 2.39.5 From f.gruenbichler at proxmox.com Thu Nov 21 15:51:18 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 15:51:18 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 22/26] api: node: allow creation of removable datastore through directory endpoint In-Reply-To: <20241113150102.164820-23-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-23-h.laimer@proxmox.com> Message-ID: <1732200015.edfu1kxtjr.astroid@yuna.none> On November 13, 2024 4:00 pm, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > src/api2/node/disks/directory.rs | 59 +++++++++++++++++++++++++++++--- > 1 file changed, 54 insertions(+), 5 deletions(-) > > diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs > index 7f540220..7e020e27 100644 > --- a/src/api2/node/disks/directory.rs > +++ b/src/api2/node/disks/directory.rs > @@ -123,6 +123,11 @@ pub fn list_datastore_mounts() -> Result, Error> { > description: "Configure a datastore using the directory.", > type: bool, > optional: true, > + default: false, > + }, > + "removable-datastore": { > + description: "The added datastore is removable.", > + type: bool, > }, > filesystem: { > type: FileSystemType, > @@ -141,7 +146,8 @@ pub fn list_datastore_mounts() -> Result, Error> { > pub fn create_datastore_disk( > name: String, > disk: String, > - add_datastore: Option, > + add_datastore: bool, > + removable_datastore: bool, > filesystem: Option, > rpcenv: &mut dyn RpcEnvironment, > ) -> Result { > @@ -155,8 +161,51 @@ pub fn create_datastore_disk( > bail!("disk '{}' is already in use.", disk); > } > > - let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); > + if add_datastore && removable_datastore { > + let upid_str = WorkerTask::new_thread( > + "dircreate", > + Some(name.clone()), > + auth_id, > + to_stdout, > + move |_worker| { > + info!("create removable datastore '{name}' on disk {disk}"); > + > + let filesystem = filesystem.unwrap_or(FileSystemType::Ext4); > + > + let manager = DiskManage::new(); > + > + let disk = manager.disk_by_name(&disk)?; > + > + let partition = create_single_linux_partition(&disk)?; > + create_file_system(&partition, filesystem)?; > + > + let uuid = get_fs_uuid(&partition)?; > + > + let lock = pbs_config::datastore::lock_config()?; > + let datastore: DataStoreConfig = serde_json::from_value( > + json!({ "name": name, "path": name, "backing-device": uuid }), > + )?; > + > + let (config, _digest) = pbs_config::datastore::config()?; > > + if config.sections.get(&datastore.name).is_some() { > + bail!("datastore '{}' already exists.", datastore.name); > + } > + > + // we don't have to check if the UUID is already in use since we just created the > + // fs ourself > + > + crate::api2::config::datastore::do_create_datastore( > + lock, config, datastore, false, this is also the case for the regular non-removable datastores here, but it also means that one bug in create_datastore was missed, and some checks are missing (some can never fail, but nested checks would make sense for the non-removable existing code below as well, so maybe they should be moved out into a helper that can be re-used for that?) > + )?; > + > + Ok(()) > + }, this is very similar to the code below (not shown here in the patch) for non-removable datastores, and could easily be switched around..
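roughly like this (untested sketch; only the created config would differ between the two cases):

```
// sketch: one worker closure for both removable and non-removable datastores
move |_worker| {
    info!("create datastore '{name}' on disk {disk}");

    let filesystem = filesystem.unwrap_or(FileSystemType::Ext4);
    let disk = DiskManage::new().disk_by_name(&disk)?;
    let partition = create_single_linux_partition(&disk)?;
    create_file_system(&partition, filesystem)?;
    let uuid = get_fs_uuid(&partition)?;

    let datastore: DataStoreConfig = if removable_datastore {
        serde_json::from_value(json!({ "name": name, "path": name, "backing-device": uuid }))?
    } else {
        // systemd mount unit setup (mount_point etc.) as in the existing
        // non-removable branch, elided here
        serde_json::from_value(json!({ "name": name, "path": mount_point }))?
    };

    let lock = pbs_config::datastore::lock_config()?;
    let (config, _digest) = pbs_config::datastore::config()?;
    crate::api2::config::datastore::do_create_datastore(lock, config, datastore, false)?;
    Ok(())
}
```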
> + )?; > + return Ok(upid_str); > + }; > + > + let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); if this part here is skipped for removable datastores, then a single worker thread implementation with some conditional parts can be used.. > // check if the default path exists already. > // bail if it is not empty or another filesystem mounted on top > let default_path = std::path::PathBuf::from(&mount_point); > @@ -183,7 +232,6 @@ pub fn create_datastore_disk( > move |_worker| { > info!("create datastore '{name}' on disk {disk}"); > > - let add_datastore = add_datastore.unwrap_or(false); > let filesystem = filesystem.unwrap_or(FileSystemType::Ext4); > > let manager = DiskManage::new(); > @@ -248,8 +296,9 @@ pub fn delete_datastore_disk(name: String) -> Result<(), Error> { > // path of datastore cannot be changed > let (config, _) = pbs_config::datastore::config()?; > let datastores: Vec = config.convert_to_typed_array("datastore")?; > - let conflicting_datastore: Option = > - datastores.into_iter().find(|ds| ds.absolute_path() == path); > + let conflicting_datastore: Option = datastores.into_iter().find(|ds| { > + ds.absolute_path() == path || ds.get_mount_point().map_or(false, |mp| mp == path) isn't this redundant? for removable datastores, absolute_path will match path as well.. > + }); > > if let Some(conflicting_datastore) = conflicting_datastore { > bail!( > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Thu Nov 21 15:54:22 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 15:54:22 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 23/26] api: node: include removable datastores in directory list In-Reply-To: <20241113150102.164820-24-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> <20241113150102.164820-24-h.laimer@proxmox.com> Message-ID: <1732200691.cybyurdruo.astroid@yuna.none> On November 13, 2024 4:00 pm, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > src/api2/node/disks/directory.rs | 29 ++++++++++++++++++++++++++++- > 1 file changed, 28 insertions(+), 1 deletion(-) > > diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs > index 7e020e27..21d2bcc4 100644 > --- a/src/api2/node/disks/directory.rs > +++ b/src/api2/node/disks/directory.rs > @@ -45,6 +45,8 @@ pub struct DatastoreMountInfo { > pub path: String, > /// The mounted device. 
> pub device: String, > + /// This is removable > + pub removable: bool, > /// File system type > pub filesystem: Option, > /// Mount options > @@ -61,7 +63,7 @@ pub struct DatastoreMountInfo { > } > }, > returns: { > - description: "List of systemd datastore mount units.", > + description: "List of removable-datastore devices and systemd datastore mount units.", > type: Array, > items: { > type: DatastoreMountInfo, > @@ -100,6 +102,31 @@ pub fn list_datastore_mounts() -> Result, Error> { > path: data.Where, > filesystem: data.Type, > options: data.Options, > + removable: false, > + }); > + } > + > + let (config, _digest) = pbs_config::datastore::config()?; > + let store_list: Vec = config.convert_to_typed_array("datastore")?; > + > + for item in store_list > + .into_iter() > + .filter(|store| store.backing_device.is_some()) > + { > + let Some(backing_device) = item.backing_device.as_deref() else { > + continue; > + }; > + let Some(mount_point) = item.get_mount_point() else { > + continue; > + }; this is redundant, get_mount_point will only return Some if there's a backing_device.. in fact, I think we can remove the get_mount_point fn entirely, and always check for backing_device (absolute_path will then return the mountpoint..) there's only a few places (beside this) where we only look at get_mount_point, and those can easily be adapted to make backing_device() *the* single way to check if a datastore is a removable one, which makes reasoning a lot easier.. > + list.push(DatastoreMountInfo { > + unitfile: "datastore config".to_string(), > + name: item.name.clone(), > + device: format!("/dev/disk/by-uuid/{backing_device}"), > + path: mount_point, > + filesystem: None, > + options: None, > + removable: true, > }); > } > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Thu Nov 21 16:13:14 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Thu, 21 Nov 2024 16:13:14 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v13 00/26] add removable datastores In-Reply-To: <20241113150102.164820-1-h.laimer@proxmox.com> References: <20241113150102.164820-1-h.laimer@proxmox.com> Message-ID: <1732201067.dkrajab3k1.astroid@yuna.none> needs a slight rebase, lots of smallish comments on individual patches. there's a few typos in comments and commit messages, the latter should be mostly caught by a spell checker when resending.. things that might be nice to address before shipping this in a (public) package/resending a rebased version: - the mount_status return value in the api return value would be nicer as an enum, instead of an optional bool.. - the internal command for auto-mounting might be nicer as a standalone executable without CLI handler/.. (but that could be done later as well, given that it is marked as internal already - it might also increase build times, which might be an argument against it..) - creating a second removable datastore on a device fails, since the create_datastore API endpoint has a wrong check (see comments on corresponding patch) - pre-existing: while creating a datastore, we hold a lock on the config which can cause all sorts of operations to run into a (lock) timeout, the lock scope could maybe be reduced (for removable datastores, creating the chunk store might take a long time..) 
- unmounting a datastore over the UI spams the log with: 6:08:17 yuna proxmox-backup-proxy[812419]: GET /api2/json/admin/datastore/removable2/status: 400 Bad Request: [client [::ffff:192.168.16.37]:47734] Removable Datastore is not mounted - unmounting over the UI, then pulling my USB drive logged the following: Nov 21 16:09:04 yuna kernel: EXT4-fs (sda1): shut down requested (2) Nov 21 16:09:04 yuna kernel: Aborting journal on device sda1-8. Nov 21 16:09:04 yuna kernel: device offline error, dev sda, sector 29624320 op 0x1:(WRITE) flags 0x9800 phys_seg 1 prio class 0 Nov 21 16:09:04 yuna kernel: Buffer I/O error on dev sda1, logical block 3702784, lost sync page write Nov 21 16:09:04 yuna kernel: JBD2: I/O error when updating journal superblock for sda1-8. doesn't sound good? figured that out later, what I did was: - create "directory" removable datastore over UI - create second removable datastore on same device (patching out the wrong patch mentioned above) - remove first datastore over UI (it stayed mounted, but was removed from config and UI!) the failure to unmount when removing is reproducible for me the rest is mostly code style/hygiene related, and can be done as follow-ups if needed. I didn't look at the UI patches, just did a cursory test drive of the resulting UI! On November 13, 2024 4:00 pm, Hannes Laimer wrote: > These patches add support for removable datastores. All removable > datastores have a backing-device(a UUID) associated with them. > Removable datastores work like normal ones, just that they can be > unplugged. It is possible to create a removable datastore, sync > backups onto it, unplug it and use it on a different PBS. > > The datastore path specified is relative to the root of the used > device. Removable datastores are bind mounted to > /mnt/datastore/. Multiple datastores can be created on a single > device, but only device with a single datastore on them will be > auto-mounted. > > When a removable datastore is deleted and 'destroy-data' is set, the > device has to be mounted. If 'destroy-data' is not set the datastore > can be deleted even if the device is not present. Removable datastores > are automatically mounted when plugged in. 
> > v13: thanks @Fabian * allow multiple datastore on devices * replace > `is_datastore_available` by a more specific function, it is now > removable datastore specific and won't be called for normal ones * > replace removable/is_available in status structs with mount_state, > which is `None` for normal datastore as it makes it less ambiguous > what is meant * remove notion of 'available' from normal datastores > and replace it with mounted/mount_status for removable ones, as it > never really made sense in the first place * abort of an unmount task > will now reset the maintanance mode * add check for race when setting > maintenance at end of unmounting task * improve documentation and > commit messages * remove not needed tokio::spawn * only auto mount > devices with single datastore on them * drop ptach that added flag > for excluding used partitions * make auto mount service not dynamic * > add debug command to scan devices for datastores they may contain * > rebase onto master > > v12: thanks @Wolfgang * use bind mounts, so now /path/to/ds is > mounted to /mnt/datastore/ this is a bit cleaner and allows for > multiple datastores on a single device to be mounted individually, if > we want to allow that in the future * small code improvements > > > v11: * rebase onto master > > v10: thanks @Gabriel and @Wolfgang * make is_datastore_available more > robust * fix a lot of wording * drop format on uuid_mount command for > UUID * only gather_disk_stats if datastore is available * overall code > improvements * ui: include model in partition selector * rebased onto > master > > v9: * change mount point to `/mnt/datastore/` * update > "Directory" list UI * add `absolute_path()` from Dietmar's RFC * > update docs > > v8: * still depends on [1] * paths for removable datastores are now > relative to `/mnt/removable_datastore/` * add support for > creation of removable datastore through the "create directory" > endpoint (last 3 patches) * update datastore creation UI * update docs > > v7: * depends on [1] * improve logging when waiting for tasks * drop > `update-datatore-cache` refactoring > * fix some commit messages > > [1] https://lists.proxmox.com/pipermail/pbs-devel/2024-April/008739.html > > v6: > * remove 'drop' flag in datastore cache > * use maintenance-mode 'unmount' for unmounting process, only for the > unmounting not for being unmounted > * rename/simplify update-datastore-cache command > * ui: integrate new unmounting maintenance mode > * basically a mix of v3 and v4 > > v5: thanks @Dietmar and @Christian > * drop --force for unmount since it'll always fail if tasks are still running, and if > there are not normal unount will work > * improve several commit messages > * improve error message wording > * add removable datastore section to docs > * add documentation for is_datastore_available > > v4: thanks a lot @Dietmar and @Christian > * make check if mounted wayyy faster > * don't keep track of mounting state > * drop Unplugged maintenance mode > * use UUID_FORMAT for uuid field > * a lot of small things, like use of bail!, inline format!, ... 
> * include improvement to cache handling > > v3: > * remove lazy unmounting (since 9cba51ac782d04085c0af55128f32178e5132358 is applied) > * fix CLI (un)mount command, thanks @Gabriel > * add removable datastore CLI autocomplete helper > * rebase onto master > * move ui patches to the end > > thanks @Lukas and @Thomas for the feedback > v2: > * fix datastore 'add' button in the UI > * some format!("{}", a) -> format!("{a}") > * replace `const` with `let` in js code > * change icon `fa-usb` -> `fa-plug` > * add some docs > * add JDoc for parseMaintenanceMode > * proxmox-schema dep bump > > Dietmar Maurer (2): > config: factor out method to get the absolute datastore path > maintenance: add 'Unmount' maintenance type > > Hannes Laimer (24): > tools: add disks utility functions > pbs-api-types: add backing-device to DataStoreConfig > disks: add UUID to partition info > datastore: add helper for checking if a datastore is mounted > api: admin: add (un)mount endpoint for removable datastores > api: removable datastore creation > pbs-api-types: add mount_status field to DataStoreListItem > bin: manager: add (un)mount command > add auto-mounting for removable datastores > datastore: handle deletion of removable datastore properly > docs: add removable datastores section > ui: add partition selector form > ui: add removable datastore creation support > ui: add (un)mount button to summary > ui: tree: render unmounted datastores correctly > ui: utils: make parseMaintenanceMode more robust > ui: add datastore status mask for unmounted removable datastores > ui: maintenance: fix disable msg field if no type is selected > ui: render 'unmount' maintenance mode correctly > api: node: allow creation of removable datastore through directory > endpoint > api: node: include removable datastores in directory list > node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR > ui: support create removable datastore through directory creation > bin: debug: add inspect device command > > debian/proxmox-backup-server.install | 1 + > debian/proxmox-backup-server.udev | 3 + > docs/storage.rst | 38 +++ > etc/Makefile | 3 +- > etc/removable-device-attach at .service | 8 + > pbs-api-types/src/datastore.rs | 46 +++- > pbs-api-types/src/maintenance.rs | 7 +- > pbs-config/src/datastore.rs | 14 + > pbs-datastore/src/datastore.rs | 88 +++++- > pbs-datastore/src/lib.rs | 2 +- > src/api2/admin/datastore.rs | 289 ++++++++++++++++++-- > src/api2/config/datastore.rs | 87 +++++- > src/api2/node/disks/directory.rs | 104 ++++++- > src/api2/status/mod.rs | 29 +- > src/bin/proxmox_backup_debug/inspect.rs | 149 ++++++++++ > src/bin/proxmox_backup_manager/datastore.rs | 136 ++++++++- > src/server/metric_collection/mod.rs | 18 +- > src/tools/disks/mod.rs | 39 ++- > www/DirectoryList.js | 13 + > www/Makefile | 1 + > www/NavigationTree.js | 17 +- > www/Utils.js | 33 ++- > www/css/ext6-pbs.css | 20 ++ > www/datastore/DataStoreListSummary.js | 1 + > www/datastore/Summary.js | 113 +++++++- > www/form/PartitionSelector.js | 81 ++++++ > www/window/CreateDirectory.js | 14 + > www/window/DataStoreEdit.js | 37 +++ > www/window/MaintenanceOptions.js | 17 +- > 29 files changed, 1328 insertions(+), 80 deletions(-) > create mode 100644 etc/removable-device-attach at .service > create mode 100644 www/form/PartitionSelector.js > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From c.ebner at proxmox.com 
Thu Nov 21 16:43:36 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 21 Nov 2024 16:43:36 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/4] server: push: add error context to all target api calls In-Reply-To: <20241121154337.471425-1-c.ebner@proxmox.com> References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: <20241121154337.471425-4-c.ebner@proxmox.com> Make it clear from the context that these error messages stem from the response of an api call rather than a local error. Signed-off-by: Christian Ebner --- src/server/push.rs | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/src/server/push.rs b/src/server/push.rs index 86cef5520..fe2e11220 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -219,7 +219,9 @@ async fn remove_target_namespace( if params.target.supports_prune_delete_stats { let data = result["data"].take(); serde_json::from_value(data).map_err(|err| { - format_err!("removing target namespace {target_namespace} failed - {err}") + format_err!( + "Failed to remove remote namespace {target_namespace}, remote returned: {err}" + ) }) } else { Ok(BackupGroupDeleteStats::default()) @@ -236,7 +238,8 @@ async fn fetch_target_groups( let args = Some(serde_json::json!({ "ns": target_namespace.name() })); let mut result = params.target.client.get(&api_path, args).await?; - let groups: Vec = serde_json::from_value(result["data"].take())?; + let groups: Vec = serde_json::from_value(result["data"].take()) + .map_err(|err| format_err!("Failed to fetch remote groups, remote returned: {err}"))?; let (mut owned, not_owned) = groups.into_iter().fold( (Vec::new(), HashSet::new()), @@ -277,8 +280,9 @@ async fn remove_target_group( if params.target.supports_prune_delete_stats { let data = result["data"].take(); - serde_json::from_value(data) - .map_err(|err| format_err!("removing target group {backup_group} failed - {err}")) + serde_json::from_value(data).map_err(|err| { + format_err!("Failed to remove remote group {backup_group}, remote returned: {err}") + }) } else { Ok(BackupGroupDeleteStats::default()) } @@ -313,7 +317,7 @@ async fn check_or_create_target_namespace( match params.target.client.post(&api_path, Some(args)).await { Ok(_) => info!("Successfully created new namespace {current} on remote"), Err(err) => { - bail!("Remote creation of namespace {current} failed, remote returned: {err}") + bail!("Creation of remote namespace {current} failed, remote returned: {err}") } } existing_target_namespaces.push(current.clone()); @@ -585,7 +589,8 @@ async fn fetch_target_snapshots( args["ns"] = serde_json::to_value(target_namespace)?; } let mut result = params.target.client.get(&api_path, Some(args)).await?; - let snapshots: Vec = serde_json::from_value(result["data"].take())?; + let snapshots: Vec = serde_json::from_value(result["data"].take()) + .map_err(|err| format_err!("Failed to fetch remote snapshots, remote returned: {err}"))?; Ok(snapshots) } @@ -603,7 +608,12 @@ async fn forget_target_snapshot( if !target_namespace.is_root() { args["ns"] = serde_json::to_value(target_namespace)?; } - params.target.client.delete(&api_path, Some(args)).await?; + params + .target + .client + .delete(&api_path, Some(args)) + .await + .map_err(|err| format_err!("Failed to remove remote snapshot, remote returned: {err}"))?; Ok(()) } -- 2.39.5 From c.ebner at proxmox.com Thu Nov 21 16:43:35 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 21 Nov 2024 16:43:35 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/4] 
server: push: consistently use remote over target for error messages In-Reply-To: <20241121154337.471425-1-c.ebner@proxmox.com> References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: <20241121154337.471425-3-c.ebner@proxmox.com> Mixing of terms only makes the errors harder to understand. In order to make error messages more intuitive, always refer to the sync push target as remote, mention the remote explicitly and/or improve messages where needed. Signed-off-by: Christian Ebner --- src/server/push.rs | 35 +++++++++++++++++++++-------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/src/server/push.rs b/src/server/push.rs index 2181634c6..86cef5520 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -197,7 +197,7 @@ async fn remove_target_namespace( target_namespace: &BackupNamespace, ) -> Result { if target_namespace.is_root() { - bail!("cannot remove root namespace from target"); + bail!("Cannot remove root namespace from remote"); } check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) @@ -295,7 +295,7 @@ async fn check_or_create_target_namespace( // Sub-namespaces have to be created by creating parent components first. check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) - .map_err(|err| format_err!("Creating namespace not allowed - {err}"))?; + .map_err(|err| format_err!("Creating remote namespace not allowed - {err}"))?; let mut parent = BackupNamespace::root(); for component in target_namespace.components() { @@ -311,7 +311,7 @@ async fn check_or_create_target_namespace( args["parent"] = serde_json::to_value(parent.clone())?; } match params.target.client.post(&api_path, Some(args)).await { - Ok(_) => info!("Created new namespace on target: {current}"), + Ok(_) => info!("Successfully created new namespace {current} on remote"), Err(err) => { bail!("Remote creation of namespace {current} failed, remote returned: {err}") } @@ -445,18 +445,19 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result 0 { warn!( - "kept {protected_count} protected snapshots of namespace '{target_namespace}'", + "Kept {protected_count} protected snapshots of remote namespace {target_namespace}", protected_count = delete_stats.protected_snapshots(), ); continue; } } Err(err) => { - warn!("failed to remove vanished namespace {target_namespace} - {err}"); + warn!("Encountered errors: {err}"); + warn!("Failed to remove vanished namespace {target_namespace} from remote!"); continue; } } - info!("removed vanished namespace {target_namespace}"); + info!("Successfully removed vanished namespace {target_namespace} from remote"); } if !params.target.supports_prune_delete_stats { @@ -481,7 +482,7 @@ pub(crate) async fn push_namespace( let target_namespace = params.map_to_target(namespace)?; // Check if user is allowed to perform backups on remote datastore check_ns_remote_datastore_privs(params, &target_namespace, PRIV_REMOTE_DATASTORE_BACKUP) - .map_err(|err| format_err!("Pushing to remote not allowed - {err}"))?; + .map_err(|err| format_err!("Pushing to remote namespace not allowed - {err}"))?; let mut list: Vec = params .source @@ -527,7 +528,8 @@ pub(crate) async fn push_namespace( match push_group(params, namespace, &group, &mut progress).await { Ok(sync_stats) => stats.add(sync_stats), Err(err) => { - warn!("sync group '{group}' failed - {err}"); + warn!("Encountered errors: {err}"); + warn!("Failed to push group {group} to remote!"); errors = true; } } @@ -543,13 +545,13 @@ pub(crate) 
async fn push_namespace( continue; } - info!("delete vanished group '{target_group}'"); + info!("Removed vanished group {target_group} from remote"); match remove_target_group(params, &target_namespace, &target_group).await { Ok(delete_stats) => { if delete_stats.protected_snapshots() > 0 { warn!( - "kept {protected_count} protected snapshots of group '{target_group}'", + "Kept {protected_count} protected snapshots of group {target_group} on remote", protected_count = delete_stats.protected_snapshots(), ); } @@ -560,7 +562,8 @@ pub(crate) async fn push_namespace( })); } Err(err) => { - warn!("failed to delete vanished group - {err}"); + warn!("Encountered errors: {err}"); + warn!("Failed to remove vanished group {target_group} from remote!"); errors = true; continue; } @@ -693,7 +696,7 @@ pub(crate) async fn push_group( } if snapshot.protected { info!( - "don't delete vanished snapshot {name} (protected)", + "Kept protected snapshot {name} on remote", name = snapshot.backup ); continue; @@ -701,12 +704,16 @@ pub(crate) async fn push_group( if let Err(err) = forget_target_snapshot(params, &target_namespace, &snapshot.backup).await { + info!("Encountered errors: {err}"); info!( - "could not delete vanished snapshot {name} - {err}", + "Failed to remove vanished snapshot {name} from remote!", name = snapshot.backup ); } - info!("delete vanished snapshot {name}", name = snapshot.backup); + info!( + "Removed vanished snapshot {name} from remote", + name = snapshot.backup + ); stats.add(SyncStats::from(RemovedVanishedStats { snapshots: 1, groups: 0, -- 2.39.5 From c.ebner at proxmox.com Thu Nov 21 16:43:37 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 21 Nov 2024 16:43:37 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 4/4] server: push: various smaller improvements to error messages In-Reply-To: <20241121154337.471425-1-c.ebner@proxmox.com> References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: <20241121154337.471425-5-c.ebner@proxmox.com> Various smaller adaptions such as capitalization of the start of sentences, expansion of abbreviations and shortening of to long error messages. To improve consistency with the rest of the error messages for the sync job in push direction. 
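For reference, a small self-contained sketch (not part of this patch; it only assumes the anyhow crate the codebase already uses) of how a chained error renders under the different format specifiers that come up later in this thread:

```
use anyhow::{anyhow, Context};

// pretend the underlying request failed with a low-level error
fn fetch_target_groups() -> anyhow::Result<()> {
    Err(anyhow!("connection reset by peer")).context("fetching remote groups failed")
}

fn main() {
    let err = fetch_target_groups()
        .context("sync of group vm/100 failed")
        .unwrap_err();
    println!("{err}");   // outermost message only
    println!("{err:#}"); // whole chain on one line, joined with ": "
    println!("{err:?}"); // whole chain as a multi-line debug representation
}
```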
Signed-off-by: Christian Ebner --- src/server/push.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/server/push.rs b/src/server/push.rs index fe2e11220..7a4e21f37 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -129,7 +129,7 @@ impl PushParameters { // push assumes namespace support on the remote side, fail early if missing if api_version.major < 2 || (api_version.major == 2 && api_version.minor < 2) { - bail!("unsupported remote api version, minimum v2.2 required"); + bail!("Unsupported remote api version, minimum v2.2 required"); } let supports_prune_delete_stats = api_version.major > 3 @@ -408,7 +408,8 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result { errors = true; - info!("Encountered errors while syncing namespace {source_namespace} - {err}"); + info!("Encountered errors: {err}"); + info!("Failed to sync {source_store_and_ns} into {target_store_and_ns}!"); } } } @@ -465,12 +466,12 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result manifest, Err(err) => { // No manifest in snapshot or failed to read, warn and skip - log::warn!("failed to load manifest - {err}"); + log::warn!("Encountered errors: {err}"); + log::warn!("Failed to load manifest for '{snapshot}'!"); return Ok(stats); } }; -- 2.39.5 From c.ebner at proxmox.com Thu Nov 21 16:43:33 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 21 Nov 2024 16:43:33 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] improve push sync job log messages Message-ID: <20241121154337.471425-1-c.ebner@proxmox.com> This is a small series of patches with the intend to improve the log messages for the sync job in push direction, mainly adding context to error messages from the remote when the error stems from an api call, limiting line length and improving usage of consistent terms for better readablility and easier understanding. Christian Ebner (4): server: push: fix needless borrow clippy warning server: push: consistently use remote over target for error messages server: push: add error context to all target api calls server: push: various smaller improvements to error messages src/server/push.rs | 79 ++++++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 30 deletions(-) -- 2.39.5 From c.ebner at proxmox.com Thu Nov 21 16:43:34 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 21 Nov 2024 16:43:34 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/4] server: push: fix needless borrow clippy warning In-Reply-To: <20241121154337.471425-1-c.ebner@proxmox.com> References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: <20241121154337.471425-2-c.ebner@proxmox.com> Signed-off-by: Christian Ebner --- src/server/push.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/server/push.rs b/src/server/push.rs index 4a222627b..2181634c6 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -422,7 +422,7 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result References: <20241121144752.449846-1-s.ivanov@proxmox.com> Message-ID: <02801dba-446c-4718-bd5f-06eb068262cf@proxmox.com> Am 21.11.24 um 15:47 schrieb Stoiko Ivanov: > this got reported via e-mail - seems this one occurrence was > forgotten. grepped through the docs (and the whole repo) for 'Mail' > and 'Gateway', and it seems this was the only one. 
> > Fixes: cbd7db1d ("docs: certificates") > Signed-off-by: Stoiko Ivanov > --- > docs/certificate-management.rst | 6 ++---- > 1 file changed, 2 insertions(+), 4 deletions(-) > > applied, thanks! From t.lamprecht at proxmox.com Thu Nov 21 17:01:05 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Thu, 21 Nov 2024 17:01:05 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types In-Reply-To: <65ce8683-8e27-4d4e-a2f3-9d05960f2e72@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com> <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com> <13127ac6-d634-4ba4-b48a-9866110e35e1@proxmox.com> <65ce8683-8e27-4d4e-a2f3-9d05960f2e72@proxmox.com> Message-ID: <6bf543a9-1d7c-412e-9862-40e42ddf005e@proxmox.com> Am 21.11.24 um 10:58 schrieb Christian Ebner: > On 11/21/24 10:23, Thomas Lamprecht wrote: > Well, that is something I did not consider at all! So with that > viewpoint, adding this to PBS specifically is surely not the best way. > As discussed with Fabain off list, version based matching will be the > best way forward here, and dropping the incompatibility check once EOL > is reached. If we add such a thing that you proposed we should definitively get the story somewhat straight w.r.t. how we want to handle this for all projects, and define when to define a feature and when not, with some extra care on the interfaces, as those are relatively set in stone. >> >> Something tangentially related: >> >> In general, it might be also worth thinking about how the protection flag can >> be better synced ? FWICT it's now set if the source has it set and then never >> will get unset manually anymore? Remembering the source of the flag (i.e., >> sync from remote vs local api) could be an option to differentiate here when >> it's OK to clear on sync transiently again (probably guarded as option in the >> job). But here I'm a bit more distanced from the matter than you are, I'll need >> to think a bit more about this all. >> >> For now maybe order the whole API feature thing towards the end of the series >> and we can still commit all earlier patches already and decide on this a >> (short) time later. > > Not sure if I correctly interpreted you rational here. > As Fabian mentioned, the additional parameter only included in the api > calls is not to handle how we sync the flag, but rather how to act in > case the sync jobs should prune vanished snapshots/groups from the remote. Yeah, ignore that part; I forgot that we do not sync the protected flag at all, sorry for the noise. From t.lamprecht at proxmox.com Thu Nov 21 17:04:23 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Thu, 21 Nov 2024 17:04:23 +0100 Subject: [pbs-devel] applied-series: [PATCH docs 0/3] extend documentation for change detection mode In-Reply-To: <20241118092435.81880-1-c.ebner@proxmox.com> References: <20241118092435.81880-1-c.ebner@proxmox.com> Message-ID: Am 18.11.24 um 10:24 schrieb Christian Ebner: > Add sections explaining the change detection modes in more technical > details and reference to this sections in the client usage section, > which should cover more the how-to-use than the how-it-works. 
>
> Christian Ebner (3):
> docs: explain the working principle of the change detection modes
> docs: reference technical change detection mode section for client
> docs: client: fix formatting by using double ticks
>
> docs/backup-client.rst | 38 +++++--------
> docs/technical-overview.rst | 108 ++++++++++++++++++++++++++++++++++++
> 2 files changed, 123 insertions(+), 23 deletions(-)
>

applied series, thanks!

I took the liberty of transforming Shannon's "sounds good to me" into a Reviewed-by, holler at me if I should not do that anymore in the future.

From g.goller at proxmox.com Thu Nov 21 17:06:06 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Thu, 21 Nov 2024 17:06:06 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 0/4] improve push sync job log messages
In-Reply-To: <20241121154337.471425-1-c.ebner@proxmox.com>
References: <20241121154337.471425-1-c.ebner@proxmox.com>
Message-ID: 

I think it'd be nice if we use `.context()` instead of `format_err` and
debug print ("{err:?}") instead of default ("{}") here. As these
messages are all errors they shouldn't appear too often in the log and
if an error happens, you get much more information.

What do you think?

From m.sandoval at proxmox.com Thu Nov 21 17:05:56 2024
From: m.sandoval at proxmox.com (Maximiliano Sandoval)
Date: Thu, 21 Nov 2024 17:05:56 +0100
Subject: [pbs-devel] [PATCH v6 proxmox-backup 29/29] docs: add section for sync jobs in push direction
In-Reply-To: <20241031121519.434337-30-c.ebner@proxmox.com>
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-30-c.ebner@proxmox.com>
Message-ID: 

Christian Ebner writes:

> --- a/docs/managing-remotes.rst
> +++ b/docs/managing-remotes.rst
> @@ -227,3 +227,39 @@ the web interface or using the ``proxmox-backup-manager`` command-line tool:
> .. code-block:: console
>
> # proxmox-backup-manager sync-job update ID --rate-in 20MiB
> +
> +Sync Direction Push
> +^^^^^^^^^^^^^^^^^^^
> +
> +Sync jobs can be configured for pull or push direction. Sync jobs in push
> +direction are not identical in behaviour because of the limited access to the

This should be 'behavior'.

From c.ebner at proxmox.com Thu Nov 21 17:15:05 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 21 Nov 2024 17:15:05 +0100
Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types
In-Reply-To: <6bf543a9-1d7c-412e-9862-40e42ddf005e@proxmox.com>
References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com> <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com> <13127ac6-d634-4ba4-b48a-9866110e35e1@proxmox.com> <65ce8683-8e27-4d4e-a2f3-9d05960f2e72@proxmox.com> <6bf543a9-1d7c-412e-9862-40e42ddf005e@proxmox.com>
Message-ID: <9ccf5606-ff5a-4473-96b3-a9ea80ab0dcf@proxmox.com>

On 11/21/24 17:01, Thomas Lamprecht wrote:
> Am 21.11.24 um 10:58 schrieb Christian Ebner:
>> On 11/21/24 10:23, Thomas Lamprecht wrote:
>> Well, that is something I did not consider at all! So with that
>> viewpoint, adding this to PBS specifically is surely not the best way.
>> As discussed with Fabian off list, version based matching will be the
>> best way forward here, and dropping the incompatibility check once EOL
>> is reached.
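A minimal sketch of such version based matching (illustrative types, not the actual PBS implementation): gate a capability on the remote's reported version instead of a dedicated feature flag, so the check can simply be dropped once the old release line is EOL.

```
#[derive(Clone, Copy)]
struct ApiVersion {
    major: u32,
    minor: u32,
}

impl ApiVersion {
    // lexicographic tuple comparison: (3, 3) >= (3, 2), etc.
    fn at_least(self, major: u32, minor: u32) -> bool {
        (self.major, self.minor) >= (major, minor)
    }
}

fn main() {
    let remote = ApiVersion { major: 3, minor: 3 };
    // hypothetical: assume the capability shipped with version 3.3
    if remote.at_least(3, 3) {
        println!("remote supports the new parameter");
    } else {
        println!("fall back to the old behavior");
    }
}
```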
how we want to handle this for all projects, > and define when to define a feature and when not, with some extra care on > the interfaces, as those are relatively set in stone. Regarding this, have we considered exposing the API schema to the client, something like [0]? Fetching the remote API schema and therefore knowing which parameters are available would have not only covered the additional check, but also allowed to see what response value to expect. Although, I guess this boils down to the same set of maintenance burden in the end, hard to maintain code because of to many condition checks. [0] https://swagger.io/specification/ From c.ebner at proxmox.com Thu Nov 21 17:26:22 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Thu, 21 Nov 2024 17:26:22 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] improve push sync job log messages In-Reply-To: References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: On 11/21/24 17:06, Gabriel Goller wrote: > I think I'd be nice if we use `.context()` instead of `format_err` and > debug print ("{err:?}") instead of default ("{}") here. As these > messages are all errors they shouldn't appear too often in the log and > if an error happens, you get much more information. > > What do you think? Hmm, can check that out, but wouldn't that disrupt the single line character of the log entries, prefixed by the time stamp? From s.ivanov at proxmox.com Thu Nov 21 17:45:46 2024 From: s.ivanov at proxmox.com (Stoiko Ivanov) Date: Thu, 21 Nov 2024 17:45:46 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup] docs: fix wrong product name in certificate docs In-Reply-To: <02801dba-446c-4718-bd5f-06eb068262cf@proxmox.com> References: <20241121144752.449846-1-s.ivanov@proxmox.com> <02801dba-446c-4718-bd5f-06eb068262cf@proxmox.com> Message-ID: <20241121174546.551f2ef6@rosa.proxmox.com> On Thu, 21 Nov 2024 16:59:38 +0100 Thomas Lamprecht wrote: > Am 21.11.24 um 15:47 schrieb Stoiko Ivanov: > > this got reported via e-mail - seems this one occurrence was > > forgotten. grepped through the docs (and the whole repo) for 'Mail' > > and 'Gateway', and it seems this was the only one. > > > > Fixes: cbd7db1d ("docs: certificates") got applied faster than the feedback from the reporter got in :), but at least to have it publicly acknowledged in the mailing-list archives: Reported-by: Simon Harhues > > Signed-off-by: Stoiko Ivanov > > --- > > docs/certificate-management.rst | 6 ++---- > > 1 file changed, 2 insertions(+), 4 deletions(-) > > > > > > applied, thanks! From f.gruenbichler at proxmox.com Thu Nov 21 17:49:37 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?Q?Fabian_Gr=C3=BCnbichler?=) Date: Thu, 21 Nov 2024 17:49:37 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup v13 00/26] add removable datastores In-Reply-To: <1732201067.dkrajab3k1.astroid@yuna.none> References: <20241113150102.164820-1-h.laimer@proxmox.com> <1732201067.dkrajab3k1.astroid@yuna.none> Message-ID: <33251245.8037.1732207777798@webmail.proxmox.com> one more thing: it probably makes sense to additionally require Sys.Modify on /system/disks for paths adding/(modifying?)/removing removable datastores, to mimic the permissions required for adding a directory-backed managed datastore.. > Fabian Gr?nbichler hat am 21.11.2024 16:13 CET geschrieben: > > > needs a slight rebase, lots of smallish comments on individual patches. > there's a few typos in comments and commit messages, the latter should > be mostly caught by a spell checker when resending.. 
> > things that might be nice to address before shipping this in a (public) > package/resending a rebased version: > > - the mount_status return value in the api return value would be nicer > as an enum, instead of an optional bool.. > - the internal command for auto-mounting might be nicer as a standalone > executable without CLI handler/.. (but that could be done later as > well, given that it is marked as internal already - it might also > increase build times, which might be an argument against it..) > - creating a second removable datastore on a device fails, since the > create_datastore API endpoint has a wrong check (see comments on > corresponding patch) > - pre-existing: while creating a datastore, we hold a lock on the config > which can cause all sorts of operations to run into a (lock) timeout, > the lock scope could maybe be reduced (for removable datastores, > creating the chunk store might take a long time..) > - unmounting a datastore over the UI spams the log with: > > 6:08:17 yuna proxmox-backup-proxy[812419]: GET /api2/json/admin/datastore/removable2/status: 400 Bad Request: [client [::ffff:192.168.16.37]:47734] Removable Datastore is not mounted > > - unmounting over the UI, then pulling my USB drive logged the > following: > > Nov 21 16:09:04 yuna kernel: EXT4-fs (sda1): shut down requested (2) > Nov 21 16:09:04 yuna kernel: Aborting journal on device sda1-8. > Nov 21 16:09:04 yuna kernel: device offline error, dev sda, sector 29624320 op 0x1:(WRITE) flags 0x9800 phys_seg 1 prio class 0 > Nov 21 16:09:04 yuna kernel: Buffer I/O error on dev sda1, logical block 3702784, lost sync page write > Nov 21 16:09:04 yuna kernel: JBD2: I/O error when updating journal superblock for sda1-8. > > doesn't sound good? figured that out later, what I did was: > > - create "directory" removable datastore over UI > - create second removable datastore on same device (patching out the > wrong patch mentioned above) > - remove first datastore over UI (it stayed mounted, but was removed > from config and UI!) > > the failure to unmount when removing is reproducible for me > > the rest is mostly code style/hygiene related, and can be done as > follow-ups if needed. > > I didn't look at the UI patches, just did a cursory test drive of the > resulting UI! > > On November 13, 2024 4:00 pm, Hannes Laimer wrote: > > These patches add support for removable datastores. All removable > > datastores have a backing-device(a UUID) associated with them. > > Removable datastores work like normal ones, just that they can be > > unplugged. It is possible to create a removable datastore, sync > > backups onto it, unplug it and use it on a different PBS. > > > > The datastore path specified is relative to the root of the used > > device. Removable datastores are bind mounted to > > /mnt/datastore/. Multiple datastores can be created on a single > > device, but only device with a single datastore on them will be > > auto-mounted. > > > > When a removable datastore is deleted and 'destroy-data' is set, the > > device has to be mounted. If 'destroy-data' is not set the datastore > > can be deleted even if the device is not present. Removable datastores > > are automatically mounted when plugged in. 
> > > > v13: thanks @Fabian * allow multiple datastore on devices * replace > > `is_datastore_available` by a more specific function, it is now > > removable datastore specific and won't be called for normal ones * > > replace removable/is_available in status structs with mount_state, > > which is `None` for normal datastore as it makes it less ambiguous > > what is meant * remove notion of 'available' from normal datastores > > and replace it with mounted/mount_status for removable ones, as it > > never really made sense in the first place * abort of an unmount task > > will now reset the maintanance mode * add check for race when setting > > maintenance at end of unmounting task * improve documentation and > > commit messages * remove not needed tokio::spawn * only auto mount > > devices with single datastore on them * drop ptach that added flag > > for excluding used partitions * make auto mount service not dynamic * > > add debug command to scan devices for datastores they may contain * > > rebase onto master > > > > v12: thanks @Wolfgang * use bind mounts, so now /path/to/ds is > > mounted to /mnt/datastore/ this is a bit cleaner and allows for > > multiple datastores on a single device to be mounted individually, if > > we want to allow that in the future * small code improvements > > > > > > v11: * rebase onto master > > > > v10: thanks @Gabriel and @Wolfgang * make is_datastore_available more > > robust * fix a lot of wording * drop format on uuid_mount command for > > UUID * only gather_disk_stats if datastore is available * overall code > > improvements * ui: include model in partition selector * rebased onto > > master > > > > v9: * change mount point to `/mnt/datastore/` * update > > "Directory" list UI * add `absolute_path()` from Dietmar's RFC * > > update docs > > > > v8: * still depends on [1] * paths for removable datastores are now > > relative to `/mnt/removable_datastore/` * add support for > > creation of removable datastore through the "create directory" > > endpoint (last 3 patches) * update datastore creation UI * update docs > > > > v7: * depends on [1] * improve logging when waiting for tasks * drop > > `update-datatore-cache` refactoring > > * fix some commit messages > > > > [1] https://lists.proxmox.com/pipermail/pbs-devel/2024-April/008739.html > > > > v6: > > * remove 'drop' flag in datastore cache > > * use maintenance-mode 'unmount' for unmounting process, only for the > > unmounting not for being unmounted > > * rename/simplify update-datastore-cache command > > * ui: integrate new unmounting maintenance mode > > * basically a mix of v3 and v4 > > > > v5: thanks @Dietmar and @Christian > > * drop --force for unmount since it'll always fail if tasks are still running, and if > > there are not normal unount will work > > * improve several commit messages > > * improve error message wording > > * add removable datastore section to docs > > * add documentation for is_datastore_available > > > > v4: thanks a lot @Dietmar and @Christian > > * make check if mounted wayyy faster > > * don't keep track of mounting state > > * drop Unplugged maintenance mode > > * use UUID_FORMAT for uuid field > > * a lot of small things, like use of bail!, inline format!, ... 
> > * include improvement to cache handling > > > > v3: > > * remove lazy unmounting (since 9cba51ac782d04085c0af55128f32178e5132358 is applied) > > * fix CLI (un)mount command, thanks @Gabriel > > * add removable datastore CLI autocomplete helper > > * rebase onto master > > * move ui patches to the end > > > > thanks @Lukas and @Thomas for the feedback > > v2: > > * fix datastore 'add' button in the UI > > * some format!("{}", a) -> format!("{a}") > > * replace `const` with `let` in js code > > * change icon `fa-usb` -> `fa-plug` > > * add some docs > > * add JDoc for parseMaintenanceMode > > * proxmox-schema dep bump > > > > Dietmar Maurer (2): > > config: factor out method to get the absolute datastore path > > maintenance: add 'Unmount' maintenance type > > > > Hannes Laimer (24): > > tools: add disks utility functions > > pbs-api-types: add backing-device to DataStoreConfig > > disks: add UUID to partition info > > datastore: add helper for checking if a datastore is mounted > > api: admin: add (un)mount endpoint for removable datastores > > api: removable datastore creation > > pbs-api-types: add mount_status field to DataStoreListItem > > bin: manager: add (un)mount command > > add auto-mounting for removable datastores > > datastore: handle deletion of removable datastore properly > > docs: add removable datastores section > > ui: add partition selector form > > ui: add removable datastore creation support > > ui: add (un)mount button to summary > > ui: tree: render unmounted datastores correctly > > ui: utils: make parseMaintenanceMode more robust > > ui: add datastore status mask for unmounted removable datastores > > ui: maintenance: fix disable msg field if no type is selected > > ui: render 'unmount' maintenance mode correctly > > api: node: allow creation of removable datastore through directory > > endpoint > > api: node: include removable datastores in directory list > > node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR > > ui: support create removable datastore through directory creation > > bin: debug: add inspect device command > > > > debian/proxmox-backup-server.install | 1 + > > debian/proxmox-backup-server.udev | 3 + > > docs/storage.rst | 38 +++ > > etc/Makefile | 3 +- > > etc/removable-device-attach at .service | 8 + > > pbs-api-types/src/datastore.rs | 46 +++- > > pbs-api-types/src/maintenance.rs | 7 +- > > pbs-config/src/datastore.rs | 14 + > > pbs-datastore/src/datastore.rs | 88 +++++- > > pbs-datastore/src/lib.rs | 2 +- > > src/api2/admin/datastore.rs | 289 ++++++++++++++++++-- > > src/api2/config/datastore.rs | 87 +++++- > > src/api2/node/disks/directory.rs | 104 ++++++- > > src/api2/status/mod.rs | 29 +- > > src/bin/proxmox_backup_debug/inspect.rs | 149 ++++++++++ > > src/bin/proxmox_backup_manager/datastore.rs | 136 ++++++++- > > src/server/metric_collection/mod.rs | 18 +- > > src/tools/disks/mod.rs | 39 ++- > > www/DirectoryList.js | 13 + > > www/Makefile | 1 + > > www/NavigationTree.js | 17 +- > > www/Utils.js | 33 ++- > > www/css/ext6-pbs.css | 20 ++ > > www/datastore/DataStoreListSummary.js | 1 + > > www/datastore/Summary.js | 113 +++++++- > > www/form/PartitionSelector.js | 81 ++++++ > > www/window/CreateDirectory.js | 14 + > > www/window/DataStoreEdit.js | 37 +++ > > www/window/MaintenanceOptions.js | 17 +- > > 29 files changed, 1328 insertions(+), 80 deletions(-) > > create mode 100644 etc/removable-device-attach at .service > > create mode 100644 www/form/PartitionSelector.js > > > > -- > > 2.39.5 > > > > > > > > 
_______________________________________________ > > pbs-devel mailing list > > pbs-devel at lists.proxmox.com > > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > > > > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel From g.goller at proxmox.com Thu Nov 21 18:04:03 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Thu, 21 Nov 2024 18:04:03 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] improve push sync job log messages In-Reply-To: References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: On 21.11.2024 17:26, Christian Ebner wrote: >On 11/21/24 17:06, Gabriel Goller wrote: >>I think I'd be nice if we use `.context()` instead of `format_err` and >>debug print ("{err:?}") instead of default ("{}") here. As these >>messages are all errors they shouldn't appear too often in the log and >>if an error happens, you get much more information. >> >>What do you think? > >Hmm, can check that out, but wouldn't that disrupt the single line >character of the log entries, prefixed by the time stamp? We could use "{err:#}", which will print everything in one line? Although on the other hand, I'd understand if you want to have strict control over what is displayed to the users, so no hard feelings on this one. From f.gruenbichler at proxmox.com Thu Nov 21 20:17:51 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?Q?Fabian_Gr=C3=BCnbichler?=) Date: Thu, 21 Nov 2024 20:17:51 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup v4 1/4] snapshot: add helper function to retrieve verify_state In-Reply-To: <20241121133509.289419-2-g.goller@proxmox.com> References: <20241121133509.289419-1-g.goller@proxmox.com> <20241121133509.289419-2-g.goller@proxmox.com> Message-ID: <2086615808.8058.1732216671890@webmail.proxmox.com> > Gabriel Goller hat am 21.11.2024 14:35 CET geschrieben: > > > Add helper functions to retrieve the verify_state from the manifest of a > snapshot. Replaced all the manual "verify_state" parsing with the helper > function. > > Suggested-by: Fabian Gr?nbichler > Signed-off-by: Gabriel Goller > --- > pbs-datastore/src/backup_info.rs | 15 +++++++++++++-- > pbs-datastore/src/manifest.rs | 14 +++++++++++++- > src/api2/admin/datastore.rs | 16 +++++++--------- > src/api2/backup/mod.rs | 13 ++++++------- > src/backup/verify.rs | 7 +++---- > 5 files changed, 42 insertions(+), 23 deletions(-) > > diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs > index 62d12b1183df..2d8e0a6d92da 100644 > --- a/pbs-datastore/src/backup_info.rs > +++ b/pbs-datastore/src/backup_info.rs > @@ -8,8 +8,8 @@ use anyhow::{bail, format_err, Error}; > use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; > > use pbs_api_types::{ > - Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, > - BACKUP_FILE_REGEX, > + Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState, > + BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, > }; > use pbs_config::{open_backup_lockfile, BackupLockGuard}; > > @@ -555,6 +555,17 @@ impl BackupDir { > > Ok(()) > } > + > + /// Load the verify state from the manifest. 
> + pub fn verify_state(&self) -> Result, anyhow::Error> { > + let manifest = self.load_manifest()?; > + Ok(manifest > + .0 > + .verify_state() > + .ok() > + .flatten() > + .map(|svs| svs.state)) this still looks slightly wrong to me - if verify_state() returns an error, it's mapped to None (by the call to `ok()`), which would hide an inner parse error for the verification state? I think the following should be correctly bubble up errors when loading the manifest or when parsing the contained verify state while returning Ok(None) if no state is contained in the manifest: Ok(self.load_manifest()?.0.verify_state()?.map(|svs| svs.state)) > + } > } > > impl AsRef for BackupDir { > diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs > index c3df014272a0..3013fab97221 100644 > --- a/pbs-datastore/src/manifest.rs > +++ b/pbs-datastore/src/manifest.rs > @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error}; > use serde::{Deserialize, Serialize}; > use serde_json::{json, Value}; > > -use pbs_api_types::{BackupType, CryptMode, Fingerprint}; > +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState}; > use pbs_tools::crypt_config::CryptConfig; > > pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; > @@ -242,6 +242,18 @@ impl BackupManifest { > let manifest: BackupManifest = serde_json::from_value(json)?; > Ok(manifest) > } > + > + /// Get the verify state of the snapshot > + /// > + /// Note: New snapshots, which have not been verified yet, do not have a status and this > + /// function will return `Ok(None)`. > + pub fn verify_state(&self) -> Result, anyhow::Error> { > + let verify = self.unprotected["verify_state"].clone(); > + if verify.is_null() { > + return Ok(None); > + } > + Ok(Some(serde_json::from_value::(verify)?)) this looks good to me now! :) > + } > } > > impl TryFrom for BackupManifest { > diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs > index 99b579f02c50..3624dba41199 100644 > --- a/src/api2/admin/datastore.rs > +++ b/src/api2/admin/datastore.rs > @@ -537,15 +537,13 @@ unsafe fn list_snapshots_blocking( > } > }; > > - let verification = manifest.unprotected["verify_state"].clone(); > - let verification: Option = > - match serde_json::from_value(verification) { > - Ok(verify) => verify, > - Err(err) => { > - eprintln!("error parsing verification state : '{}'", err); > - None > - } > - }; > + let verification: Option = match manifest.verify_state() { > + Ok(verify) => verify, > + Err(err) => { > + eprintln!("error parsing verification state : '{}'", err); > + None > + } > + }; this as well! 
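To make that concrete, a standalone toy example (deliberately not the manifest code) showing how `.ok().flatten()` swallows the error case while `?` propagates it:

```
fn parse_state(raw: &str) -> Result<Option<u32>, std::num::ParseIntError> {
    if raw.is_empty() {
        return Ok(None); // no state recorded at all
    }
    raw.parse().map(Some) // may fail on garbage input
}

fn swallowed(raw: &str) -> Option<u32> {
    parse_state(raw).ok().flatten() // a parse error becomes a plain None
}

fn bubbled(raw: &str) -> Result<Option<u32>, std::num::ParseIntError> {
    Ok(parse_state(raw)?) // a parse error reaches the caller
}

fn main() {
    assert_eq!(swallowed("garbage"), None); // error hidden
    assert!(bubbled("garbage").is_err()); // error visible
    assert_eq!(bubbled("").unwrap(), None); // missing state stays Ok(None)
}
```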
> > let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum()); > > diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs > index ea0d0292ec58..605c75e2dfa9 100644 > --- a/src/api2/backup/mod.rs > +++ b/src/api2/backup/mod.rs > @@ -19,9 +19,9 @@ use proxmox_sortable_macro::sortable; > use proxmox_sys::fs::lock_dir_noblock_shared; > > use pbs_api_types::{ > - Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, > - BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, > - BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, > + Authid, BackupNamespace, BackupType, Operation, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, > + BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, > + CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, > }; > use pbs_config::CachedUserInfo; > use pbs_datastore::index::IndexFile; > @@ -159,13 +159,12 @@ fn upgrade_to_backup_protocol( > let info = backup_group.last_backup(true).unwrap_or(None); > if let Some(info) = info { > let (manifest, _) = info.backup_dir.load_manifest()?; > - let verify = manifest.unprotected["verify_state"].clone(); > - match serde_json::from_value::(verify) { > - Ok(verify) => match verify.state { > + match manifest.verify_state() { > + Ok(Some(verify)) => match verify.state { > VerifyState::Ok => Some(info), > VerifyState::Failed => None, > }, > - Err(_) => { > + Ok(None) | Err(_) => { > // no verify state found, treat as valid this as well, although it might make sense to log this here as well (pre-existing) > Some(info) > } > diff --git a/src/backup/verify.rs b/src/backup/verify.rs > index 6ef7e8eb3ebb..20c605c4dde6 100644 > --- a/src/backup/verify.rs > +++ b/src/backup/verify.rs > @@ -553,10 +553,9 @@ pub fn verify_filter( > return true; > } > > - let raw_verify_state = manifest.unprotected["verify_state"].clone(); > - match serde_json::from_value::(raw_verify_state) { > - Err(_) => true, // no last verification, always include > - Ok(last_verify) => { > + match manifest.verify_state() { > + Ok(None) | Err(_) => true, // no last verification, always include same here! I think/hope the Err path for these should only trigger when somebody messes up manifests, but.. > + Ok(Some(last_verify)) => { > match outdated_after { > None => false, // never re-verify if ignored and no max age > Some(max_age) => { > -- > 2.39.5 From f.gruenbichler at proxmox.com Thu Nov 21 20:21:59 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?Q?Fabian_Gr=C3=BCnbichler?=) Date: Thu, 21 Nov 2024 20:21:59 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup v4 0/4] fix #3786: resync corrupt chunks in sync-job In-Reply-To: <20241121133509.289419-1-g.goller@proxmox.com> References: <20241121133509.289419-1-g.goller@proxmox.com> Message-ID: <1974064487.8066.1732216919425@webmail.proxmox.com> Consider patches 2-4, and 1 with the slight adaptation for the first helper Reviewed-by: Fabian Gr?nbichler > Gabriel Goller hat am 21.11.2024 14:35 CET geschrieben: > > > Add an option `resync-corrupt` that resyncs corrupt snapshots when running > sync-job. This option checks if the local snapshot failed the last > verification and if it did, overwrites the local snapshot with the > remote one. > > This is quite useful, as we currently don't have an option to "fix" > broken chunks/snapshots in any way, even if a healthy version is on > another (e.g. offsite) instance. 
> > Important things to note are also: this has a slight performance > penalty, as all the manifests have to be looked through, and a > verification job has to be run beforehand, otherwise we do not know > if the snapshot is healthy. > > Note: This series was originally written by Shannon! I just picked it > up, rebased, and fixed the obvious comments on the last series. > > Changelog v4 (thanks @Fabian): > - make verify_state bubble up errors > - call verify_state helper everywhere we need the verify_state > - resync broken manifests (so resync when load_manifest fails) > > Changelog v3 (thanks @Fabian): > - filter out snapshots earlier in the pull_group function > - move verify_state to BackupManifest and fixed invocations > - reverted verify_state Option -> Result state (It doesn't matter if we get an > error, we get that quite often f.e. in new backups) > - removed some unnecessary log lines > - removed some unnecessary imports and modifications > - rebase to current master > > Changelog v2 (thanks @Thomas): > - order git trailers > - adjusted schema description to include broken indexes > - change verify_state to return a Result<_,_> > - print error if verify_state is not able to read the state > - update docs on pull_snapshot function > - simplify logic by combining flags > - move log line out of loop to only print once that we resync the snapshot > > Changelog since RFC (Shannon's work): > - rename option from deep-sync to resync-corrupt > - rebase on latest master (and change implementation details, as a > lot has changed around sync-jobs) > > proxmox-backup: > > Gabriel Goller (4): > snapshot: add helper function to retrieve verify_state > fix #3786: api: add resync-corrupt option to sync jobs > fix #3786: ui/cli: add resync-corrupt option on sync-jobs > fix #3786: docs: add resync-corrupt option to sync-job > > docs/managing-remotes.rst | 6 +++ > pbs-api-types/src/jobs.rs | 10 +++++ > pbs-datastore/src/backup_info.rs | 15 ++++++- > pbs-datastore/src/manifest.rs | 14 +++++- > src/api2/admin/datastore.rs | 16 +++---- > src/api2/backup/mod.rs | 13 +++--- > src/api2/config/sync.rs | 4 ++ > src/api2/pull.rs | 9 +++- > src/backup/verify.rs | 7 ++- > src/bin/proxmox-backup-manager.rs | 16 ++++++- > src/server/pull.rs | 72 ++++++++++++++++++++++++------- > www/window/SyncJobEdit.js | 11 +++++ > 12 files changed, 151 insertions(+), 42 deletions(-) > > > Summary over all repositories: > 12 files changed, 151 insertions(+), 42 deletions(-) > > -- > Generated by git-murpp 0.7.1 > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel From f.gruenbichler at proxmox.com Thu Nov 21 20:32:56 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?Q?Fabian_Gr=C3=BCnbichler?=) Date: Thu, 21 Nov 2024 20:32:56 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup 0/4] improve push sync job log messages In-Reply-To: References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: <1840736894.8111.1732217576163@webmail.proxmox.com> > Gabriel Goller hat am 21.11.2024 18:04 CET geschrieben: > > > On 21.11.2024 17:26, Christian Ebner wrote: > >On 11/21/24 17:06, Gabriel Goller wrote: > >>I think I'd be nice if we use `.context()` instead of `format_err` and > >>debug print ("{err:?}") instead of default ("{}") here. As these > >>messages are all errors they shouldn't appear too often in the log and > >>if an error happens, you get much more information. > >> > >>What do you think? 
print ("{err:?}") instead of default ("{}") here. As these
> >>messages are all errors they shouldn't appear too often in the log and
> >>if an error happens, you get much more information.
> >>
> >>What do you think?
> >
> >Hmm, can check that out, but wouldn't that disrupt the single line
> >character of the log entries, prefixed by the time stamp?
>
> We could use "{err:#}", which will print everything in one line?
>
> Although on the other hand, I'd understand if you want to have strict
> control over what is displayed to the users, so no hard feelings on
> this one.

big tasks/jobs like the sync ones are a bit special w.r.t. error handling because they "catch" errors often at intermediate levels, log them, record that an error occurred, but then proceed with a sensible next unit of work. so it's not possible to just add a lot of context to an error like with simple(r) API calls, where many/most errors can be treated as fatal, maybe requiring a bit of cleanup before bailing completely.

I think a good compromise is to treat those units of work as a sort of error scope, and
- add context where it is missing (e.g., low level file or network access, where just bubbling up errors might be meaningless)
- and then when logging the error, think about formatting

e.g., for syncs we have groups as lowest unit of work - if something fails within a group, we abort that group, but proceed with the next one. but if we just log "syncing group X failed - permission denied" that doesn't help whatsoever. it might be better to have three lines of warnings if something goes wrong, if those three lines contain appropriate information like

- request X / file access Y /.. failed with error Z (root cause)
- while processing snapshot A in group B (important context, because the request or path of the root cause might not tell us this)
- syncing group B failed (result of the error chain)

achieving that requires carefully analyzing all error sources/chains though. when in doubt, I'd rather have a bit too much context initially (that usually means too much is added at lower levels and we can optimize there) than too little, but within reason ;) we can't travel back in time and add needed information to debug issues to our or users' logs, a stray duplicate context can easily be filtered out though.

of course that doesn't mean every error is allowed to grow 10 lines long by default, balance is still important even when doing a first pass/implementation of improved error messages!

and of course, the general rule applies - if we use errors with proper context, it should be done fairly consistently, so that the next level up can actually use it. it might be easier to use format_err or explicit additional `warn!` invocations to improve the situation immediately, and postpone adding proper contexts to do it for all of sync without time pressure.

From f.gruenbichler at proxmox.com Fri Nov 22 08:47:44 2024
From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=)
Date: Fri, 22 Nov 2024 08:47:44 +0100
Subject: [pbs-devel] applied-series: [PATCH vma-to-pbs v6 0/4] add support for bulk import of a dump directory
In-Reply-To: <20241113155802.190824-1-f.schauer@proxmox.com>
References: <20241113155802.190824-1-f.schauer@proxmox.com>
Message-ID: <173226166449.2118190.5349364419191043948@yuna.proxmox.com>

thanks!

Quoting Filip Schauer (2024-11-13 16:57:58)
> When a path to a directory is provided in the vma_file argument, try to
> upload all VMA backups in the directory. This also handles compressed
> VMA files, notes and logs. If a vmid is specified with --vmid, only the
> backups of that particular vmid are uploaded.
>
> Also improve the readability of the log messages to keep track of all
> imported backups.
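As a rough illustration of the grouping step described above (the "vzdump-qemu-<vmid>-..." file-name layout and all names here are assumptions for illustration, not the tool's actual code):

```
use std::collections::HashMap;
use std::path::PathBuf;

// Bucket VMA backup paths per VMID, parsed out of the file name.
fn group_by_vmid(files: Vec<PathBuf>) -> HashMap<String, Vec<PathBuf>> {
    let mut grouped: HashMap<String, Vec<PathBuf>> = HashMap::new();
    for path in files {
        let Some(name) = path.file_name().and_then(|n| n.to_str()) else {
            continue;
        };
        // e.g. vzdump-qemu-100-2024_11_13-12_00_00.vma.zst
        if let Some(rest) = name.strip_prefix("vzdump-qemu-") {
            if let Some((vmid, _)) = rest.split_once('-') {
                let vmid = vmid.to_string();
                grouped.entry(vmid).or_default().push(path);
            }
        }
    }
    grouped
}

fn main() {
    let grouped = group_by_vmid(vec![
        PathBuf::from("vzdump-qemu-100-2024_11_13-12_00_00.vma.zst"),
        PathBuf::from("vzdump-qemu-101-2024_11_13-12_05_00.vma"),
    ]);
    assert_eq!(grouped["100"].len(), 1);
    assert_eq!(grouped.len(), 2);
}
```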
>
> Changed since v5:
> * Extend the help text by separate usages for single VMA import and bulk
> import
> * Move variables into format strings where possible
>
> Changed since v4:
> * Switch grouped_vmas from Vec> to HashMap>
> * Remove dependency on itertools
> * bail when no backups were found
> * Default to yes on the bulk import confirmation prompt
> * bail on invalid input to the bulk import confirmation prompt
>
> Changed since v3:
> * Mention in the description of the --vmid argument, that it is required
> if a single VMA file is provided
> * Construct grouped_vmas in place
> * Add debug logs when gathering files for bulk import
> * Log a summary of the files gathered for bulk import
> * Remove the "confusing VMA file path" error message in the second
> commit
> * Switch chunk_stats from Arc> to
> Arc<[AtomicU64; 256]> and use fetch_add to atomically increment and
> fetch the chunk stat
> * Ask for confirmation before bulk import
> * Add --yes option to skip the confirmation prompt
>
> Changed since v2:
> * Make skipping a VMID on error optional with the --skip-failed option
> * Switch log output from stderr to stdout
> * Bump itertools to 0.13
>
> Changed since v1:
> * Do not recurse through dump directory
> * Compile regex once before iterating over the files in the dump
> directory
> * Use extract on regex capture groups
> * Do not use deprecated method `chrono::NaiveDateTime::timestamp`
> * Use proxmox_sys::fs::file_read_optional_string
> * Group VMA files by VMID and continue with next VMID on error
> * Move the BackupVmaToPbsArgs split into its own commit
> * Remove hard coded occurrences of 255
> * Use level-based logging instead of println
>
> Filip Schauer (4):
> add support for bulk import of a dump directory
> add option to skip vmids whose backups failed to upload
> use level-based logging instead of println
> log device upload progress as a percentage
>
> Cargo.toml | 4 +
> src/main.rs | 198 +++++++++++++++++++++++++++++++++++++++++++++----
> src/vma2pbs.rs | 108 +++++++++++++++++++++------
> 3 files changed, 271 insertions(+), 39 deletions(-)
>
> --
> 2.39.5
>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>

From f.gruenbichler at proxmox.com Fri Nov 22 09:01:53 2024
From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=)
Date: Fri, 22 Nov 2024 09:01:53 +0100
Subject: [pbs-devel] applied-series: [PATCH proxmox-backup] chunk_store: fix problem with permission checking
In-Reply-To: <20241113124047.97456-1-h.laimer@proxmox.com>
References: <20241113124047.97456-1-h.laimer@proxmox.com>
Message-ID: <173226251335.2118190.721601310758920434@yuna.proxmox.com>

with Gabriel's trailers folded in. we can always make this more relaxed if issues pop up.

Quoting Hannes Laimer (2024-11-13 13:40:47)
> Permissions are stored in the lower 9 bits (rwxrwxrwx),
> so we have to mask `st_mode` with 0o777.
> The datastore root dir is created with 755, the `.chunks` dir and its
> contents with 750 and the `.lock` file with 644, this changes the
> expected permissions accordingly.
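A standalone illustration of why the mask is needed (a toy example, not the patched function): st_mode mixes the file-type bits (S_IFDIR, S_IFREG, ...) with the permission bits, so for a directory created with 0o755 the raw value is 0o040755 and can never compare equal to 0o755.

```
fn permissions_match(st_mode: u32, expected: u32) -> bool {
    st_mode & 0o777 == expected // keep only the rwxrwxrwx bits
}

fn main() {
    let dir_mode = 0o040755; // what stat() reports for a drwxr-xr-x directory
    assert_ne!(dir_mode, 0o755); // an unmasked comparison always fails
    assert!(permissions_match(dir_mode, 0o755)); // the masked one works
}
```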
> > Signed-off-by: Hannes Laimer > --- > pbs-datastore/src/chunk_store.rs | 10 +++++----- > 1 file changed, 5 insertions(+), 5 deletions(-) > > diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs > index 38a88584..29d5874a 100644 > --- a/pbs-datastore/src/chunk_store.rs > +++ b/pbs-datastore/src/chunk_store.rs > @@ -576,7 +576,7 @@ impl ChunkStore { > Ok(stat) => { > if stat.st_uid != u32::from(pbs_config::backup_user()?.uid) > || stat.st_gid != u32::from(pbs_config::backup_group()?.gid) > - || stat.st_mode != file_mode > + || stat.st_mode & 0o777 != file_mode > { > bail!( > "unable to open existing chunk store path {:?} - permissions or owner not correct", > @@ -598,22 +598,22 @@ impl ChunkStore { > /// subdirectories and the lock file. > pub fn verify_chunkstore>(path: T) -> Result<(), Error> { > // Check datastore root path perm/owner > - ChunkStore::check_permissions(path.as_ref(), 0o700)?; > + ChunkStore::check_permissions(path.as_ref(), 0o755)?; > > let chunk_dir = Self::chunk_dir(path.as_ref()); > // Check datastore .chunks path perm/owner > - ChunkStore::check_permissions(&chunk_dir, 0o700)?; > + ChunkStore::check_permissions(&chunk_dir, 0o750)?; > > // Check all .chunks subdirectories > for i in 0..64 * 1024 { > let mut l1path = chunk_dir.clone(); > l1path.push(format!("{:04x}", i)); > - ChunkStore::check_permissions(&l1path, 0o700)?; > + ChunkStore::check_permissions(&l1path, 0o750)?; > } > > // Check .lock file > let lockfile_path = Self::lockfile_path(path.as_ref()); > - ChunkStore::check_permissions(lockfile_path, 0o600)?; > + ChunkStore::check_permissions(lockfile_path, 0o644)?; > Ok(()) > } > } > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Fri Nov 22 09:08:01 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Fri, 22 Nov 2024 09:08:01 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v2] fix: allow datastore creation in directory with lost+found directory In-Reply-To: <20241120105505.238476-1-g.goller@proxmox.com> References: <20241120105505.238476-1-g.goller@proxmox.com> Message-ID: <173226288122.2118190.4517531377570129879@yuna.proxmox.com> with a slight style change by pulling out the `to_str()` into its own line, to reduce the total number of lines ;) Quoting Gabriel Goller (2024-11-20 11:55:05) > When creating a datastore without the "reuse-datastore" option and the > datastore contains a `lost+found` directory (which is quite common), the > creation fails. Add `lost+found` to the ignore list. 
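A minimal standalone version of the resulting check (simplified from the patch, names illustrative): a directory only counts as usable for datastore creation if every entry is either hidden or ext4's `lost+found`.

```
use std::path::Path;

fn datastore_path_is_empty(path: &Path) -> std::io::Result<bool> {
    for entry in std::fs::read_dir(path)? {
        let name = entry?.file_name();
        let ignorable = name
            .to_str()
            .map_or(false, |name| name.starts_with('.') || name == "lost+found");
        if !ignorable {
            return Ok(false); // real content is in the way
        }
    }
    Ok(true)
}

fn main() -> std::io::Result<()> {
    println!("{}", datastore_path_is_empty(Path::new("/tmp"))?);
    Ok(())
}
```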
> > Reported here: https://forum.proxmox.com/threads/bug-when-adding-new-storage-task-error-datastore-path-is-not-empty.157629/#post-721733 > > Fixes: 6e101ff75777 ("fix #5439: allow to reuse existing datastore") > Signed-off-by: Gabriel Goller > --- > > v2, thanks @Fiona: > - exact match with == instead of `.starts_with` > > src/api2/config/datastore.rs | 5 ++++- > 1 file changed, 4 insertions(+), 1 deletion(-) > > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index 374c302fcf28..9c36cb312a99 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -89,7 +89,10 @@ pub(crate) fn do_create_datastore( > if let Ok(dir) = std::fs::read_dir(&path) { > for file in dir { > let name = file?.file_name(); > - if !name.to_str().map_or(false, |name| name.starts_with('.')) { > + if !name > + .to_str() > + .map_or(false, |name| name.starts_with('.') || name == "lost+found") > + { > bail!("datastore path is not empty"); > } > } > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Fri Nov 22 09:17:14 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Fri, 22 Nov 2024 09:17:14 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup] fix #5801: backup_manager: make api call on datastore update In-Reply-To: <20241018100122.206452-1-g.goller@proxmox.com> References: <20241018100122.206452-1-g.goller@proxmox.com> Message-ID: <173226343400.2118190.5167435132897081016@yuna.proxmox.com> thanks! Quoting Gabriel Goller (2024-10-18 12:01:22) > When updating the datastore config using `proxmox-backup-manager` we > need to make an api-call, because the api-route starts a tokio task to > update the proxy-cache and the client will kill the task if we don't > wait. With an api-call the tokio task will be executed on the api > process and runs in the background while the endpoint handler has > already returned. > > Signed-off-by: Gabriel Goller > --- > > Note: this is not so nice, ideally we would like to call every endpoint > handler directly, but I'm afraid we don't have another choice here. 
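
the mechanics, as a hedged sketch (mirroring the patch below; `name`, `param` and `output_format` come from the handler's surroundings): the manager sends an actual HTTP request, so the tokio task that updates the proxy cache is spawned — and kept alive — by the API process instead of the short-lived CLI:

```rust
// executed in proxmox-backup-manager; the daemon runs the endpoint
// handler and owns the background task, so it survives the CLI exiting
let client = connect_to_localhost()?;
let result = client
    .put(
        format!("api2/json/config/datastore/{name}").as_str(),
        Some(param),
    )
    .await?;
view_task_result(&client, result, &output_format).await?;
```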
> > src/bin/proxmox_backup_manager/datastore.rs | 63 ++++++++++++++++++--- > 1 file changed, 56 insertions(+), 7 deletions(-) > > diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs > index 383bcd242955..3a349451f62a 100644 > --- a/src/bin/proxmox_backup_manager/datastore.rs > +++ b/src/bin/proxmox_backup_manager/datastore.rs > @@ -1,15 +1,17 @@ > -use anyhow::Error; > -use serde_json::Value; > - > +use pbs_api_types::{ > + DataStoreConfig, DataStoreConfigUpdater, DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA, > +}; > +use pbs_client::view_task_result; > use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; > use proxmox_schema::api; > > -use pbs_api_types::{DataStoreConfig, DATASTORE_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA}; > -use pbs_client::view_task_result; > - > use proxmox_backup::api2; > +use proxmox_backup::api2::config::datastore::DeletableProperty; > use proxmox_backup::client_helpers::connect_to_localhost; > > +use anyhow::Error; > +use serde_json::Value; > + > #[api( > input: { > properties: { > @@ -139,6 +141,53 @@ async fn delete_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> > Ok(()) > } > > +#[api( > + protected: true, > + input: { > + properties: { > + name: { > + schema: DATASTORE_SCHEMA, > + }, > + update: { > + type: DataStoreConfigUpdater, > + flatten: true, > + }, > + delete: { > + description: "List of properties to delete.", > + type: Array, > + optional: true, > + items: { > + type: DeletableProperty, > + } > + }, > + digest: { > + optional: true, > + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, > + }, > + "output-format": { > + schema: OUTPUT_FORMAT, > + optional: true, > + }, > + }, > + }, > +)] > +/// Update datastore configuration. > +async fn update_datastore(name: String, mut param: Value) -> Result<(), Error> { > + let output_format = extract_output_format(&mut param); > + let client = connect_to_localhost()?; > + > + let result = client > + .put( > + format!("api2/json/config/datastore/{name}").as_str(), > + Some(param), > + ) > + .await?; > + > + view_task_result(&client, result, &output_format).await?; > + > + Ok(()) > +} > + > pub fn datastore_commands() -> CommandLineInterface { > let cmd_def = CliCommandMap::new() > .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) > @@ -154,7 +203,7 @@ pub fn datastore_commands() -> CommandLineInterface { > ) > .insert( > "update", > - CliCommand::new(&api2::config::datastore::API_METHOD_UPDATE_DATASTORE) > + CliCommand::new(&API_METHOD_UPDATE_DATASTORE) > .arg_param(&["name"]) > .completion_cb("name", pbs_config::datastore::complete_datastore_name) > .completion_cb( > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From c.ebner at proxmox.com Fri Nov 22 09:41:23 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 09:41:23 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] improve push sync job log messages In-Reply-To: <1840736894.8111.1732217576163@webmail.proxmox.com> References: <20241121154337.471425-1-c.ebner@proxmox.com> <1840736894.8111.1732217576163@webmail.proxmox.com> Message-ID: On 11/21/24 20:32, Fabian Gr?nbichler wrote: > >> Gabriel Goller hat am 21.11.2024 18:04 CET geschrieben: >> >> >> On 21.11.2024 17:26, Christian Ebner wrote: >>> On 11/21/24 17:06, Gabriel Goller wrote: >>>> I think I'd be nice if we use `.context()` instead of `format_err` and >>>> debug 
print ("{err:?}") instead of default ("{}") here. As these >>>> messages are all errors they shouldn't appear too often in the log and >>>> if an error happens, you get much more information. >>>> >>>> What do you think? >>> >>> Hmm, can check that out, but wouldn't that disrupt the single line >>> character of the log entries, prefixed by the time stamp? >> >> We could use "{err:#}", which will print everything in one line? >> >> Although on the other hand, I'd understand if you want to have strict >> control over what is displayed to the users, so no hard feelings on >> this one. > > big tasks/job like the sync ones are a bit special w.r.t. error handling because they "catch" errors often at intermediate levels, log them, record that an error occur, but then proceed with a sensible next unit of work. so it's not possible to just add a lot of context to an error like with simple(r) API calls, where many/most errors can be treated as fatal, maybe requiring a bit of cleanup before bailing completely. > > I think a good compromise is to treat those units of work as a sort of error scope, and > - add context where it is missing (e.g., low level file or network access, where just bubbling up errors might be meaningless) > - and then when logging the error, think about formatting The current series already tries to follow your lead here, following along the lines of you followup patches to the original series. I will however double check where the suggested adding of an anyhow Error context makes sense and how to best log soft errors without disrupting the logs to much, increasing the verbosity for now as you suggested below. > > e.g., for syncs we have groups as lowest unit of work - if something fails within a group, we abort that group, but proceed with the next one. but if we just log "syncing group X failed - permission denied" that doesn't help whatsoever. it might be better to have three lines of warnings if something goes wrong, if those three lines contain appropriate information like > - request X / file access Y /.. failed with error Z (root cause) > - while processing snapshot A in group B (important context, because the request or path of the root cause might not tell us this) > - syncing group B failed (result of the error chain) > > achieving that requires carefully analyzing all error sources/chains though. when in doubt, I'd rather have a bit too much context initially (that usually means too much is added at lower levels and we can optimize there) than too little, but within reason ;) we can't travel back in time and add needed information to debug issues to our or users' logs, a stray duplicate context can easily be filtered out though. of course that doesn't mean every error is allowed to grow 10 lines long by default, balance is still important even when doing a first pass/implementation of improved error messages! > > and of course, the general rule applies - if we use errors with proper context, it should be done fairly consistently, so that the next level up can actually use it. it might be easier to use format_err or explicit additional `warn!` invocations to improve the situation immediately, and postpone adding proper contexts to do it for all of sync without time pressure. Yes, aligning the sync job logs for push and pull would of course also be of interest here, but given that the pull job also logs information not controlled by the job directly, but also indirectly by e.g. 
logs of operations on the datastore, that will require some more refactoring and reorganization of the logic to get it right, without breaking other logging. From f.gruenbichler at proxmox.com Fri Nov 22 10:01:28 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Fri, 22 Nov 2024 10:01:28 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/4] server: push: add error context to all target api calls In-Reply-To: <20241121154337.471425-4-c.ebner@proxmox.com> References: <20241121154337.471425-1-c.ebner@proxmox.com> <20241121154337.471425-4-c.ebner@proxmox.com> Message-ID: <173226608868.2118190.15465009393024345476@yuna.proxmox.com> Quoting Christian Ebner (2024-11-21 16:43:36) > Make it clear from the context that these error messages stem from > the response of an api call rather than a local error. > > Signed-off-by: Christian Ebner > --- > src/server/push.rs | 24 +++++++++++++++++------- > 1 file changed, 17 insertions(+), 7 deletions(-) > > diff --git a/src/server/push.rs b/src/server/push.rs > index 86cef5520..fe2e11220 100644 > --- a/src/server/push.rs > +++ b/src/server/push.rs > @@ -219,7 +219,9 @@ async fn remove_target_namespace( > if params.target.supports_prune_delete_stats { > let data = result["data"].take(); > serde_json::from_value(data).map_err(|err| { > - format_err!("removing target namespace {target_namespace} failed - {err}") > + format_err!( > + "Failed to remove remote namespace {target_namespace}, remote returned: {err}" > + ) this is attached to the wrong error - it should be attached to the client.delete call right above.. this here should instead add the context that we failed to parse the returned value (which should never happen, that means we missed some API breakage..) > }) > } else { > Ok(BackupGroupDeleteStats::default()) > @@ -236,7 +238,8 @@ async fn fetch_target_groups( > let args = Some(serde_json::json!({ "ns": target_namespace.name() })); > > let mut result = params.target.client.get(&api_path, args).await?; > - let groups: Vec = serde_json::from_value(result["data"].take())?; > + let groups: Vec = serde_json::from_value(result["data"].take()) > + .map_err(|err| format_err!("Failed to fetch remote groups, remote returned: {err}"))?; same here, just with get instead of delete ;) > > let (mut owned, not_owned) = groups.into_iter().fold( > (Vec::new(), HashSet::new()), > @@ -277,8 +280,9 @@ async fn remove_target_group( > > if params.target.supports_prune_delete_stats { > let data = result["data"].take(); > - serde_json::from_value(data) > - .map_err(|err| format_err!("removing target group {backup_group} failed - {err}")) > + serde_json::from_value(data).map_err(|err| { > + format_err!("Failed to remove remote group {backup_group}, remote returned: {err}") > + }) here as well > } else { > Ok(BackupGroupDeleteStats::default()) > } > @@ -313,7 +317,7 @@ async fn check_or_create_target_namespace( > match params.target.client.post(&api_path, Some(args)).await { > Ok(_) => info!("Successfully created new namespace {current} on remote"), > Err(err) => { > - bail!("Remote creation of namespace {current} failed, remote returned: {err}") > + bail!("Creation of remote namespace {current} failed, remote returned: {err}") > } > } > existing_target_namespaces.push(current.clone()); > @@ -585,7 +589,8 @@ async fn fetch_target_snapshots( > args["ns"] = serde_json::to_value(target_namespace)?; > } > let mut result = params.target.client.get(&api_path, Some(args)).await?; > - let snapshots: Vec = 
serde_json::from_value(result["data"].take())?;
> + let snapshots: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())
> + .map_err(|err| format_err!("Failed to fetch remote snapshots, remote returned: {err}"))?;

here as well

>
> Ok(snapshots)
> }
> @@ -603,7 +608,12 @@ async fn forget_target_snapshot(
> if !target_namespace.is_root() {
> args["ns"] = serde_json::to_value(target_namespace)?;
> }
> - params.target.client.delete(&api_path, Some(args)).await?;
> + params
> + .target
> + .client
> + .delete(&api_path, Some(args))
> + .await
> + .map_err(|err| format_err!("Failed to remove remote snapshot, remote returned: {err}"))?;

this should probably be just "Request to remote returned {err}", since the call site already logs the snapshot name and the fact that this is removal failing ;)

>
> Ok(())
> }
> --
> 2.39.5
>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>

From g.goller at proxmox.com Fri Nov 22 10:02:44 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Fri, 22 Nov 2024 10:02:44 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v4 1/4] snapshot: add helper function to retrieve verify_state
In-Reply-To: <2086615808.8058.1732216671890@webmail.proxmox.com>
References: <20241121133509.289419-1-g.goller@proxmox.com> <20241121133509.289419-2-g.goller@proxmox.com> <2086615808.8058.1732216671890@webmail.proxmox.com>
Message-ID: <2pz4wpna4u4fkvtdpblj4xj62o4n746zbnsfqepw76jyphmp6d@djir34ghshw5>

On 21.11.2024 20:17, Fabian Grünbichler wrote:
>> Gabriel Goller wrote on 21.11.2024 at 14:35 CET:
>> +
>> + /// Load the verify state from the manifest.
>> + pub fn verify_state(&self) -> Result<Option<VerifyState>, anyhow::Error> {
>> + let manifest = self.load_manifest()?;
>> + Ok(manifest
>> + .0
>> + .verify_state()
>> + .ok()
>> + .flatten()
>> + .map(|svs| svs.state))
>
>this still looks slightly wrong to me - if verify_state() returns an error, it's mapped to None (by the call to `ok()`), which would hide an inner parse error for the verification state?
>
>I think the following should correctly bubble up errors when loading the manifest or when parsing the contained verify state, while returning Ok(None) if no state is contained in the manifest:
>
>Ok(self.load_manifest()?.0.verify_state()?.map(|svs| svs.state))

I agree. I was somehow fixated on the load_manifest error always being the outer one, but tbh it doesn't matter.

>
>> + }
>> }
>>
>> @@ -159,13 +159,12 @@ fn upgrade_to_backup_protocol(
>> let info = backup_group.last_backup(true).unwrap_or(None);
>> if let Some(info) = info {
>> let (manifest, _) = info.backup_dir.load_manifest()?;
>> - let verify = manifest.unprotected["verify_state"].clone();
>> - match serde_json::from_value::<SnapshotVerifyState>(verify) {
>> - Ok(verify) => match verify.state {
>> + match manifest.verify_state() {
>> + Ok(Some(verify)) => match verify.state {
>> VerifyState::Ok => Some(info),
>> VerifyState::Failed => None,
>> },
>> - Err(_) => {
>> + Ok(None) | Err(_) => {
>> // no verify state found, treat as valid
>
>this as well, although it might make sense to log this here as well (pre-existing)

You mean separating the Ok(None) and Err(_) arm and `warn()` on the Err(_) one?
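
For illustration, that separation would look roughly like this (a sketch mirroring the arms discussed above; `manifest`, `info` and the surrounding types come from the patch context, and `{err:#}` keeps the error chain on one line):

```rust
use tracing::warn;

let info = match manifest.verify_state() {
    Ok(Some(verify)) => match verify.state {
        VerifyState::Ok => Some(info),
        VerifyState::Failed => None,
    },
    // no verify state found, treat as valid
    Ok(None) => Some(info),
    Err(err) => {
        // same fallback, but leave a trace in the log
        warn!("error parsing the snapshot manifest: {err:#}");
        Some(info)
    }
};
```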
>> Some(info)
>> }
>> diff --git a/src/backup/verify.rs b/src/backup/verify.rs
>> index 6ef7e8eb3ebb..20c605c4dde6 100644
>> --- a/src/backup/verify.rs
>> +++ b/src/backup/verify.rs
>> @@ -553,10 +553,9 @@ pub fn verify_filter(
>> return true;
>> }
>>
>> - let raw_verify_state = manifest.unprotected["verify_state"].clone();
>> - match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
>> - Err(_) => true, // no last verification, always include
>> - Ok(last_verify) => {
>> + match manifest.verify_state() {
>> + Ok(None) | Err(_) => true, // no last verification, always include
>
>same here! I think/hope the Err path for these should only trigger when somebody messes up manifests, but..

Yep.

From f.gruenbichler at proxmox.com Fri Nov 22 10:04:55 2024
From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=)
Date: Fri, 22 Nov 2024 10:04:55 +0100
Subject: [pbs-devel] partially-applied: [PATCH proxmox-backup 0/4] improve push sync job log messages
In-Reply-To: <20241121154337.471425-1-c.ebner@proxmox.com>
References: <20241121154337.471425-1-c.ebner@proxmox.com>
Message-ID: <173226629516.2118190.12004996331483257411@yuna.proxmox.com>

applied patches 1, 2 and 4 with some small follow-ups:
- moved the remove snapshot/group success logging into the corresponding code path, so that failures don't log twice/wrong information
- replaced the abort for non-owned groups in the last patch with a skip, we haven't started uploading yet at that point, so we can't abort anything.

comments for patch 3 in a separate mail.

Quoting Christian Ebner (2024-11-21 16:43:33)
> This is a small series of patches with the intent to improve the log
> messages for the sync job in push direction, mainly adding context to
> error messages from the remote when the error stems from an api call,
> limiting line length and improving usage of consistent terms for better
> readability and easier understanding.
> > Christian Ebner (4): > server: push: fix needless borrow clippy warning > server: push: consistently use remote over target for error messages > server: push: add error context to all target api calls > server: push: various smaller improvements to error messages > > src/server/push.rs | 79 ++++++++++++++++++++++++++++------------------ > 1 file changed, 49 insertions(+), 30 deletions(-) > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Fri Nov 22 10:08:21 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?Q?Fabian_Gr=C3=BCnbichler?=) Date: Fri, 22 Nov 2024 10:08:21 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup v4 1/4] snapshot: add helper function to retrieve verify_state In-Reply-To: <2pz4wpna4u4fkvtdpblj4xj62o4n746zbnsfqepw76jyphmp6d@djir34ghshw5> References: <20241121133509.289419-1-g.goller@proxmox.com> <20241121133509.289419-2-g.goller@proxmox.com> <2086615808.8058.1732216671890@webmail.proxmox.com> <2pz4wpna4u4fkvtdpblj4xj62o4n746zbnsfqepw76jyphmp6d@djir34ghshw5> Message-ID: <1650154714.8259.1732266501681@webmail.proxmox.com> > Gabriel Goller hat am 22.11.2024 10:02 CET geschrieben: > On 21.11.2024 20:17, Fabian Gr?nbichler wrote: > >> Gabriel Goller hat am 21.11.2024 14:35 CET geschrieben: > >> @@ -159,13 +159,12 @@ fn upgrade_to_backup_protocol( > >> let info = backup_group.last_backup(true).unwrap_or(None); > >> if let Some(info) = info { > >> let (manifest, _) = info.backup_dir.load_manifest()?; > >> - let verify = manifest.unprotected["verify_state"].clone(); > >> - match serde_json::from_value::(verify) { > >> - Ok(verify) => match verify.state { > >> + match manifest.verify_state() { > >> + Ok(Some(verify)) => match verify.state { > >> VerifyState::Ok => Some(info), > >> VerifyState::Failed => None, > >> }, > >> - Err(_) => { > >> + Ok(None) | Err(_) => { > >> // no verify state found, treat as valid > > > >this as well, although it might make sense to log this here as well (pre-existing) > > You mean separating the Ok(None) and Err(_) arm and `warn()` on the > Err(_) one? yes, exactly.. this should hopefully never trigger, but if it does, not logging it just causes confusion - with log output, there's a chance that additional information gives the deciding clue what's going on :) > >> Some(info) > >> } > >> diff --git a/src/backup/verify.rs b/src/backup/verify.rs > >> index 6ef7e8eb3ebb..20c605c4dde6 100644 > >> --- a/src/backup/verify.rs > >> +++ b/src/backup/verify.rs > >> @@ -553,10 +553,9 @@ pub fn verify_filter( > >> return true; > >> } > >> > >> - let raw_verify_state = manifest.unprotected["verify_state"].clone(); > >> - match serde_json::from_value::(raw_verify_state) { > >> - Err(_) => true, // no last verification, always include > >> - Ok(last_verify) => { > >> + match manifest.verify_state() { > >> + Ok(None) | Err(_) => true, // no last verification, always include > > > >same here! I think/hope the Err path for these should only trigger when somebody messes up manifests, but.. > > Yep. 
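
A condensed sketch of the pattern the logging threads above converge on — context attached at the error source, one `warn!` per unit of work, and the alternate `{err:#}` formatter rendering the whole chain on a single line. `load_manifest`, `sync_group` and `pull_all` are made-up stand-ins here, not PBS functions:

```rust
use anyhow::{Context, Result};
use tracing::warn;

// the root cause gets its context attached right where it happens
fn load_manifest(path: &std::path::Path) -> Result<Vec<u8>> {
    std::fs::read(path).with_context(|| format!("unable to read manifest {path:?}"))
}

// hypothetical unit of work standing in for syncing one group
fn sync_group(group: &str) -> Result<()> {
    let _manifest = load_manifest(std::path::Path::new(group))?;
    Ok(())
}

fn pull_all(groups: &[String]) {
    for group in groups {
        // catch errors at the unit-of-work boundary, log the whole chain
        // on one line with the alternate formatter, continue with the next
        if let Err(err) = sync_group(group) {
            warn!("syncing group {group} failed: {err:#}");
        }
    }
}
```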
From t.lamprecht at proxmox.com Fri Nov 22 10:26:40 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Fri, 22 Nov 2024 10:26:40 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/2] pxar: add file name to path_info when applying metadata In-Reply-To: <20241008083355.181031-1-f.gruenbichler@proxmox.com> References: <20241008083355.181031-1-f.gruenbichler@proxmox.com> Message-ID: <7a685c6c-4b2d-45c3-980e-6b1800b68d1e@proxmox.com> Am 08.10.24 um 10:33 schrieb Fabian Gr?nbichler: > else, error messages using this path_info refer to the parent directory instead > of the actual file entry causing the problem. since this is just for > informational purposes, lossy conversion is acceptable. > > Signed-off-by: Fabian Gr?nbichler > --- > > Notes: > noticed while preparing patch #2, can/should maybe be applied even if that one isn't ;) > For both patches: Acked-by: Thomas Lamprecht From f.gruenbichler at proxmox.com Fri Nov 22 10:36:19 2024 From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=) Date: Fri, 22 Nov 2024 10:36:19 +0100 Subject: [pbs-devel] applied: [PATCH v4 proxmox-backup] fix #5710: api: backup: stat known chunks on backup finish In-Reply-To: <20241008094617.96273-1-c.ebner@proxmox.com> References: <20241008094617.96273-1-c.ebner@proxmox.com> Message-ID: <173226817965.2118190.14001940515863834210@yuna.proxmox.com> Quoting Christian Ebner (2024-10-08 11:46:17) > Known chunks are expected to be present on the datastore a-priori, > allowing clients to only re-index these chunks without uploading the > raw chunk data. The list of reusable known chunks is send to the > client by the server, deduced from the indexed chunks of the previous > backup snapshot of the group. > > If however such a known chunk disappeared (the previous backup > snapshot having been verified before that or not verified just yet), > the backup will finish just fine, leading to a seemingly successful > backup. Only a subsequent verification job will detect the backup > snapshot as being corrupt. > > In order to reduce the impact, stat the list of previously known > chunks when finishing the backup. If a missing chunk is detected, the > backup run itself will fail and the previous backup snapshots verify > state is set to failed. > This prevents the same snapshot from being reused by another, > subsequent backup job. > > Note: > The current backup run might have been just fine, if the now missing > known chunk is not indexed. But since there is no straight forward > way to detect which known chunks have not been reused in the fast > incremental mode for fixed index backups, the backup run is > considered failed. > > link to issue in bugtracker: > https://bugzilla.proxmox.com/show_bug.cgi?id=5710 > > Signed-off-by: Christian Ebner > Tested-by: Gabriel Goller > Reviewed-by: Gabriel Goller > --- > Changes since version 3, thanks to Gabriel for additional comments: > - Use anyhow error context also for manifest update error > - Use `with_context` over mapping the error, which is more concise > > Changes since version 2, thanks to Gabriel for testing and review: > - Use and display anyhow error context > - s/backp/backup/ > > Changes since version 1, thanks to Dietmar and Gabriel for feedback: > - Only stat on backup finish > - Distinguish newly uploaded from previously known chunks, to be able > to only stat the latter. > > New test on my side show a performance degradation of ~2% for the VM > backup and about ~10% for the LXC backup as compared to an unpatched > server. 
> In contrast to version 1 of the patches the PBS datastore this time > was located on an NFS share backed by an NVME SSD. > > I did perform vzdump backups of a VM with a 32G disk attached and a > LXC container with a Debian install and rootfs of ca. 400M (both off, > no changes in data in-between backup runs). > Again performed 5 runs each after an initial run to assure full chunk > presence on server and valid previous snapshot. > > Here the updated figures: > > ----------------------------------------------------------- > patched | unpatched > ----------------------------------------------------------- > VM | LXC | VM | LXC > ----------------------------------------------------------- > 14.0s ? 0.8s | 2.2s ? 0.1s | 13.7s ? 0.5s | 2.0s ? 0.03s > ----------------------------------------------------------- > > src/api2/backup/environment.rs | 54 +++++++++++++++++++++++++++++----- > src/api2/backup/mod.rs | 22 +++++++++++++- > 2 files changed, 68 insertions(+), 8 deletions(-) > > diff --git a/src/api2/backup/environment.rs b/src/api2/backup/environment.rs > index 99d885e2e..19624fae3 100644 > --- a/src/api2/backup/environment.rs > +++ b/src/api2/backup/environment.rs > @@ -1,4 +1,4 @@ > -use anyhow::{bail, format_err, Error}; > +use anyhow::{bail, format_err, Context, Error}; > use nix::dir::Dir; > use std::collections::HashMap; > use std::sync::{Arc, Mutex}; > @@ -72,8 +72,14 @@ struct FixedWriterState { > incremental: bool, > } > > -// key=digest, value=length > -type KnownChunksMap = HashMap<[u8; 32], u32>; > +#[derive(Copy, Clone)] > +struct KnownChunkInfo { > + uploaded: bool, > + length: u32, > +} > + > +// key=digest, value=KnownChunkInfo > +type KnownChunksMap = HashMap<[u8; 32], KnownChunkInfo>; > > struct SharedBackupState { > finished: bool, > @@ -159,7 +165,13 @@ impl BackupEnvironment { > > state.ensure_unfinished()?; > > - state.known_chunks.insert(digest, length); > + state.known_chunks.insert( > + digest, > + KnownChunkInfo { > + uploaded: false, > + length, > + }, > + ); > > Ok(()) > } > @@ -213,7 +225,13 @@ impl BackupEnvironment { > } > > // register chunk > - state.known_chunks.insert(digest, size); > + state.known_chunks.insert( > + digest, > + KnownChunkInfo { > + uploaded: true, > + length: size, > + }, > + ); > > Ok(()) > } > @@ -248,7 +266,13 @@ impl BackupEnvironment { > } > > // register chunk > - state.known_chunks.insert(digest, size); > + state.known_chunks.insert( > + digest, > + KnownChunkInfo { > + uploaded: true, > + length: size, > + }, > + ); > > Ok(()) > } > @@ -256,7 +280,23 @@ impl BackupEnvironment { > pub fn lookup_chunk(&self, digest: &[u8; 32]) -> Option { > let state = self.state.lock().unwrap(); > > - state.known_chunks.get(digest).copied() > + state > + .known_chunks > + .get(digest) > + .map(|known_chunk_info| known_chunk_info.length) > + } > + > + /// stat known chunks from previous backup, so excluding newly uploaded ones > + pub fn stat_prev_known_chunks(&self) -> Result<(), Error> { > + let state = self.state.lock().unwrap(); > + for (digest, known_chunk_info) in &state.known_chunks { > + if !known_chunk_info.uploaded { > + self.datastore > + .stat_chunk(digest) > + .with_context(|| format!("stat failed on {}", hex::encode(digest)))?; > + } > + } > + Ok(()) > } > > /// Store the writer with an unique ID > diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs > index ea0d0292e..63c49f653 100644 > --- a/src/api2/backup/mod.rs > +++ b/src/api2/backup/mod.rs > @@ -1,6 +1,6 @@ > //! 
Backup protocol (HTTP2 upgrade) > > -use anyhow::{bail, format_err, Error}; > +use anyhow::{bail, format_err, Context, Error}; > use futures::*; > use hex::FromHex; > use hyper::header::{HeaderValue, CONNECTION, UPGRADE}; > @@ -785,6 +785,26 @@ fn finish_backup( > ) -> Result { > let env: &BackupEnvironment = rpcenv.as_ref(); > > + if let Err(err) = env.stat_prev_known_chunks() { > + env.debug(format!("stat registered chunks failed - {err:?}")); > + > + if let Some(last) = env.last_backup.as_ref() { > + // No need to acquire snapshot lock, already locked when starting the backup > + let verify_state = SnapshotVerifyState { > + state: VerifyState::Failed, > + upid: env.worker.upid().clone(), // backup writer UPID > + }; > + let verify_state = serde_json::to_value(verify_state)?; > + last.backup_dir > + .update_manifest(|manifest| { > + manifest.unprotected["verify_state"] = verify_state; > + }) > + .with_context(|| "manifest update failed")?; > + } > + > + bail!("stat known chunks failed - {err:?}"); > + } > + > env.finish_backup()?; > env.log("successfully finished backup"); > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel From g.goller at proxmox.com Fri Nov 22 10:39:11 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 10:39:11 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v4 0/4] fix #3786: resync corrupt chunks in sync-job In-Reply-To: <1974064487.8066.1732216919425@webmail.proxmox.com> References: <20241121133509.289419-1-g.goller@proxmox.com> <1974064487.8066.1732216919425@webmail.proxmox.com> Message-ID: On 21.11.2024 20:21, Fabian Gr?nbichler wrote: >Consider patches 2-4, and 1 with the slight adaptation for the first helper > >Reviewed-by: Fabian Gr?nbichler Thanks for the review! Sent a v5! From g.goller at proxmox.com Fri Nov 22 10:39:16 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 10:39:16 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v5 1/4] snapshot: add helper function to retrieve verify_state In-Reply-To: <20241122093919.59777-1-g.goller@proxmox.com> References: <20241122093919.59777-1-g.goller@proxmox.com> Message-ID: <20241122093919.59777-2-g.goller@proxmox.com> Add helper functions to retrieve the verify_state from the manifest of a snapshot. Replaced all the manual "verify_state" parsing with the helper function. 
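
For instance, a caller that only needs the state can then do something like this (hypothetical snippet, `snapshot` being a `pbs_datastore::BackupDir`):

```rust
// errors from loading or parsing the manifest bubble up via `?`,
// a missing state simply maps to Ok(None)
if snapshot.verify_state()? == Some(VerifyState::Failed) {
    // e.g. re-sync this snapshot from the remote
}
```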
Suggested-by: Fabian Grünbichler
Signed-off-by: Gabriel Goller
---
pbs-datastore/src/backup_info.rs | 9 +++++++--
pbs-datastore/src/manifest.rs | 14 +++++++++++++-
src/api2/admin/datastore.rs | 16 +++++++---------
src/api2/backup/mod.rs | 18 +++++++++++-------
src/backup/verify.rs | 13 ++++++++-----
5 files changed, 46 insertions(+), 24 deletions(-)

diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index 62d12b1183df..a581d75757b4 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -8,8 +8,8 @@ use anyhow::{bail, format_err, Error};
use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions};
use pbs_api_types::{
- Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX,
- BACKUP_FILE_REGEX,
+ Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState,
+ BACKUP_DATE_REGEX, BACKUP_FILE_REGEX,
};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
@@ -555,6 +555,11 @@ impl BackupDir {
Ok(())
}
+
+ /// Load the verify state from the manifest.
+ pub fn verify_state(&self) -> Result<Option<VerifyState>, anyhow::Error> {
+ Ok(self.load_manifest()?.0.verify_state()?.map(|svs| svs.state))
+ }
}

impl AsRef for BackupDir {
diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs
index c3df014272a0..3013fab97221 100644
--- a/pbs-datastore/src/manifest.rs
+++ b/pbs-datastore/src/manifest.rs
@@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
-use pbs_api_types::{BackupType, CryptMode, Fingerprint};
+use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState};
use pbs_tools::crypt_config::CryptConfig;
pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
@@ -242,6 +242,18 @@ impl BackupManifest {
let manifest: BackupManifest = serde_json::from_value(json)?;
Ok(manifest)
}
+
+ /// Get the verify state of the snapshot
+ ///
+ /// Note: New snapshots, which have not been verified yet, do not have a status and this
+ /// function will return `Ok(None)`.
+ pub fn verify_state(&self) -> Result<Option<SnapshotVerifyState>, anyhow::Error> {
+ let verify = self.unprotected["verify_state"].clone();
+ if verify.is_null() {
+ return Ok(None);
+ }
+ Ok(Some(serde_json::from_value::<SnapshotVerifyState>(verify)?))
+ }
}

impl TryFrom for BackupManifest {
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 99b579f02c50..3624dba41199 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -537,15 +537,13 @@ unsafe fn list_snapshots_blocking(
}
};
- let verification = manifest.unprotected["verify_state"].clone();
- let verification: Option<SnapshotVerifyState> =
- match serde_json::from_value(verification) {
- Ok(verify) => verify,
- Err(err) => {
- eprintln!("error parsing verification state : '{}'", err);
- None
- }
- };
+ let verification: Option<SnapshotVerifyState> = match manifest.verify_state() {
+ Ok(verify) => verify,
+ Err(err) => {
+ eprintln!("error parsing verification state : '{}'", err);
+ None
+ }
+ };
let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs
index ea0d0292ec58..a735768b0f83 100644
--- a/src/api2/backup/mod.rs
+++ b/src/api2/backup/mod.rs
@@ -8,6 +8,7 @@ use hyper::http::request::Parts;
use hyper::{Body, Request, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value};
+use tracing::warn;
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_router::{http_err, list_subdirs_api_method};
@@ -19,9 +20,9 @@ use proxmox_sortable_macro::sortable;
use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_api_types::{
- Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState,
- BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
- BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
+ Authid, BackupNamespace, BackupType, Operation, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+ BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
+ CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
@@ -159,15 +160,18 @@ fn upgrade_to_backup_protocol(
let info = backup_group.last_backup(true).unwrap_or(None);
if let Some(info) = info {
let (manifest, _) = info.backup_dir.load_manifest()?;
- let verify = manifest.unprotected["verify_state"].clone();
- match serde_json::from_value::<SnapshotVerifyState>(verify) {
- Ok(verify) => match verify.state {
+ match manifest.verify_state() {
+ Ok(Some(verify)) => match verify.state {
VerifyState::Ok => Some(info),
VerifyState::Failed => None,
},
- Err(_) => {
+ Ok(None) => {
// no verify state found, treat as valid
Some(info)
+ },
+ Err(err) => {
+ warn!("error parsing the snapshot manifest: {err:#}");
+ Some(info)
}
}
} else {
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 6ef7e8eb3ebb..c1abe69a4fde 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -5,7 +5,7 @@ use std::time::Instant;
use anyhow::{bail, format_err, Error};
use nix::dir::Dir;
-use tracing::{error, info};
+use tracing::{error, info, warn};
use proxmox_sys::fs::lock_dir_noblock_shared;
use proxmox_worker_task::WorkerTaskContext;
@@ -553,10 +553,13 @@ pub fn verify_filter(
return true;
}
- let raw_verify_state = manifest.unprotected["verify_state"].clone();
- match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
- Err(_) => true, // no last verification, always include
- Ok(last_verify) => {
+ match manifest.verify_state() {
+ Err(err) => {
+ warn!("error reading manifest: {err:#}");
+
true + } + Ok(None) => true, // no last verification, always include + Ok(Some(last_verify)) => { match outdated_after { None => false, // never re-verify if ignored and no max age Some(max_age) => { -- 2.39.5 From g.goller at proxmox.com Fri Nov 22 10:39:17 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 10:39:17 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v5 2/4] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <20241122093919.59777-1-g.goller@proxmox.com> References: <20241122093919.59777-1-g.goller@proxmox.com> Message-ID: <20241122093919.59777-3-g.goller@proxmox.com> This option allows us to "fix" corrupt snapshots (and/or their chunks) by pulling them from another remote. When traversing the remote snapshots, we check if it exists locally, and if it is, we check if the last verification of it failed. If the local snapshot is broken and the `resync-corrupt` option is turned on, we pull in the remote snapshot, overwriting the local one. This is very useful and has been requested a lot, as there is currently no way to "fix" corrupt chunks/snapshots even if the user has a healthy version of it on their offsite instance. Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller Reviewed-by: Fabian Gr?nbichler --- pbs-api-types/src/jobs.rs | 10 ++++++ src/api2/config/sync.rs | 4 +++ src/api2/pull.rs | 9 ++++- src/server/pull.rs | 72 ++++++++++++++++++++++++++++++--------- 4 files changed, 78 insertions(+), 17 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e8056beb00cb..52520811b560 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -536,6 +536,10 @@ impl SyncDirection { } } +pub const RESYNC_CORRUPT_SCHEMA: Schema = + BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") + .schema(); + #[api( properties: { id: { @@ -590,6 +594,10 @@ impl SyncDirection { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + } } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -623,6 +631,8 @@ pub struct SyncJobConfig { pub limit: RateLimitConfig, #[serde(skip_serializing_if = "Option::is_none")] pub transfer_last: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub resync_corrupt: Option, } impl SyncJobConfig { diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 78eb7320566b..7ff6cae029d1 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -471,6 +471,9 @@ pub fn update_sync_job( if let Some(transfer_last) = update.transfer_last { data.transfer_last = Some(transfer_last); } + if let Some(resync_corrupt) = update.resync_corrupt { + data.resync_corrupt = Some(resync_corrupt); + } if update.limit.rate_in.is_some() { data.limit.rate_in = update.limit.rate_in; @@ -629,6 +632,7 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator ns: None, owner: Some(write_auth_id.clone()), comment: None, + resync_corrupt: None, remove_vanished: None, max_depth: None, group_filter: None, diff --git a/src/api2/pull.rs b/src/api2/pull.rs index d039dab59c65..d8ed1a7347b5 100644 --- a/src/api2/pull.rs +++ b/src/api2/pull.rs @@ -10,7 +10,7 @@ use pbs_api_types::{ Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, - 
TRANSFER_LAST_SCHEMA, + RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, }; use pbs_config::CachedUserInfo; use proxmox_rest_server::WorkerTask; @@ -87,6 +87,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters { sync_job.group_filter.clone(), sync_job.limit.clone(), sync_job.transfer_last, + sync_job.resync_corrupt, ) } } @@ -132,6 +133,10 @@ impl TryFrom<&SyncJobConfig> for PullParameters { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + }, }, }, access: { @@ -156,6 +161,7 @@ async fn pull( group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -193,6 +199,7 @@ async fn pull( group_filter, limit, transfer_last, + resync_corrupt, )?; // fixme: set to_stdout to false? diff --git a/src/server/pull.rs b/src/server/pull.rs index 08b55956ce52..40d872d2487c 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -12,7 +12,8 @@ use tracing::info; use pbs_api_types::{ print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + RateLimitConfig, Remote, VerifyState, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, + PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -55,6 +56,8 @@ pub(crate) struct PullParameters { group_filter: Vec, /// How many snapshots should be transferred at most (taking the newest N snapshots) transfer_last: Option, + /// Whether to re-sync corrupted snapshots + resync_corrupt: bool, } impl PullParameters { @@ -72,12 +75,14 @@ impl PullParameters { group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, ) -> Result { if let Some(max_depth) = max_depth { ns.check_max_depth(max_depth)?; remote_ns.check_max_depth(max_depth)?; }; let remove_vanished = remove_vanished.unwrap_or(false); + let resync_corrupt = resync_corrupt.unwrap_or(false); let source: Arc = if let Some(remote) = remote { let (remote_config, _digest) = pbs_config::remote::config()?; @@ -116,6 +121,7 @@ impl PullParameters { max_depth, group_filter, transfer_last, + resync_corrupt, }) } } @@ -323,7 +329,7 @@ async fn pull_single_archive<'a>( /// /// Pulling a snapshot consists of the following steps: /// - (Re)download the manifest -/// -- if it matches, only download log and treat snapshot as already synced +/// -- if it matches and is not corrupt, only download log and treat snapshot as already synced /// - Iterate over referenced files /// -- if file already exists, verify contents /// -- if not, pull it from the remote @@ -332,6 +338,7 @@ async fn pull_snapshot<'a>( reader: Arc, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc>>, + corrupt: bool, ) -> Result { let mut sync_stats = SyncStats::default(); let mut manifest_name = snapshot.full_path(); @@ -352,7 +359,7 @@ async fn pull_snapshot<'a>( return Ok(sync_stats); } - if manifest_name.exists() { + if manifest_name.exists() && !corrupt { let manifest_blob = proxmox_lang::try_block!({ let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { format_err!("unable to open local manifest {manifest_name:?} - {err}") @@ -381,7 +388,7 @@ async fn pull_snapshot<'a>( let mut path = snapshot.full_path(); path.push(&item.filename); - if path.exists() { + if !corrupt && path.exists() { match 
ArchiveType::from_path(&item.filename)? { ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path)?; @@ -443,6 +450,7 @@ async fn pull_snapshot_from<'a>( reader: Arc, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc>>, + corrupt: bool, ) -> Result { let (_path, is_new, _snap_lock) = snapshot .datastore() @@ -451,7 +459,8 @@ async fn pull_snapshot_from<'a>( let sync_stats = if is_new { info!("sync snapshot {}", snapshot.dir()); - match pull_snapshot(reader, snapshot, downloaded_chunks).await { + // this snapshot is new, so it can never be corrupt + match pull_snapshot(reader, snapshot, downloaded_chunks, false).await { Err(err) => { if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( snapshot.backup_ns(), @@ -468,8 +477,12 @@ async fn pull_snapshot_from<'a>( } } } else { - info!("re-sync snapshot {}", snapshot.dir()); - pull_snapshot(reader, snapshot, downloaded_chunks).await? + if corrupt { + info!("re-sync snapshot {} due to corruption", snapshot.dir()); + } else { + info!("re-sync snapshot {}", snapshot.dir()); + } + pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await? }; Ok(sync_stats) @@ -523,26 +536,52 @@ async fn pull_group( .last_successful_backup(&target_ns, group)? .unwrap_or(i64::MIN); - let list: Vec = raw_list + // Filter remote BackupDirs to include in pull + // Also stores if the snapshot is corrupt (verification job failed) + let list: Vec<(BackupDir, bool)> = raw_list .into_iter() .enumerate() - .filter(|&(pos, ref dir)| { + .filter_map(|(pos, dir)| { source_snapshots.insert(dir.time); + // If resync_corrupt is set, check if the corresponding local snapshot failed to + // verification + if params.resync_corrupt { + let local_dir = params + .target + .store + .backup_dir(target_ns.clone(), dir.clone()); + if let Ok(local_dir) = local_dir { + match local_dir.verify_state() { + Ok(Some(state)) => { + if state == VerifyState::Failed { + return Some((dir, true)); + } + } + Ok(None) => { + // The verify_state item was not found in the manifest, this means the + // snapshot is new. + } + Err(_) => { + // There was an error loading the manifest, probably better if we + // resync. 
+ return Some((dir, true)); + } + } + } + } // Note: the snapshot represented by `last_sync_time` might be missing its backup log // or post-backup verification state if those were not yet available during the last // sync run, always resync it if last_sync_time > dir.time { already_synced_skip_info.update(dir.time); - return false; + return None; } - if pos < cutoff && last_sync_time != dir.time { transfer_last_skip_info.update(dir.time); - return false; + return None; } - true + Some((dir, false)) }) - .map(|(_, dir)| dir) .collect(); if already_synced_skip_info.count > 0 { @@ -561,7 +600,7 @@ async fn pull_group( let mut sync_stats = SyncStats::default(); - for (pos, from_snapshot) in list.into_iter().enumerate() { + for (pos, (from_snapshot, corrupt)) in list.into_iter().enumerate() { let to_snapshot = params .target .store @@ -571,7 +610,8 @@ async fn pull_group( .source .reader(source_namespace, &from_snapshot) .await?; - let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await; + let result = + pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone(), corrupt).await; progress.done_snapshots = pos as u64 + 1; info!("percentage done: {progress}"); -- 2.39.5 From g.goller at proxmox.com Fri Nov 22 10:39:15 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 10:39:15 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v5 0/4] fix #3786: resync corrupt chunks in sync-job Message-ID: <20241122093919.59777-1-g.goller@proxmox.com> Add an option `resync-corrupt` that resyncs corrupt snapshots when running sync-job. This option checks if the local snapshot failed the last verification and if it did, overwrites the local snapshot with the remote one. This is quite useful, as we currently don't have an option to "fix" broken chunks/snapshots in any way, even if a healthy version is on another (e.g. offsite) instance. Important things to note are also: this has a slight performance penalty, as all the manifests have to be looked through, and a verification job has to be run beforehand, otherwise we do not know if the snapshot is healthy. Note: This series was originally written by Shannon! I just picked it up, rebased, and fixed the obvious comments on the last series. Changelog v5 (thanks @Fabian): - rebase - don't remove parsing error in verify_state helper - add error logs on failures Changelog v4 (thanks @Fabian): - make verify_state bubble up errors - call verify_state helper everywhere we need the verify_state - resync broken manifests (so resync when load_manifest fails) Changelog v3 (thanks @Fabian): - filter out snapshots earlier in the pull_group function - move verify_state to BackupManifest and fixed invocations - reverted verify_state Option -> Result state (It doesn't matter if we get an error, we get that quite often f.e. 
in new backups) - removed some unnecessary log lines - removed some unnecessary imports and modifications - rebase to current master Changelog v2 (thanks @Thomas): - order git trailers - adjusted schema description to include broken indexes - change verify_state to return a Result<_,_> - print error if verify_state is not able to read the state - update docs on pull_snapshot function - simplify logic by combining flags - move log line out of loop to only print once that we resync the snapshot Changelog since RFC (Shannon's work): - rename option from deep-sync to resync-corrupt - rebase on latest master (and change implementation details, as a lot has changed around sync-jobs) proxmox-backup: Gabriel Goller (4): snapshot: add helper function to retrieve verify_state fix #3786: api: add resync-corrupt option to sync jobs fix #3786: ui/cli: add resync-corrupt option on sync-jobs fix #3786: docs: add resync-corrupt option to sync-job docs/managing-remotes.rst | 6 +++ pbs-api-types/src/jobs.rs | 10 +++++ pbs-datastore/src/backup_info.rs | 9 +++- pbs-datastore/src/manifest.rs | 14 +++++- src/api2/admin/datastore.rs | 16 +++---- src/api2/backup/mod.rs | 18 +++++--- src/api2/config/sync.rs | 4 ++ src/api2/pull.rs | 9 +++- src/backup/verify.rs | 13 +++--- src/bin/proxmox-backup-manager.rs | 16 ++++++- src/server/pull.rs | 72 ++++++++++++++++++++++++------- www/window/SyncJobEdit.js | 11 +++++ 12 files changed, 155 insertions(+), 43 deletions(-) Summary over all repositories: 12 files changed, 155 insertions(+), 43 deletions(-) -- Generated by git-murpp 0.7.1 From g.goller at proxmox.com Fri Nov 22 10:39:19 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 10:39:19 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v5 4/4] fix #3786: docs: add resync-corrupt option to sync-job In-Reply-To: <20241122093919.59777-1-g.goller@proxmox.com> References: <20241122093919.59777-1-g.goller@proxmox.com> Message-ID: <20241122093919.59777-5-g.goller@proxmox.com> Add short section explaining the `resync-corrupt` option on the sync-job. Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller Reviewed-by: Fabian Gr?nbichler --- docs/managing-remotes.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/managing-remotes.rst b/docs/managing-remotes.rst index a7fd5143d236..4a78a9310fa5 100644 --- a/docs/managing-remotes.rst +++ b/docs/managing-remotes.rst @@ -135,6 +135,12 @@ For mixing include and exclude filter, following rules apply: .. note:: The ``protected`` flag of remote backup snapshots will not be synced. +Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have +failed to verify during the last :ref:`maintenance_verification`. Hence, a verification +job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware +that a 'resync-corrupt'-job needs to check the manifests of all snapshots in a datastore +and might take much longer than regular sync jobs. + Namespace Support ^^^^^^^^^^^^^^^^^ -- 2.39.5 From g.goller at proxmox.com Fri Nov 22 10:39:18 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 10:39:18 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v5 3/4] fix #3786: ui/cli: add resync-corrupt option on sync-jobs In-Reply-To: <20241122093919.59777-1-g.goller@proxmox.com> References: <20241122093919.59777-1-g.goller@proxmox.com> Message-ID: <20241122093919.59777-4-g.goller@proxmox.com> Add the `resync-corrupt` option to the ui and the `proxmox-backup-manager` cli. 
It is listed in the `Advanced` section, because it slows the sync-job down and is useless if no verification job was run beforehand.

Originally-by: Shannon Sterz
Signed-off-by: Gabriel Goller
Reviewed-by: Fabian Grünbichler
---
src/bin/proxmox-backup-manager.rs | 16 ++++++++++++--
www/window/SyncJobEdit.js | 11 +++++++++++
2 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs
index d887dc1d50a1..02ca0d028225 100644
--- a/src/bin/proxmox-backup-manager.rs
+++ b/src/bin/proxmox-backup-manager.rs
@@ -14,8 +14,8 @@ use pbs_api_types::percent_encoding::percent_encode_component;
use pbs_api_types::{
BackupNamespace, GroupFilter, RateLimitConfig, SyncDirection, SyncJobConfig, DATASTORE_SCHEMA,
GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA,
- REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA,
- VERIFICATION_OUTDATED_AFTER_SCHEMA,
+ REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA,
+ UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::{display_task_log, view_task_result};
use pbs_config::sync;
@@ -307,6 +307,7 @@ async fn sync_datastore(
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
transfer_last: Option<usize>,
+ resync_corrupt: Option<bool>,
param: Value,
sync_direction: SyncDirection,
) -> Result<Value, Error> {
@@ -343,6 +344,10 @@ async fn sync_datastore(
args["transfer-last"] = json!(transfer_last)
}
+ if let Some(resync) = resync_corrupt {
+ args["resync-corrupt"] = Value::from(resync);
+ }
+
let mut limit_json = json!(limit);
let limit_map = limit_json
.as_object_mut()
@@ -405,6 +410,10 @@ async fn sync_datastore(
schema: TRANSFER_LAST_SCHEMA,
optional: true,
},
+ "resync-corrupt": {
+ schema: RESYNC_CORRUPT_SCHEMA,
+ optional: true,
+ },
}
}
)]
@@ -421,6 +430,7 @@ async fn pull_datastore(
group_filter: Option<Vec<GroupFilter>>,
limit: RateLimitConfig,
transfer_last: Option<usize>,
+ resync_corrupt: Option<bool>,
param: Value,
) -> Result<Value, Error> {
sync_datastore(
@@ -434,6 +444,7 @@ async fn pull_datastore(
group_filter,
limit,
transfer_last,
+ resync_corrupt,
param,
SyncDirection::Pull,
)
@@ -513,6 +524,7 @@ async fn push_datastore(
group_filter,
limit,
transfer_last,
+ None,
param,
SyncDirection::Push,
)
diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index 0e648e7b3e50..03f61bee6494 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -358,6 +358,17 @@ Ext.define('PBS.window.SyncJobEdit', {
deleteEmpty: '{!isCreate}',
},
},
+ {
+ fieldLabel: gettext('Resync corrupt snapshots'),
+ xtype: 'proxmoxcheckbox',
+ name: 'resync-corrupt',
+ autoEl: {
+ tag: 'div',
+ 'data-qtip': gettext('Re-sync snapshots whose verification failed.'),
+ },
+ uncheckedValue: false,
+ value: false,
+ },
],
},
{
--
2.39.5

From f.gruenbichler at proxmox.com Fri Nov 22 10:42:48 2024
From: f.gruenbichler at proxmox.com (Fabian =?utf-8?q?Gr=C3=BCnbichler?=)
Date: Fri, 22 Nov 2024 10:42:48 +0100
Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 1/2] pxar: add file name to path_info when applying metadata
In-Reply-To: <20241008083355.181031-1-f.gruenbichler@proxmox.com>
References: <20241008083355.181031-1-f.gruenbichler@proxmox.com>
Message-ID: <173226856889.2118190.1849711355735372910@yuna.proxmox.com>

with git trailers folded in, thanks!

Quoting Fabian Grünbichler (2024-10-08 10:33:54)
> else, error messages using this path_info refer to the parent directory instead
> of the actual file entry causing the problem.
since this is just for > informational purposes, lossy conversion is acceptable. > > Signed-off-by: Fabian Gr?nbichler > --- > > Notes: > noticed while preparing patch #2, can/should maybe be applied even if that one isn't ;) > > pbs-client/src/pxar/extract.rs | 10 ++++++++-- > pbs-client/src/pxar/metadata.rs | 8 +++++++- > 2 files changed, 15 insertions(+), 3 deletions(-) > > diff --git a/pbs-client/src/pxar/extract.rs b/pbs-client/src/pxar/extract.rs > index b1245c5fc..c0a1db05d 100644 > --- a/pbs-client/src/pxar/extract.rs > +++ b/pbs-client/src/pxar/extract.rs > @@ -724,7 +724,10 @@ impl Extractor { > self.feature_flags, > metadata, > file.as_raw_fd(), > - self.dir_stack.path(), > + &self > + .dir_stack > + .path() > + .join(file_name.to_string_lossy().to_string()), > &mut self.on_error, > ) > } > @@ -783,7 +786,10 @@ impl Extractor { > self.feature_flags, > metadata, > file.as_raw_fd(), > - self.dir_stack.path(), > + &self > + .dir_stack > + .path() > + .join(file_name.to_string_lossy().to_string()), > &mut self.on_error, > ) > } > diff --git a/pbs-client/src/pxar/metadata.rs b/pbs-client/src/pxar/metadata.rs > index 8e7a14312..071547094 100644 > --- a/pbs-client/src/pxar/metadata.rs > +++ b/pbs-client/src/pxar/metadata.rs > @@ -72,7 +72,13 @@ pub fn apply_at( > Mode::empty(), > )?; > > - apply(flags, metadata, fd.as_raw_fd(), path_info, on_error) > + apply( > + flags, > + metadata, > + fd.as_raw_fd(), > + &path_info.join(file_name.to_string_lossy().to_string()), > + on_error, > + ) > } > > pub fn apply_initial_flags( > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel From c.ebner at proxmox.com Fri Nov 22 11:11:55 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:11:55 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/4] server: push: add error context to all target api calls In-Reply-To: <173226608868.2118190.15465009393024345476@yuna.proxmox.com> References: <20241121154337.471425-1-c.ebner@proxmox.com> <20241121154337.471425-4-c.ebner@proxmox.com> <173226608868.2118190.15465009393024345476@yuna.proxmox.com> Message-ID: On 11/22/24 10:01, Fabian Gr?nbichler wrote: > Quoting Christian Ebner (2024-11-21 16:43:36) >> Make it clear from the context that these error messages stem from >> the response of an api call rather than a local error. >> >> Signed-off-by: Christian Ebner >> --- >> src/server/push.rs | 24 +++++++++++++++++------- >> 1 file changed, 17 insertions(+), 7 deletions(-) >> >> diff --git a/src/server/push.rs b/src/server/push.rs >> index 86cef5520..fe2e11220 100644 >> --- a/src/server/push.rs >> +++ b/src/server/push.rs >> @@ -219,7 +219,9 @@ async fn remove_target_namespace( >> if params.target.supports_prune_delete_stats { >> let data = result["data"].take(); >> serde_json::from_value(data).map_err(|err| { >> - format_err!("removing target namespace {target_namespace} failed - {err}") >> + format_err!( >> + "Failed to remove remote namespace {target_namespace}, remote returned: {err}" >> + ) > > this is attached to the wrong error - it should be attached to the client.delete call right above.. Oof, correct, will fix that. Thanks for catching this! > > this here should instead add the context that we failed to parse the returned > value (which should never happen, that means we missed some API breakage..) 
> >> }) >> } else { >> Ok(BackupGroupDeleteStats::default()) >> @@ -236,7 +238,8 @@ async fn fetch_target_groups( >> let args = Some(serde_json::json!({ "ns": target_namespace.name() })); >> >> let mut result = params.target.client.get(&api_path, args).await?; >> - let groups: Vec<GroupListItem> = serde_json::from_value(result["data"].take())?; >> + let groups: Vec<GroupListItem> = serde_json::from_value(result["data"].take()) >> + .map_err(|err| format_err!("Failed to fetch remote groups, remote returned: {err}"))?; > same here, just with get instead of delete ;) > >> >> let (mut owned, not_owned) = groups.into_iter().fold( >> (Vec::new(), HashSet::new()), >> @@ -277,8 +280,9 @@ async fn remove_target_group( >> >> if params.target.supports_prune_delete_stats { >> let data = result["data"].take(); >> - serde_json::from_value(data) >> - .map_err(|err| format_err!("removing target group {backup_group} failed - {err}")) >> + serde_json::from_value(data).map_err(|err| { >> + format_err!("Failed to remove remote group {backup_group}, remote returned: {err}") >> + }) > here as well > >> } else { >> Ok(BackupGroupDeleteStats::default()) >> } >> @@ -313,7 +317,7 @@ async fn check_or_create_target_namespace( >> match params.target.client.post(&api_path, Some(args)).await { >> Ok(_) => info!("Successfully created new namespace {current} on remote"), >> Err(err) => { >> - bail!("Remote creation of namespace {current} failed, remote returned: {err}") >> + bail!("Creation of remote namespace {current} failed, remote returned: {err}") >> } >> } >> existing_target_namespaces.push(current.clone()); >> @@ -585,7 +589,8 @@ async fn fetch_target_snapshots( >> args["ns"] = serde_json::to_value(target_namespace)?; >> } >> let mut result = params.target.client.get(&api_path, Some(args)).await?; >> - let snapshots: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?; >> + let snapshots: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take()) >> + .map_err(|err| format_err!("Failed to fetch remote snapshots, remote returned: {err}"))?; > here as well > >> >> Ok(snapshots) >> } >> @@ -603,7 +608,12 @@ async fn forget_target_snapshot( >> if !target_namespace.is_root() { >> args["ns"] = serde_json::to_value(target_namespace)?; >> } >> - params.target.client.delete(&api_path, Some(args)).await?; >> + params >> + .target >> + .client >> + .delete(&api_path, Some(args)) >> + .await >> + .map_err(|err| format_err!("Failed to remove remote snapshot, remote returned: {err}"))?; > this should probably be just "Request to remote returned {err}", since the call > site already logs the snapshot name and the fact that this is removal failing > ;) > >> >> Ok(()) >> } >> -- >> 2.39.5 >> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >> >> From c.ebner at proxmox.com Fri Nov 22 11:30:06 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:30:06 +0100 Subject: [pbs-devel] [PATCH v5 proxmox-backup 0/5] introduce dedicated archive name api type Message-ID: <20241122103011.165010-1-c.ebner@proxmox.com> There is currently no dedicated api type for the archive names, given as input parameters to several api methods. These patches introduce a dedicated type for archive names, in order to collect the code for checks and eventual mappings into one location and reduce possible unintentional misuse by passing incorrect argument values to the functions and methods consuming the archive names.
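To give reviewers a rough idea of the shape of the new type before diving into the diffs, here is a minimal, hypothetical sketch of such a wrapper. The mapping rules shown are assumptions derived from the behaviour described in this cover letter (client-side names like `.pxar` or `.img` map to the `.didx`/`.fidx` server names, known server extensions pass through, and there is no catch-all blob fallback); the actual implementation lives in pbs-api-types/src/datastore.rs and differs in detail:

```
use anyhow::{bail, Error};

#[derive(Clone, Debug, PartialEq, Eq)]
pub struct BackupArchiveName {
    // Full server-side name, including the type extension, e.g. "root.pxar.didx".
    name: String,
}

impl TryFrom<&str> for BackupArchiveName {
    type Error = Error;

    fn try_from(value: &str) -> Result<Self, Error> {
        // Accept server archive names as-is, map known client-side names to
        // their server variants, and reject everything else instead of
        // silently falling back to a blob.
        let name = if value.ends_with(".didx")
            || value.ends_with(".fidx")
            || value.ends_with(".blob")
        {
            value.to_owned()
        } else if value.ends_with(".pxar")
            || value.ends_with(".mpxar")
            || value.ends_with(".ppxar")
        {
            format!("{value}.didx")
        } else if value.ends_with(".img") {
            format!("{value}.fidx")
        } else {
            bail!("invalid backup archive name '{value}'");
        };
        Ok(Self { name })
    }
}

impl AsRef<str> for BackupArchiveName {
    fn as_ref(&self) -> &str {
        &self.name
    }
}

impl BackupArchiveName {
    // Well-known archives get helpers instead of free-standing constants.
    pub fn manifest() -> Self {
        Self { name: "index.json.blob".to_owned() }
    }
    // ... catalog(), client_log() and encrypted_key() follow the same pattern.
}
```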
Further, drop all archive name constants in favor of helper methods on the api type to generate `BackupArchiveName` instances for them. This allows for direct comparison with other `BackupArchiveName` instances. As a positive side effect, the mapping now also allows the server archive type extensions to be optionally passed as input to several commands, e.g. ``` proxmox-backup-client restore .pxar.didx ``` is now valid, being equal to ``` proxmox-backup-client restore ``` Changes since version 4: - Rebased onto current master - Extended to newly introduced sync jobs in push direction Changes since version 3: - Removed catchall fallback to blob type, reworked type parsing logic - Removed archive name constants in favor of helper methods to generate archive names for them - Extended tests Changes since version 2: - Rebased onto current master - Amended commit messages Changes since version 1 (thanks @Gabriel): - Rebased onto current master - Added unit tests for archive name parsing - Added missing check for invalid archive names ending with '/' - Inlined variable names for format strings - Import implemented traits at top Christian Ebner (5): datastore: move `ArchiveType` to api types api types: introduce `BackupArchiveName` type client/server: use dedicated api type for all archive names client: drop unused parse_archive_type helper api types: add unit tests for backup archive name parsing pbs-api-types/src/datastore.rs | 238 ++++++++++++++++++++++++++- pbs-client/src/backup_reader.rs | 18 +- pbs-client/src/backup_writer.rs | 43 +++-- pbs-client/src/pxar/tools.rs | 3 +- pbs-client/src/tools/mod.rs | 28 ++-- pbs-datastore/src/backup_info.rs | 22 +-- pbs-datastore/src/datastore.rs | 7 +- pbs-datastore/src/lib.rs | 3 - pbs-datastore/src/manifest.rs | 55 +++---- pbs-datastore/src/snapshot_reader.rs | 11 +- proxmox-backup-client/src/catalog.rs | 35 ++-- proxmox-backup-client/src/helper.rs | 7 +- proxmox-backup-client/src/main.rs | 138 +++++++++------- proxmox-backup-client/src/mount.rs | 33 ++-- proxmox-file-restore/src/main.rs | 13 +- src/api2/admin/datastore.rs | 70 ++++---- src/api2/backup/mod.rs | 3 +- src/api2/reader/mod.rs | 7 +- src/api2/tape/restore.rs | 17 +- src/backup/mod.rs | 3 - src/backup/verify.rs | 7 +- src/bin/proxmox_backup_debug/diff.rs | 16 +- src/server/pull.rs | 24 +-- src/server/push.rs | 31 ++-- src/server/sync.rs | 11 +- tests/prune.rs | 5 +- 26 files changed, 539 insertions(+), 309 deletions(-) -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 11:30:09 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:30:09 +0100 Subject: [pbs-devel] [PATCH v5 proxmox-backup 3/5] client/server: use dedicated api type for all archive names In-Reply-To: <20241122103011.165010-1-c.ebner@proxmox.com> References: <20241122103011.165010-1-c.ebner@proxmox.com> Message-ID: <20241122103011.165010-4-c.ebner@proxmox.com> Instead of using the plain String or slices of it for archive names, use the dedicated api type and its methods to parse and check for archive type based on archive filename extension. This keeps the checks and mappings in the api type and restricts function parameters to the narrower wrapper type, reducing potential misuse. Further, instead of declaring and using the archive name constants throughout the codebase, use the `BackupArchiveName` helpers to generate the archive names for manifest, client logs and encryption keys.
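To make the constant-to-helper move concrete, a short usage sketch; the comparison and `as_ref()` patterns mirror the call sites in the diff below, while the surrounding variables, the example value and the snapshot path are invented for illustration:

```
use std::path::PathBuf;

fn example() -> Result<(), anyhow::Error> {
    // An archive name as it would arrive via an api parameter (value made up):
    let archive_name: BackupArchiveName = "index.json.blob".try_into()?;

    // Helpers replace the old string constants; the values match the removed
    // MANIFEST_BLOB_NAME ("index.json.blob") and CLIENT_LOG_BLOB_NAME
    // ("client.log.blob").
    if archive_name == BackupArchiveName::manifest() {
        // ... handle the manifest blob
    }

    // Filesystem code keeps working on the plain string representation:
    let mut path = PathBuf::from("/path/to/snapshot");
    path.push(archive_name.as_ref());

    Ok(())
}
```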
This allows for easy archive name comparisons using the same `BackupArchiveName` type, at the cost of some extra allocations and avoids the currently present double constant declaration of `CATALOG_NAME`. A positive ergonomic side effect of this is that commands now also accept the archive type extension optionally, when passing the archive name. E.g. ``` proxmox-backup-client restore .pxar.didx ``` is equal to ``` proxmox-backup-client restore .pxar ``` The previously default mapping of any archive name extension to a blob has been dropped in favor of consistent mapping by the api type helpers. Signed-off-by: Christian Ebner --- changes since version 4: - rebased onto current master - extended to newly introduced sync jobs in push direction pbs-client/src/backup_reader.rs | 18 ++-- pbs-client/src/backup_writer.rs | 43 +++++----- pbs-client/src/pxar/tools.rs | 3 +- pbs-client/src/tools/mod.rs | 28 +++--- pbs-datastore/src/backup_info.rs | 22 ++--- pbs-datastore/src/lib.rs | 3 - pbs-datastore/src/manifest.rs | 33 ++++--- pbs-datastore/src/snapshot_reader.rs | 11 +-- proxmox-backup-client/src/catalog.rs | 35 ++++---- proxmox-backup-client/src/helper.rs | 7 +- proxmox-backup-client/src/main.rs | 124 +++++++++++++++++---------- proxmox-backup-client/src/mount.rs | 33 +++---- proxmox-file-restore/src/main.rs | 13 +-- src/api2/admin/datastore.rs | 70 +++++++-------- src/api2/tape/restore.rs | 17 ++-- src/backup/mod.rs | 3 - src/bin/proxmox_backup_debug/diff.rs | 16 ++-- src/server/pull.rs | 23 ++--- src/server/push.rs | 31 ++++--- src/server/sync.rs | 11 +-- tests/prune.rs | 5 +- 21 files changed, 291 insertions(+), 258 deletions(-) diff --git a/pbs-client/src/backup_reader.rs b/pbs-client/src/backup_reader.rs index 4706abc78..24c2edbba 100644 --- a/pbs-client/src/backup_reader.rs +++ b/pbs-client/src/backup_reader.rs @@ -6,13 +6,12 @@ use std::sync::Arc; use futures::future::AbortHandle; use serde_json::{json, Value}; -use pbs_api_types::{BackupDir, BackupNamespace}; +use pbs_api_types::{BackupArchiveName, BackupDir, BackupNamespace}; use pbs_datastore::data_blob::DataBlob; use pbs_datastore::data_blob_reader::DataBlobReader; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::MANIFEST_BLOB_NAME; use pbs_datastore::{BackupManifest, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1}; use pbs_tools::crypt_config::CryptConfig; use pbs_tools::sha::sha256; @@ -127,7 +126,8 @@ impl BackupReader { /// The manifest signature is verified if we have a crypt_config. 
pub async fn download_manifest(&self) -> Result<(BackupManifest, Vec), Error> { let mut raw_data = Vec::with_capacity(64 * 1024); - self.download(MANIFEST_BLOB_NAME, &mut raw_data).await?; + self.download(BackupArchiveName::manifest().as_ref(), &mut raw_data) + .await?; let blob = DataBlob::load_from_reader(&mut &raw_data[..])?; // no expected digest available let data = blob.decode(None, None)?; @@ -145,11 +145,11 @@ impl BackupReader { pub async fn download_blob( &self, manifest: &BackupManifest, - name: &str, + name: &BackupArchiveName, ) -> Result, Error> { let mut tmpfile = crate::tools::create_tmp_file()?; - self.download(name, &mut tmpfile).await?; + self.download(name.as_ref(), &mut tmpfile).await?; tmpfile.seek(SeekFrom::Start(0))?; let (csum, size) = sha256(&mut tmpfile)?; @@ -167,11 +167,11 @@ impl BackupReader { pub async fn download_dynamic_index( &self, manifest: &BackupManifest, - name: &str, + name: &BackupArchiveName, ) -> Result { let mut tmpfile = crate::tools::create_tmp_file()?; - self.download(name, &mut tmpfile).await?; + self.download(name.as_ref(), &mut tmpfile).await?; let index = DynamicIndexReader::new(tmpfile) .map_err(|err| format_err!("unable to read dynamic index '{}' - {}", name, err))?; @@ -190,11 +190,11 @@ impl BackupReader { pub async fn download_fixed_index( &self, manifest: &BackupManifest, - name: &str, + name: &BackupArchiveName, ) -> Result { let mut tmpfile = crate::tools::create_tmp_file()?; - self.download(name, &mut tmpfile).await?; + self.download(name.as_ref(), &mut tmpfile).await?; let index = FixedIndexReader::new(tmpfile) .map_err(|err| format_err!("unable to read fixed index '{}' - {}", name, err))?; diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index 2ffd0b9ba..c1869c431 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -13,13 +13,13 @@ use tokio::io::AsyncReadExt; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use pbs_api_types::{ArchiveType, BackupDir, BackupNamespace}; +use pbs_api_types::{ArchiveType, BackupArchiveName, BackupDir, BackupNamespace}; use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder}; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME}; -use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1}; +use pbs_datastore::manifest::BackupManifest; +use pbs_datastore::PROXMOX_BACKUP_PROTOCOL_ID_V1; use pbs_tools::crypt_config::CryptConfig; use proxmox_human_byte::HumanByte; @@ -269,7 +269,7 @@ impl BackupWriter { /// Upload chunks and index pub async fn upload_index_chunk_info( &self, - archive_name: &str, + archive_name: &BackupArchiveName, stream: impl Stream>, options: UploadOptions, ) -> Result { @@ -361,7 +361,7 @@ impl BackupWriter { pub async fn upload_stream( &self, - archive_name: &str, + archive_name: &BackupArchiveName, stream: impl Stream>, options: UploadOptions, injections: Option>, @@ -387,13 +387,13 @@ impl BackupWriter { if !manifest .files() .iter() - .any(|file| file.filename == archive_name) + .any(|file| file.filename == archive_name.as_ref()) { log::info!("Previous manifest does not contain an archive called '{archive_name}', skipping download.."); } else { // try, but ignore errors - match ArchiveType::from_path(archive_name) { - Ok(ArchiveType::FixedIndex) => { + match archive_name.archive_type() { + ArchiveType::FixedIndex 
=> { if let Err(err) = self .download_previous_fixed_index( archive_name, @@ -405,7 +405,7 @@ impl BackupWriter { log::warn!("Error downloading .fidx from previous manifest: {}", err); } } - Ok(ArchiveType::DynamicIndex) => { + ArchiveType::DynamicIndex => { if let Err(err) = self .download_previous_dynamic_index( archive_name, @@ -429,12 +429,6 @@ impl BackupWriter { .as_u64() .unwrap(); - let archive = if log::log_enabled!(log::Level::Debug) { - archive_name - } else { - pbs_tools::format::strip_server_file_extension(archive_name) - }; - let upload_stats = Self::upload_chunk_info_stream( self.h2.clone(), wid, @@ -448,12 +442,17 @@ impl BackupWriter { }, options.compress, injections, - archive, + archive_name, ) .await?; let size_dirty = upload_stats.size - upload_stats.size_reused; let size: HumanByte = upload_stats.size.into(); + let archive = if log::log_enabled!(log::Level::Debug) { + archive_name.to_string() + } else { + archive_name.without_type_extension() + }; if upload_stats.chunk_injected > 0 { log::info!( @@ -463,7 +462,7 @@ impl BackupWriter { ); } - if archive_name != CATALOG_NAME { + if *archive_name != BackupArchiveName::catalog() { let speed: HumanByte = ((size_dirty * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into(); let size_dirty: HumanByte = size_dirty.into(); @@ -629,7 +628,7 @@ impl BackupWriter { pub async fn download_previous_fixed_index( &self, - archive_name: &str, + archive_name: &BackupArchiveName, manifest: &BackupManifest, known_chunks: Arc>>, ) -> Result { @@ -664,7 +663,7 @@ impl BackupWriter { pub async fn download_previous_dynamic_index( &self, - archive_name: &str, + archive_name: &BackupArchiveName, manifest: &BackupManifest, known_chunks: Arc>>, ) -> Result { @@ -711,7 +710,7 @@ impl BackupWriter { pub async fn download_previous_manifest(&self) -> Result { let mut raw_data = Vec::with_capacity(64 * 1024); - let param = json!({ "archive-name": MANIFEST_BLOB_NAME }); + let param = json!({ "archive-name": BackupArchiveName::manifest().to_string() }); self.h2 .download("previous", Some(param), &mut raw_data) .await?; @@ -739,7 +738,7 @@ impl BackupWriter { crypt_config: Option>, compress: bool, injections: Option>, - archive: &str, + archive: &BackupArchiveName, ) -> impl Future> { let mut counters = UploadCounters::new(); let counters_readonly = counters.clone(); @@ -831,7 +830,7 @@ impl BackupWriter { fn upload_merged_chunk_stream( h2: H2Client, wid: u64, - archive: &str, + archive: &BackupArchiveName, prefix: &str, stream: impl Stream>, index_csum: Arc>>, diff --git a/pbs-client/src/pxar/tools.rs b/pbs-client/src/pxar/tools.rs index b076daf6b..483ef19b8 100644 --- a/pbs-client/src/pxar/tools.rs +++ b/pbs-client/src/pxar/tools.rs @@ -14,6 +14,7 @@ use pxar::accessor::ReadAt; use pxar::format::StatxTimestamp; use pxar::{mode, Entry, EntryKind, Metadata}; +use pbs_api_types::BackupArchiveName; use pbs_datastore::catalog::{ArchiveEntry, CatalogEntryType, DirEntryAttribute}; use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt}; @@ -330,7 +331,7 @@ pub fn handle_root_with_optional_format_version_prelude, manifest: &BackupManifest, crypt_config: Option>, diff --git a/pbs-client/src/tools/mod.rs b/pbs-client/src/tools/mod.rs index 28db6f348..8068dc004 100644 --- a/pbs-client/src/tools/mod.rs +++ b/pbs-client/src/tools/mod.rs @@ -17,7 +17,9 @@ use proxmox_router::cli::{complete_file_name, shellword_split}; use proxmox_schema::*; use proxmox_sys::fs::file_get_json; -use pbs_api_types::{Authid, BackupNamespace, 
RateLimitConfig, UserWithTokens, BACKUP_REPO_URL}; +use pbs_api_types::{ + Authid, BackupArchiveName, BackupNamespace, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL, +}; use pbs_datastore::BackupManifest; use crate::{BackupRepository, HttpClient, HttpClientOptions}; @@ -548,19 +550,18 @@ pub fn place_xdg_file( } pub fn get_pxar_archive_names( - archive_name: &str, + archive_name: &BackupArchiveName, manifest: &BackupManifest, -) -> Result<(String, Option), Error> { - let (filename, ext) = match archive_name.strip_suffix(".didx") { - Some(filename) => (filename, ".didx"), - None => (archive_name, ""), - }; +) -> Result<(BackupArchiveName, Option), Error> { + let filename = archive_name.without_type_extension(); + let ext = archive_name.archive_type().extension(); - // Check if archive with given extension is present + // Check if archive is given as split archive or regular archive and is present in manifest, + // otherwise goto fallback below if manifest .files() .iter() - .any(|fileinfo| fileinfo.filename == format!("{filename}.didx")) + .any(|fileinfo| fileinfo.filename == archive_name.as_ref()) { // check if already given as one of split archive name variants if let Some(base) = filename @@ -568,8 +569,8 @@ pub fn get_pxar_archive_names( .or_else(|| filename.strip_suffix(".ppxar")) { return Ok(( - format!("{base}.mpxar{ext}"), - Some(format!("{base}.ppxar{ext}")), + format!("{base}.mpxar.{ext}").as_str().try_into()?, + Some(format!("{base}.ppxar.{ext}").as_str().try_into()?), )); } return Ok((archive_name.to_owned(), None)); @@ -577,7 +578,10 @@ pub fn get_pxar_archive_names( // if not, try fallback from regular to split archive if let Some(base) = filename.strip_suffix(".pxar") { - return get_pxar_archive_names(&format!("{base}.mpxar{ext}"), manifest); + return get_pxar_archive_names( + &format!("{base}.mpxar.{ext}").as_str().try_into()?, + manifest, + ); } bail!("archive not found in manifest"); diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 62d12b118..2d445f916 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -8,14 +8,12 @@ use anyhow::{bail, format_err, Error}; use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; use pbs_api_types::{ - Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, - BACKUP_FILE_REGEX, + Authid, BackupArchiveName, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, + BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, }; use pbs_config::{open_backup_lockfile, BackupLockGuard}; -use crate::manifest::{ - BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME, -}; +use crate::manifest::{BackupManifest, MANIFEST_LOCK_NAME}; use crate::{DataBlob, DataStore}; /// BackupGroup is a directory containing a list of BackupDir @@ -139,7 +137,7 @@ impl BackupGroup { } let mut manifest_path = PathBuf::from(backup_time); - manifest_path.push(MANIFEST_BLOB_NAME); + manifest_path.push(BackupArchiveName::manifest().as_ref()); use nix::fcntl::{openat, OFlag}; match openat( @@ -492,7 +490,7 @@ impl BackupDir { /// Load the manifest without a lock. Must not be written back. 
pub fn load_manifest(&self) -> Result<(BackupManifest, u64), Error> { - let blob = self.load_blob(MANIFEST_BLOB_NAME)?; + let blob = self.load_blob(BackupArchiveName::manifest().as_ref())?; let raw_size = blob.raw_size(); let manifest = BackupManifest::try_from(blob)?; Ok((manifest, raw_size)) @@ -515,7 +513,7 @@ impl BackupDir { let raw_data = blob.raw_data(); let mut path = self.full_path(); - path.push(MANIFEST_BLOB_NAME); + path.push(BackupArchiveName::manifest().as_ref()); // atomic replace invalidates flock - no other writes past this point! replace_file(&path, raw_data, CreateOptions::new(), false)?; @@ -527,8 +525,8 @@ impl BackupDir { let full_path = self.full_path(); let mut wanted_files = std::collections::HashSet::new(); - wanted_files.insert(MANIFEST_BLOB_NAME.to_string()); - wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string()); + wanted_files.insert(BackupArchiveName::manifest().to_string()); + wanted_files.insert(BackupArchiveName::client_log().to_string()); manifest.files().iter().for_each(|item| { wanted_files.insert(item.filename.clone()); }); @@ -636,7 +634,9 @@ impl BackupInfo { pub fn is_finished(&self) -> bool { // backup is considered unfinished if there is no manifest - self.files.iter().any(|name| name == MANIFEST_BLOB_NAME) + self.files + .iter() + .any(|name| name == BackupArchiveName::manifest().as_ref()) } } diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs index 202b09558..8050cf4d0 100644 --- a/pbs-datastore/src/lib.rs +++ b/pbs-datastore/src/lib.rs @@ -144,9 +144,6 @@ #![deny(unsafe_op_in_unsafe_fn)] -// Note: .pcat1 => Proxmox Catalog Format version 1 -pub const CATALOG_NAME: &str = "catalog.pcat1.didx"; - /// Directory path where active operations counters are saved. pub const ACTIVE_OPERATIONS_DIR: &str = concat!( pbs_buildcfg::PROXMOX_BACKUP_RUN_DIR_M!(), diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index 823c85003..51ec117ea 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -3,13 +3,10 @@ use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; -use pbs_api_types::{ArchiveType, BackupType, CryptMode, Fingerprint}; +use pbs_api_types::{BackupArchiveName, BackupType, CryptMode, Fingerprint}; use pbs_tools::crypt_config::CryptConfig; -pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck"; -pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob"; -pub const ENCRYPTED_KEY_BLOB_NAME: &str = "rsa-encrypted.key.blob"; fn crypt_mode_none() -> CryptMode { CryptMode::None @@ -68,14 +65,13 @@ impl BackupManifest { pub fn add_file( &mut self, - filename: String, + filename: &BackupArchiveName, size: u64, csum: [u8; 32], crypt_mode: CryptMode, ) -> Result<(), Error> { - let _archive_type = ArchiveType::from_path(&filename)?; // check type self.files.push(FileInfo { - filename, + filename: filename.to_string(), size, csum, crypt_mode, @@ -87,8 +83,11 @@ impl BackupManifest { &self.files[..] 
} - pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> { - let info = self.files.iter().find(|item| item.filename == name); + pub fn lookup_file_info(&self, name: &BackupArchiveName) -> Result<&FileInfo, Error> { + let info = self + .files + .iter() + .find(|item| item.filename == name.as_ref()); match info { None => bail!("manifest does not contain file '{}'", name), @@ -96,7 +95,12 @@ impl BackupManifest { } } - pub fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> { + pub fn verify_file( + &self, + name: &BackupArchiveName, + csum: &[u8; 32], + size: u64, + ) -> Result<(), Error> { let info = self.lookup_file_info(name)?; if size != info.size { @@ -256,8 +260,13 @@ fn test_manifest_signature() -> Result<(), Error> { let mut manifest = BackupManifest::new("host/elsa/2020-06-26T13:56:05Z".parse()?); - manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?; - manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?; + manifest.add_file( + &"test1.img.fidx".try_into()?, + 200, + [1u8; 32], + CryptMode::Encrypt, + )?; + manifest.add_file(&"abc.blob".try_into()?, 200, [2u8; 32], CryptMode::None)?; manifest.unprotected["note"] = "This is not protected by the signature.".into(); diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs index 432701ea0..ef70c7013 100644 --- a/pbs-datastore/src/snapshot_reader.rs +++ b/pbs-datastore/src/snapshot_reader.rs @@ -8,13 +8,14 @@ use nix::dir::Dir; use proxmox_sys::fs::lock_dir_noblock_shared; -use pbs_api_types::{print_store_and_ns, ArchiveType, BackupNamespace, Operation}; +use pbs_api_types::{ + print_store_and_ns, ArchiveType, BackupArchiveName, BackupNamespace, Operation, +}; use crate::backup_info::BackupDir; use crate::dynamic_index::DynamicIndexReader; use crate::fixed_index::FixedIndexReader; use crate::index::IndexFile; -use crate::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use crate::DataStore; /// Helper to access the contents of a datastore backup snapshot @@ -62,14 +63,14 @@ impl SnapshotReader { }; let mut client_log_path = snapshot_path; - client_log_path.push(CLIENT_LOG_BLOB_NAME); + client_log_path.push(BackupArchiveName::client_log().as_ref()); - let mut file_list = vec![MANIFEST_BLOB_NAME.to_string()]; + let mut file_list = vec![BackupArchiveName::manifest().to_string()]; for item in manifest.files() { file_list.push(item.filename.clone()); } if client_log_path.exists() { - file_list.push(CLIENT_LOG_BLOB_NAME.to_string()); + file_list.push(BackupArchiveName::client_log().to_string()); } Ok(Self { diff --git a/proxmox-backup-client/src/catalog.rs b/proxmox-backup-client/src/catalog.rs index a55c9effe..39416d1d4 100644 --- a/proxmox-backup-client/src/catalog.rs +++ b/proxmox-backup-client/src/catalog.rs @@ -7,9 +7,8 @@ use serde_json::Value; use proxmox_router::cli::*; use proxmox_schema::api; -use pbs_api_types::BackupNamespace; +use pbs_api_types::{BackupArchiveName, BackupNamespace}; use pbs_client::pxar::tools::get_remote_pxar_reader; -use pbs_client::tools::has_pxar_filename_extension; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_tools::crypt_config::CryptConfig; @@ -22,7 +21,7 @@ use crate::{ complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group, extract_repository_from_value, format_key_source, optional_ns_param, record_repository, BackupDir, 
BufferedDynamicReader, CatalogReader, DynamicIndexReader, - IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA, + IndexFile, Shell, KEYFD_SCHEMA, REPO_URL_SCHEMA, }; #[api( @@ -90,7 +89,8 @@ async fn dump_catalog(param: Value) -> Result { let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; - let file_info = match manifest.lookup_file_info(CATALOG_NAME) { + let catalog_name = BackupArchiveName::catalog(); + let file_info = match manifest.lookup_file_info(&catalog_name) { Ok(file_info) => file_info, Err(err) => { let mut metadata_archives = Vec::new(); @@ -104,7 +104,7 @@ async fn dump_catalog(param: Value) -> Result { for archive in &metadata_archives { let (reader, archive_size) = get_remote_pxar_reader( - &archive, + &archive.as_str().try_into()?, client.clone(), &manifest, crypt_config.clone(), @@ -128,7 +128,7 @@ async fn dump_catalog(param: Value) -> Result { }; let index = client - .download_dynamic_index(&manifest, CATALOG_NAME) + .download_dynamic_index(&manifest, &catalog_name) .await?; let most_used = index.find_most_used_chunks(8); @@ -170,8 +170,7 @@ async fn dump_catalog(param: Value) -> Result { description: "Group/Snapshot path.", }, "archive-name": { - type: String, - description: "Backup archive name.", + type: BackupArchiveName, }, "repository": { optional: true, @@ -195,7 +194,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { let client = connect(&repo)?; let backup_ns = optional_ns_param(¶m)?; let path = required_string_param(¶m, "snapshot")?; - let archive_name = required_string_param(¶m, "archive-name")?; + let server_archive_name: BackupArchiveName = + required_string_param(¶m, "archive-name")?.try_into()?; let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, path).await?; @@ -214,9 +214,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { } }; - let server_archive_name = if has_pxar_filename_extension(archive_name, false) { - format!("{}.didx", archive_name) - } else { + if !server_archive_name.has_pxar_filename_extension() { bail!("Can only mount pxar archives."); }; @@ -233,7 +231,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { let (manifest, _) = client.download_manifest().await?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; - if let Err(_err) = manifest.lookup_file_info(CATALOG_NAME) { + let catalog_name = BackupArchiveName::catalog(); + if let Err(_err) = manifest.lookup_file_info(&catalog_name) { // No catalog, fallback to pxar archive accessor if present let accessor = helper::get_pxar_fuse_accessor( &server_archive_name, @@ -243,7 +242,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { ) .await?; - let state = Shell::new(None, &server_archive_name, accessor).await?; + let state = Shell::new(None, &server_archive_name.as_ref(), accessor).await?; log::info!("Starting interactive shell"); state.shell().await?; record_repository(&repo); @@ -261,17 +260,17 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { ) .await?; - client.download(CATALOG_NAME, &mut tmpfile).await?; + client.download(catalog_name.as_ref(), &mut tmpfile).await?; let index = DynamicIndexReader::new(tmpfile) .map_err(|err| format_err!("unable to read catalog index - {}", err))?; // Note: do not use values stored in index (not trusted) - instead, computed them again let (csum, size) = index.compute_csum(); - manifest.verify_file(CATALOG_NAME, &csum, size)?; + manifest.verify_file(&catalog_name, &csum, size)?; 
let most_used = index.find_most_used_chunks(8); - let file_info = manifest.lookup_file_info(CATALOG_NAME)?; + let file_info = manifest.lookup_file_info(&catalog_name)?; let chunk_reader = RemoteChunkReader::new( client.clone(), crypt_config, @@ -286,7 +285,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> { catalogfile.seek(SeekFrom::Start(0))?; let catalog_reader = CatalogReader::new(catalogfile); - let state = Shell::new(Some(catalog_reader), &server_archive_name, decoder).await?; + let state = Shell::new(Some(catalog_reader), &server_archive_name.as_ref(), decoder).await?; log::info!("Starting interactive shell"); state.shell().await?; diff --git a/proxmox-backup-client/src/helper.rs b/proxmox-backup-client/src/helper.rs index 60355d7d0..642d66a7b 100644 --- a/proxmox-backup-client/src/helper.rs +++ b/proxmox-backup-client/src/helper.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use anyhow::Error; +use pbs_api_types::BackupArchiveName; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_datastore::BackupManifest; use pbs_tools::crypt_config::CryptConfig; @@ -8,7 +9,7 @@ use pbs_tools::crypt_config::CryptConfig; use crate::{BufferedDynamicReadAt, BufferedDynamicReader, IndexFile}; pub(crate) async fn get_pxar_fuse_accessor( - archive_name: &str, + archive_name: &BackupArchiveName, client: Arc, manifest: &BackupManifest, crypt_config: Option>, @@ -44,7 +45,7 @@ pub(crate) async fn get_pxar_fuse_accessor( } pub(crate) async fn get_pxar_fuse_reader( - archive_name: &str, + archive_name: &BackupArchiveName, client: Arc, manifest: &BackupManifest, crypt_config: Option>, @@ -57,7 +58,7 @@ pub(crate) async fn get_pxar_fuse_reader( } pub(crate) async fn get_buffered_pxar_reader( - archive_name: &str, + archive_name: &BackupArchiveName, client: Arc, manifest: &BackupManifest, crypt_config: Option>, diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index f6fb3555e..a155f56f0 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -25,10 +25,10 @@ use pxar::accessor::aio::Accessor; use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ - ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, - ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, - RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, + ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart, + BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions, + PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, }; use pbs_client::catalog_shell::Shell; use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef}; @@ -36,7 +36,7 @@ use pbs_client::tools::{ complete_archive_name, complete_auth_id, complete_backup_group, complete_backup_snapshot, complete_backup_source, complete_chunk_size, complete_group_or_snapshot, complete_img_archive_name, complete_namespace, complete_pxar_archive_name, complete_repository, - connect, connect_rate_limited, extract_repository_from_value, has_pxar_filename_extension, + connect, connect_rate_limited, extract_repository_from_value, key_source::{ crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA, KEYFILE_SCHEMA, 
MASTER_PUBKEY_FD_SCHEMA, MASTER_PUBKEY_FILE_SCHEMA, @@ -54,9 +54,8 @@ use pbs_datastore::chunk_store::verify_chunk_size; use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::BackupManifest; use pbs_datastore::read_chunk::AsyncReadChunk; -use pbs_datastore::CATALOG_NAME; use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig}; use pbs_tools::crypt_config::CryptConfig; use pbs_tools::json; @@ -196,8 +195,8 @@ pub async fn dir_or_last_from_group( async fn backup_directory>( client: &BackupWriter, dir_path: P, - archive_name: &str, - payload_target: Option<&str>, + archive_name: &BackupArchiveName, + payload_target: Option<&BackupArchiveName>, chunk_size: Option, catalog: Option>>>>>, pxar_create_options: pbs_client::pxar::PxarCreateOptions, @@ -276,7 +275,7 @@ async fn backup_directory>( async fn backup_image>( client: &BackupWriter, image_path: P, - archive_name: &str, + archive_name: &BackupArchiveName, chunk_size: Option, upload_options: UploadOptions, ) -> Result { @@ -606,7 +605,12 @@ fn spawn_catalog_upload( tokio::spawn(async move { let catalog_upload_result = client - .upload_stream(CATALOG_NAME, catalog_chunk_stream, upload_options, None) + .upload_stream( + &BackupArchiveName::catalog(), + catalog_chunk_stream, + upload_options, + None, + ) .await; if let Err(ref err) = catalog_upload_result { @@ -1005,13 +1009,21 @@ async fn create_backup( }; for (backup_type, filename, target_base, extension, size) in upload_list { - let target = format!("{target_base}.{extension}"); + let target: BackupArchiveName = format!("{target_base}.{extension}").as_str().try_into()?; match (backup_type, dry_run) { // dry-run - (BackupSpecificationType::CONFIG, true) => log_file("config file", &filename, &target), - (BackupSpecificationType::LOGFILE, true) => log_file("log file", &filename, &target), - (BackupSpecificationType::PXAR, true) => log_file("directory", &filename, &target), - (BackupSpecificationType::IMAGE, true) => log_file("image", &filename, &target), + (BackupSpecificationType::CONFIG, true) => { + log_file("config file", &filename, target.as_ref()) + } + (BackupSpecificationType::LOGFILE, true) => { + log_file("log file", &filename, target.as_ref()) + } + (BackupSpecificationType::PXAR, true) => { + log_file("directory", &filename, target.as_ref()) + } + (BackupSpecificationType::IMAGE, true) => { + log_file("image", &filename, &target.as_ref()) + } // no dry-run (BackupSpecificationType::CONFIG, false) => { let upload_options = UploadOptions { @@ -1020,11 +1032,11 @@ async fn create_backup( ..UploadOptions::default() }; - log_file("config file", &filename, &target); + log_file("config file", &filename, target.as_ref()); let stats = client - .upload_blob_from_file(&filename, &target, upload_options) + .upload_blob_from_file(&filename, target.as_ref(), upload_options) .await?; - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } (BackupSpecificationType::LOGFILE, false) => { // fixme: remove - not needed anymore ? 
@@ -1034,11 +1046,11 @@ async fn create_backup( ..UploadOptions::default() }; - log_file("log file", &filename, &target); + log_file("log file", &filename, target.as_ref()); let stats = client - .upload_blob_from_file(&filename, &target, upload_options) + .upload_blob_from_file(&filename, target.as_ref(), upload_options) .await?; - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } (BackupSpecificationType::PXAR, false) => { let target_base = if let Some(base) = target_base.strip_suffix(".pxar") { @@ -1050,8 +1062,14 @@ async fn create_backup( let (target, payload_target) = if detection_mode.is_metadata() || detection_mode.is_data() { ( - format!("{target_base}.mpxar.{extension}"), - Some(format!("{target_base}.ppxar.{extension}")), + format!("{target_base}.mpxar.{extension}") + .as_str() + .try_into()?, + Some( + format!("{target_base}.ppxar.{extension}") + .as_str() + .try_into()?, + ), ) } else { (target, None) @@ -1065,12 +1083,12 @@ async fn create_backup( catalog_result_rx = Some(catalog_upload_res.result); } - log_file("directory", &filename, &target); + log_file("directory", &filename, target.as_ref()); if let Some(catalog) = catalog.as_ref() { catalog .lock() .unwrap() - .start_directory(std::ffi::CString::new(target.as_str())?.as_c_str())?; + .start_directory(std::ffi::CString::new(target.as_ref())?.as_c_str())?; } let mut previous_ref = None; @@ -1137,7 +1155,7 @@ async fn create_backup( &client, &filename, &target, - payload_target.as_deref(), + payload_target.as_ref().as_deref(), chunk_size_opt, catalog.as_ref().cloned(), pxar_options, @@ -1147,20 +1165,20 @@ async fn create_backup( if let Some(payload_stats) = payload_stats { manifest.add_file( - payload_target + &payload_target .ok_or_else(|| format_err!("missing payload target archive"))?, payload_stats.size, payload_stats.csum, crypto.mode, )?; } - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; if let Some(catalog) = catalog.as_ref() { catalog.lock().unwrap().end_directory()?; } } (BackupSpecificationType::IMAGE, false) => { - log_file("image", &filename, &target); + log_file("image", &filename, target.as_ref()); let upload_options = UploadOptions { previous_manifest: previous_manifest.clone(), @@ -1172,7 +1190,7 @@ async fn create_backup( let stats = backup_image(&client, &filename, &target, chunk_size_opt, upload_options) .await?; - manifest.add_file(target, stats.size, stats.csum, crypto.mode)?; + manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } } } @@ -1194,12 +1212,17 @@ async fn create_backup( if let Some(catalog_result_rx) = catalog_result_rx { let stats = catalog_result_rx.await??; - manifest.add_file(CATALOG_NAME.to_owned(), stats.size, stats.csum, crypto.mode)?; + manifest.add_file( + &BackupArchiveName::catalog(), + stats.size, + stats.csum, + crypto.mode, + )?; } } if let Some(rsa_encrypted_key) = rsa_encrypted_key { - let target = ENCRYPTED_KEY_BLOB_NAME; + let target = BackupArchiveName::encrypted_key(); log::info!("Upload RSA encoded key to '{}' as {}", repo, target); let options = UploadOptions { compress: false, @@ -1207,9 +1230,9 @@ async fn create_backup( ..UploadOptions::default() }; let stats = client - .upload_blob_from_data(rsa_encrypted_key, target, options) + .upload_blob_from_data(rsa_encrypted_key, target.as_ref(), options) .await?; - manifest.add_file(target.to_string(), stats.size, stats.csum, crypto.mode)?; 
+ manifest.add_file(&target, stats.size, stats.csum, crypto.mode)?; } // create manifest (index.json) // manifests are never encrypted, but include a signature @@ -1225,7 +1248,11 @@ async fn create_backup( ..UploadOptions::default() }; client - .upload_blob_from_data(manifest.into_bytes(), MANIFEST_BLOB_NAME, options) + .upload_blob_from_data( + manifest.into_bytes(), + BackupArchiveName::manifest().as_ref(), + options, + ) .await?; client.finish().await?; @@ -1238,7 +1265,7 @@ async fn create_backup( } async fn prepare_reference( - target: &str, + target: &BackupArchiveName, manifest: Arc, backup_writer: &BackupWriter, backup_reader: Arc, @@ -1250,7 +1277,11 @@ async fn prepare_reference( Ok((target, payload_target)) => (target, payload_target), Err(_) => return Ok(None), }; - let payload_target = payload_target.unwrap_or_default(); + let payload_target = if let Some(payload_target) = payload_target { + payload_target + } else { + return Ok(None); + }; let metadata_ref_index = if let Ok(index) = backup_reader .download_dynamic_index(&manifest, &target) @@ -1299,7 +1330,7 @@ async fn prepare_reference( Ok(Some(pbs_client::pxar::PxarPrevRef { accessor, payload_index: payload_ref_index, - archive_name: target, + archive_name: target.to_string(), })) } @@ -1486,7 +1517,8 @@ async fn restore( ) -> Result { let repo = extract_repository_from_value(¶m)?; - let archive_name = json::required_string_param(¶m, "archive-name")?; + let archive_name: BackupArchiveName = + json::required_string_param(¶m, "archive-name")?.try_into()?; let rate_limit = RateLimitConfig::from_client_config(limit); @@ -1525,11 +1557,9 @@ async fn restore( ) .await?; - let (archive_name, archive_type) = parse_archive_type(archive_name); - let (manifest, backup_index_data) = client.download_manifest().await?; - if archive_name == ENCRYPTED_KEY_BLOB_NAME && crypt_config.is_none() { + if archive_name == BackupArchiveName::encrypted_key() && crypt_config.is_none() { log::info!("Restoring encrypted key blob without original key - skipping manifest fingerprint check!") } else { if manifest.signature.is_some() { @@ -1543,7 +1573,7 @@ async fn restore( manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; } - if archive_name == MANIFEST_BLOB_NAME { + if archive_name == BackupArchiveName::manifest() { if let Some(target) = target { replace_file(target, &backup_index_data, CreateOptions::new(), false)?; } else { @@ -1557,7 +1587,7 @@ async fn restore( return Ok(Value::Null); } - if archive_type == ArchiveType::Blob { + if archive_name.archive_type() == ArchiveType::Blob { let mut reader = client.download_blob(&manifest, &archive_name).await?; if let Some(target) = target { @@ -1576,7 +1606,7 @@ async fn restore( std::io::copy(&mut reader, &mut writer) .map_err(|err| format_err!("unable to pipe data - {}", err))?; } - } else if archive_type == ArchiveType::DynamicIndex { + } else if archive_name.archive_type() == ArchiveType::DynamicIndex { let (archive_name, payload_archive_name) = pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?; @@ -1680,7 +1710,7 @@ async fn restore( std::io::copy(&mut reader, &mut writer) .map_err(|err| format_err!("unable to pipe data - {}", err))?; } - } else if archive_type == ArchiveType::FixedIndex { + } else if archive_name.archive_type() == ArchiveType::FixedIndex { let file_info = manifest.lookup_file_info(&archive_name)?; let index = client .download_fixed_index(&manifest, &archive_name) diff --git a/proxmox-backup-client/src/mount.rs b/proxmox-backup-client/src/mount.rs 
index c15e030f5..0048a8ad4 100644 --- a/proxmox-backup-client/src/mount.rs +++ b/proxmox-backup-client/src/mount.rs @@ -18,8 +18,7 @@ use proxmox_schema::*; use proxmox_sortable_macro::sortable; use proxmox_systemd; -use pbs_api_types::BackupNamespace; -use pbs_client::tools::has_pxar_filename_extension; +use pbs_api_types::{ArchiveType, BackupArchiveName, BackupNamespace}; use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_datastore::cached_chunk_reader::CachedChunkReader; @@ -47,11 +46,7 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new( false, &StringSchema::new("Group/Snapshot path.").schema() ), - ( - "archive-name", - false, - &StringSchema::new("Backup archive name.").schema() - ), + ("archive-name", false, &BackupArchiveName::API_SCHEMA), ( "target", false, @@ -87,11 +82,7 @@ WARNING: Only do this with *trusted* backups!", false, &StringSchema::new("Group/Snapshot path.").schema() ), - ( - "archive-name", - false, - &StringSchema::new("Backup archive name.").schema() - ), + ("archive-name", false, &BackupArchiveName::API_SCHEMA), ("repository", true, &REPO_URL_SCHEMA), ( "keyfile", @@ -208,7 +199,8 @@ fn mount( async fn mount_do(param: Value, pipe: Option) -> Result { let repo = extract_repository_from_value(¶m)?; - let archive_name = required_string_param(¶m, "archive-name")?; + let server_archive_name: BackupArchiveName = + required_string_param(¶m, "archive-name")?.try_into()?; let client = connect(&repo)?; let target = param["target"].as_str(); @@ -230,16 +222,14 @@ async fn mount_do(param: Value, pipe: Option) -> Result { } }; - let server_archive_name = if has_pxar_filename_extension(archive_name, false) { + if server_archive_name.has_pxar_filename_extension() { if target.is_none() { bail!("use the 'mount' command to mount pxar archives"); } - format!("{}.didx", archive_name) - } else if archive_name.ends_with(".img") { + } else if server_archive_name.ends_with(".img.fidx") { if target.is_some() { bail!("use the 'map' command to map drive images"); } - format!("{}.fidx", archive_name) } else { bail!("Can only mount/map pxar archives and drive images."); }; @@ -291,7 +281,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { let mut interrupt = futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed()); - if server_archive_name.ends_with(".didx") { + if server_archive_name.archive_type() == ArchiveType::DynamicIndex { let decoder = helper::get_pxar_fuse_accessor( &server_archive_name, client.clone(), @@ -312,7 +302,7 @@ async fn mount_do(param: Value, pipe: Option) -> Result { // exit on interrupted } } - } else if server_archive_name.ends_with(".fidx") { + } else if server_archive_name.archive_type() == ArchiveType::FixedIndex { let file_info = manifest.lookup_file_info(&server_archive_name)?; let index = client .download_fixed_index(&manifest, &server_archive_name) @@ -326,7 +316,10 @@ async fn mount_do(param: Value, pipe: Option) -> Result { ); let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable(); - let name = &format!("{}:{}/{}", repo, path, archive_name); + let name = &format!( + "{repo}:{path}/{}", + server_archive_name.without_type_extension(), + ); let name_escaped = proxmox_systemd::escape_unit(name, false); let mut session = diff --git a/proxmox-file-restore/src/main.rs b/proxmox-file-restore/src/main.rs index 08354b454..5434a1351 100644 --- a/proxmox-file-restore/src/main.rs +++ b/proxmox-file-restore/src/main.rs @@ -5,6 +5,7 @@ use 
std::sync::Arc; use anyhow::{bail, format_err, Error}; use futures::StreamExt; +use pbs_api_types::BackupArchiveName; use serde_json::{json, Value}; use tokio::io::AsyncWriteExt; @@ -37,7 +38,6 @@ use pbs_client::{BackupReader, BackupRepository, RemoteChunkReader}; use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute}; use pbs_datastore::dynamic_index::BufferedDynamicReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::CATALOG_NAME; use pbs_key_config::decrypt_key; use pbs_tools::crypt_config::CryptConfig; @@ -149,9 +149,10 @@ async fn list_files( Ok(entries) } ExtractPath::Pxar(file, mut path) => { - if let Ok(file_info) = manifest.lookup_file_info(CATALOG_NAME) { + let catalog_name = BackupArchiveName::catalog(); + if let Ok(file_info) = manifest.lookup_file_info(&catalog_name) { let index = client - .download_dynamic_index(&manifest, CATALOG_NAME) + .download_dynamic_index(&manifest, &catalog_name) .await?; let most_used = index.find_most_used_chunks(8); let chunk_reader = RemoteChunkReader::new( @@ -172,6 +173,7 @@ async fn list_files( path = vec![b'/']; } + let file: BackupArchiveName = file.as_str().try_into()?; let (archive_name, _payload_archive_name) = pbs_client::tools::get_pxar_archive_names(&file, &manifest)?; @@ -191,7 +193,7 @@ async fn list_files( pbs_client::pxar::tools::pxar_metadata_catalog_lookup( accessor, path, - Some(&archive_name), + Some(archive_name.as_ref()), ) .await } @@ -476,10 +478,11 @@ async fn extract( match path { ExtractPath::Pxar(archive_name, path) => { + let archive_name: BackupArchiveName = archive_name.as_str().try_into()?; let (archive_name, payload_archive_name) = pbs_client::tools::get_pxar_archive_names(&archive_name, &manifest)?; let (reader, archive_size) = get_remote_pxar_reader( - &archive_name, + &archive_name.try_into()?, client.clone(), &manifest, crypt_config.clone(), diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 99b579f02..388ee36c1 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -34,15 +34,15 @@ use pxar::accessor::aio::Accessor; use pxar::EntryKind; use pbs_api_types::{ - print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupGroupDeleteStats, - BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, DataStoreListItem, - DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, - Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, - BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, - DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, - VERIFICATION_OUTDATED_AFTER_SCHEMA, + print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName, + BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode, + DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, + JobScheduleStatus, KeepOptions, Operation, PruneJobOptions, SnapshotListItem, + SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, + BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, + MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_MODIFY, 
PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; use pbs_config::CachedUserInfo; @@ -54,11 +54,11 @@ use pbs_datastore::data_blob_reader::DataBlobReader; use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::BackupManifest; use pbs_datastore::prune::compute_prune_info; use pbs_datastore::{ check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, - StoreProgress, CATALOG_NAME, + StoreProgress, }; use pbs_tools::json::required_string_param; use proxmox_rest_server::{formatter, WorkerTask}; @@ -124,7 +124,7 @@ fn read_backup_index( } result.push(BackupContent { - filename: MANIFEST_BLOB_NAME.to_string(), + filename: BackupArchiveName::manifest().to_string(), crypt_mode: match manifest.signature { Some(_) => Some(CryptMode::SignOnly), None => Some(CryptMode::None), @@ -1481,12 +1481,13 @@ pub fn download_file_decoded( &backup_dir_api.group, )?; - let file_name = required_string_param(¶m, "file-name")?.to_owned(); + let file_name: BackupArchiveName = + required_string_param(¶m, "file-name")?.try_into()?; let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?; let (manifest, files) = read_backup_index(&backup_dir)?; for file in files { - if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { + if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) { bail!("cannot decode '{}' - is encrypted", file_name); } } @@ -1501,12 +1502,10 @@ pub fn download_file_decoded( let mut path = datastore.base_path(); path.push(backup_dir.relative_path()); - path.push(&file_name); + path.push(file_name.as_ref()); - let (_, extension) = file_name.rsplit_once('.').unwrap(); - - let body = match extension { - "didx" => { + let body = match file_name.archive_type() { + ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path).map_err(|err| { format_err!("unable to read dynamic index '{:?}' - {}", &path, err) })?; @@ -1520,7 +1519,7 @@ pub fn download_file_decoded( err })) } - "fidx" => { + ArchiveType::FixedIndex => { let index = FixedIndexReader::open(&path).map_err(|err| { format_err!("unable to read fixed index '{:?}' - {}", &path, err) })?; @@ -1539,7 +1538,7 @@ pub fn download_file_decoded( ), ) } - "blob" => { + ArchiveType::Blob => { let file = std::fs::File::open(&path) .map_err(|err| http_err!(BAD_REQUEST, "File open failed: {}", err))?; @@ -1554,9 +1553,6 @@ pub fn download_file_decoded( ), ) } - extension => { - bail!("cannot download '{}' files", extension); - } }; // fixme: set other headers ? 
@@ -1613,10 +1609,10 @@ pub fn upload_backup_log( )?; let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?; - let file_name = CLIENT_LOG_BLOB_NAME; + let file_name = BackupArchiveName::client_log(); let mut path = backup_dir.full_path(); - path.push(file_name); + path.push(file_name.as_ref()); if path.exists() { bail!("backup already contains a log."); @@ -1671,7 +1667,7 @@ fn decode_path(path: &str) -> Result, Error> { type: String, }, "archive-name": { - schema: BACKUP_ARCHIVE_NAME_SCHEMA, + type: BackupArchiveName, optional: true, }, }, @@ -1688,12 +1684,12 @@ pub async fn catalog( ns: Option, backup_dir: pbs_api_types::BackupDir, filepath: String, - archive_name: Option, + archive_name: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { let file_name = archive_name .clone() - .unwrap_or_else(|| CATALOG_NAME.to_string()); + .unwrap_or_else(|| BackupArchiveName::catalog()); let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -1713,7 +1709,7 @@ pub async fn catalog( let (manifest, files) = read_backup_index(&backup_dir)?; for file in files { - if file.filename == file_name && file.crypt_mode == Some(CryptMode::Encrypt) { + if file.filename == file_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) { bail!("cannot decode '{file_name}' - is encrypted"); } } @@ -1722,7 +1718,7 @@ pub async fn catalog( tokio::task::spawn_blocking(move || { let mut path = datastore.base_path(); path.push(backup_dir.relative_path()); - path.push(&file_name); + path.push(file_name.as_ref()); let index = DynamicIndexReader::open(&path) .map_err(|err| format_err!("unable to read dynamic index '{path:?}' - {err}"))?; @@ -1772,7 +1768,7 @@ pub const API_METHOD_PXAR_FILE_DOWNLOAD: ApiMethod = ApiMethod::new( ("backup-time", false, &BACKUP_TIME_SCHEMA), ("filepath", false, &StringSchema::new("Base64 encoded path").schema()), ("tar", true, &BooleanSchema::new("Download as .tar.zst").schema()), - ("archive-name", true, &BACKUP_ARCHIVE_NAME_SCHEMA), + ("archive-name", true, &BackupArchiveName::API_SCHEMA), ]), ) ).access( @@ -1787,11 +1783,11 @@ fn get_local_pxar_reader( datastore: Arc, manifest: &BackupManifest, backup_dir: &BackupDir, - pxar_name: &str, + pxar_name: &BackupArchiveName, ) -> Result<(LocalDynamicReadAt, u64), Error> { let mut path = datastore.base_path(); path.push(backup_dir.relative_path()); - path.push(pxar_name); + path.push(pxar_name.as_ref()); let index = DynamicIndexReader::open(&path) .map_err(|err| format_err!("unable to read dynamic index '{:?}' - {}", &path, err))?; @@ -1849,16 +1845,16 @@ pub fn pxar_file_download( let file_path = split.next().unwrap_or(b"/"); (pxar_name.to_owned(), file_path.to_owned()) }; - let pxar_name = std::str::from_utf8(&pxar_name)?; + let pxar_name: BackupArchiveName = std::str::from_utf8(&pxar_name)?.try_into()?; let (manifest, files) = read_backup_index(&backup_dir)?; for file in files { - if file.filename == pxar_name && file.crypt_mode == Some(CryptMode::Encrypt) { + if file.filename == pxar_name.as_ref() && file.crypt_mode == Some(CryptMode::Encrypt) { bail!("cannot decode '{}' - is encrypted", pxar_name); } } let (pxar_name, payload_archive_name) = - pbs_client::tools::get_pxar_archive_names(pxar_name, &manifest)?; + pbs_client::tools::get_pxar_archive_names(&pxar_name, &manifest)?; let (reader, archive_size) = get_local_pxar_reader(datastore.clone(), &manifest, &backup_dir, &pxar_name)?; diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index a180a4b02..65eda56dd 100644 --- 
a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -19,18 +19,18 @@ use proxmox_uuid::Uuid; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupDir, BackupNamespace, - CryptMode, NotificationMode, Operation, TapeRestoreNamespace, Userid, - DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, - TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, + parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupArchiveName, + BackupDir, BackupNamespace, CryptMode, NotificationMode, Operation, TapeRestoreNamespace, + Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, + MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, + TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, }; use pbs_client::pxar::tools::handle_root_with_optional_format_version_prelude; use pbs_config::CachedUserInfo; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::BackupManifest; use pbs_datastore::{DataBlob, DataStore}; use pbs_tape::{ BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, @@ -1652,7 +1652,8 @@ fn try_restore_snapshot_archive( } let root_path = Path::new("/"); - let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME); + let manifest_archive_name = BackupArchiveName::manifest(); + let manifest_file_name = OsStr::new(manifest_archive_name.as_ref()); let mut manifest = None; @@ -1732,7 +1733,7 @@ fn try_restore_snapshot_archive( // commit manifest let mut manifest_path = snapshot_path.to_owned(); - manifest_path.push(MANIFEST_BLOB_NAME); + manifest_path.push(BackupArchiveName::manifest().as_ref()); let mut tmp_manifest_path = manifest_path.clone(); tmp_manifest_path.set_extension("tmp"); diff --git a/src/backup/mod.rs b/src/backup/mod.rs index 8c84b8ce8..c5dae69a6 100644 --- a/src/backup/mod.rs +++ b/src/backup/mod.rs @@ -1,8 +1,5 @@ //! Server/client-specific parts for what's otherwise in pbs-datastore. 
-// Note: .pcat1 => Proxmox Catalog Format version 1 -pub const CATALOG_NAME: &str = "catalog.pcat1.didx"; - mod verify; pub use verify::*; diff --git a/src/bin/proxmox_backup_debug/diff.rs b/src/bin/proxmox_backup_debug/diff.rs index b0436d048..dcd351d93 100644 --- a/src/bin/proxmox_backup_debug/diff.rs +++ b/src/bin/proxmox_backup_debug/diff.rs @@ -13,7 +13,7 @@ use proxmox_human_byte::HumanByte; use proxmox_router::cli::{CliCommand, CliCommandMap, CommandLineInterface}; use proxmox_schema::api; -use pbs_api_types::{BackupNamespace, BackupPart}; +use pbs_api_types::{BackupArchiveName, BackupNamespace, BackupPart}; use pbs_client::tools::key_source::{ crypto_parameters, format_key_source, get_encryption_key_password, KEYFD_SCHEMA, }; @@ -70,8 +70,7 @@ pub fn diff_commands() -> CommandLineInterface { type: String, }, "archive-name": { - description: "Name of the .pxar archive", - type: String, + type: BackupArchiveName, }, "repository": { optional: true, @@ -106,7 +105,7 @@ pub fn diff_commands() -> CommandLineInterface { async fn diff_archive_cmd( prev_snapshot: String, snapshot: String, - archive_name: String, + archive_name: BackupArchiveName, compare_content: bool, color: Option, ns: Option, @@ -140,12 +139,11 @@ async fn diff_archive_cmd( let output_params = OutputParams { color }; - if archive_name.ends_with(".pxar") { - let file_name = format!("{}.didx", archive_name); + if archive_name.ends_with(".pxar.didx") { diff_archive( &prev_snapshot, &snapshot, - &file_name, + &archive_name, &repo_params, compare_content, &output_params, @@ -161,7 +159,7 @@ async fn diff_archive_cmd( async fn diff_archive( snapshot_a: &str, snapshot_b: &str, - file_name: &str, + file_name: &BackupArchiveName, repo_params: &RepoParams, compare_contents: bool, output_params: &OutputParams, @@ -249,7 +247,7 @@ struct OutputParams { async fn open_dynamic_index( snapshot: &str, - archive_name: &str, + archive_name: &BackupArchiveName, params: &RepoParams, ) -> Result<(DynamicIndexReader, Accessor), Error> { let backup_reader = create_backup_reader(snapshot, params).await?; diff --git a/src/server/pull.rs b/src/server/pull.rs index 62c27917c..aac9b0a7c 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -11,9 +11,9 @@ use proxmox_human_byte::HumanByte; use tracing::info; use pbs_api_types::{ - print_store_and_ns, ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, - Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, - PRIV_DATASTORE_BACKUP, + print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, + BackupNamespace, GroupFilter, Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, + PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -21,7 +21,7 @@ use pbs_datastore::data_blob::DataBlob; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::{BackupManifest, FileInfo}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{check_backup_owner, DataStore, StoreProgress}; use pbs_tools::sha::sha256; @@ -334,16 +334,16 @@ async fn pull_snapshot<'a>( ) -> Result { let mut sync_stats = SyncStats::default(); let mut manifest_name = snapshot.full_path(); - manifest_name.push(MANIFEST_BLOB_NAME); + 
manifest_name.push(BackupArchiveName::manifest().as_ref()); let mut client_log_name = snapshot.full_path(); - client_log_name.push(CLIENT_LOG_BLOB_NAME); + client_log_name.push(BackupArchiveName::client_log().as_ref()); let mut tmp_manifest_name = manifest_name.clone(); tmp_manifest_name.set_extension("tmp"); let tmp_manifest_blob; if let Some(data) = reader - .load_file_into(MANIFEST_BLOB_NAME, &tmp_manifest_name) + .load_file_into(BackupArchiveName::manifest().as_ref(), &tmp_manifest_name) .await? { tmp_manifest_blob = data; @@ -381,11 +381,12 @@ async fn pull_snapshot<'a>( path.push(&item.filename); if path.exists() { - match ArchiveType::from_path(&item.filename)? { + let filename: BackupArchiveName = item.filename.as_str().try_into()?; + match filename.archive_type() { ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path)?; let (csum, size) = index.compute_csum(); - match manifest.verify_file(&item.filename, &csum, size) { + match manifest.verify_file(&filename, &csum, size) { Ok(_) => continue, Err(err) => { info!("detected changed file {path:?} - {err}"); @@ -395,7 +396,7 @@ async fn pull_snapshot<'a>( ArchiveType::FixedIndex => { let index = FixedIndexReader::open(&path)?; let (csum, size) = index.compute_csum(); - match manifest.verify_file(&item.filename, &csum, size) { + match manifest.verify_file(&filename, &csum, size) { Ok(_) => continue, Err(err) => { info!("detected changed file {path:?} - {err}"); @@ -405,7 +406,7 @@ async fn pull_snapshot<'a>( ArchiveType::Blob => { let mut tmpfile = std::fs::File::open(&path)?; let (csum, size) = sha256(&mut tmpfile)?; - match manifest.verify_file(&item.filename, &csum, size) { + match manifest.verify_file(&filename, &csum, size) { Ok(_) => continue, Err(err) => { info!("detected changed file {path:?} - {err}"); diff --git a/src/server/push.rs b/src/server/push.rs index 288792e03..ea75a622a 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -10,9 +10,9 @@ use tokio_stream::wrappers::ReceiverStream; use tracing::{info, warn}; use pbs_api_types::{ - print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupDir, BackupGroup, - BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem, - Operation, RateLimitConfig, Remote, SnapshotListItem, PRIV_DATASTORE_BACKUP, + print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName, + BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem, + NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, PRIV_REMOTE_DATASTORE_PRUNE, }; @@ -22,7 +22,6 @@ use pbs_datastore::data_blob::ChunkInfo; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{DataStore, StoreProgress}; @@ -805,10 +804,13 @@ pub(crate) async fn push_snapshot( let mut path = backup_dir.full_path(); path.push(&entry.filename); if path.try_exists()? { - match ArchiveType::from_path(&entry.filename)? 
{ + let archive_name = BackupArchiveName::from_path(&entry.filename)?; + match archive_name.archive_type() { ArchiveType::Blob => { let file = std::fs::File::open(path.clone())?; - let backup_stats = backup_writer.upload_blob(file, &entry.filename).await?; + let backup_stats = backup_writer + .upload_blob(file, archive_name.as_ref()) + .await?; stats.add(SyncStats { chunk_count: backup_stats.chunk_count as usize, bytes: backup_stats.size as usize, @@ -821,7 +823,7 @@ pub(crate) async fn push_snapshot( // Add known chunks, ignore errors since archive might not be present let _res = backup_writer .download_previous_dynamic_index( - &entry.filename, + &archive_name, manifest, known_chunks.clone(), ) @@ -830,7 +832,7 @@ pub(crate) async fn push_snapshot( let index = DynamicIndexReader::open(&path)?; let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode()); let sync_stats = push_index( - &entry.filename, + &archive_name, index, chunk_reader, &backup_writer, @@ -845,7 +847,7 @@ pub(crate) async fn push_snapshot( // Add known chunks, ignore errors since archive might not be present let _res = backup_writer .download_previous_fixed_index( - &entry.filename, + &archive_name, manifest, known_chunks.clone(), ) @@ -855,7 +857,7 @@ pub(crate) async fn push_snapshot( let chunk_reader = reader.chunk_reader(entry.chunk_crypt_mode()); let size = index.index_bytes(); let sync_stats = push_index( - &entry.filename, + &archive_name, index, chunk_reader, &backup_writer, @@ -874,12 +876,13 @@ pub(crate) async fn push_snapshot( // Fetch client log from source and push to target // this has to be handled individually since the log is never part of the manifest let mut client_log_path = backup_dir.full_path(); - client_log_path.push(CLIENT_LOG_BLOB_NAME); + let client_log_name = BackupArchiveName::client_log(); + client_log_path.push(client_log_name.as_ref()); if client_log_path.is_file() { backup_writer .upload_blob_from_file( &client_log_path, - CLIENT_LOG_BLOB_NAME, + client_log_name.as_ref(), upload_options.clone(), ) .await?; @@ -891,7 +894,7 @@ pub(crate) async fn push_snapshot( let backup_stats = backup_writer .upload_blob_from_data( manifest_string.into_bytes(), - MANIFEST_BLOB_NAME, + BackupArchiveName::manifest().as_ref(), upload_options, ) .await?; @@ -912,7 +915,7 @@ pub(crate) async fn push_snapshot( // For fixed indexes, the size must be provided as given by the index reader. 
#[allow(clippy::too_many_arguments)] async fn push_index<'a>( - filename: &'a str, + filename: &'a BackupArchiveName, index: impl IndexFile + Send + 'static, chunk_reader: Arc, backup_writer: &BackupWriter, diff --git a/src/server/sync.rs b/src/server/sync.rs index a0157ab2d..120c20875 100644 --- a/src/server/sync.rs +++ b/src/server/sync.rs @@ -17,12 +17,12 @@ use proxmox_rest_server::WorkerTask; use proxmox_router::HttpError; use pbs_api_types::{ - Authid, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, SnapshotListItem, - SyncDirection, SyncJobConfig, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, + Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, CryptMode, GroupListItem, + SnapshotListItem, SyncDirection, SyncJobConfig, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_READ, }; use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader}; use pbs_datastore::data_blob::DataBlob; -use pbs_datastore::manifest::CLIENT_LOG_BLOB_NAME; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{DataStore, ListNamespacesRecursive, LocalChunkReader}; @@ -162,15 +162,16 @@ impl SyncSourceReader for RemoteSourceReader { .open(&tmp_path)?; // Note: be silent if there is no log - only log successful download + let client_log_name = BackupArchiveName::client_log(); if let Ok(()) = self .backup_reader - .download(CLIENT_LOG_BLOB_NAME, tmpfile) + .download(client_log_name.as_ref(), tmpfile) .await { if let Err(err) = std::fs::rename(&tmp_path, to_path) { bail!("Atomic rename file {to_path:?} failed - {err}"); } - info!("got backup log file {CLIENT_LOG_BLOB_NAME:?}"); + info!("got backup log file {client_log_name}"); } Ok(()) diff --git a/tests/prune.rs b/tests/prune.rs index 3b3209698..edc614821 100644 --- a/tests/prune.rs +++ b/tests/prune.rs @@ -2,8 +2,7 @@ use std::path::PathBuf; use anyhow::Error; -use pbs_api_types::PruneJobOptions; -use pbs_datastore::manifest::MANIFEST_BLOB_NAME; +use pbs_api_types::{BackupArchiveName, PruneJobOptions}; use pbs_datastore::prune::compute_prune_info; use pbs_datastore::{BackupDir, BackupInfo}; @@ -34,7 +33,7 @@ fn create_info(snapshot: &str, partial: bool) -> BackupInfo { let mut files = Vec::new(); if !partial { - files.push(String::from(MANIFEST_BLOB_NAME)); + files.push(BackupArchiveName::manifest().to_string()); } BackupInfo { -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 11:30:08 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:30:08 +0100 Subject: [pbs-devel] [PATCH v5 proxmox-backup 2/5] api types: introduce `BackupArchiveName` type In-Reply-To: <20241122103011.165010-1-c.ebner@proxmox.com> References: <20241122103011.165010-1-c.ebner@proxmox.com> Message-ID: <20241122103011.165010-3-c.ebner@proxmox.com> Introduces a dedicated wrapper type to be used for backup archive names instead of plain strings and associated helper methods for archive type checks and archive name mappings. 
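For illustration, a rough sketch of how the new type is meant to behave, based on the implementation below (the wrapper function and the use of `anyhow::Error` are only for the example):

```rust
use pbs_api_types::{ArchiveType, BackupArchiveName};

fn example() -> Result<(), anyhow::Error> {
    // A client-side name without a server type extension is mapped based on
    // its filename extension, e.g. `.pxar` implies a dynamic index.
    let archive: BackupArchiveName = "data.pxar".try_into()?;
    assert!(archive.as_ref() == "data.pxar.didx");
    assert!(archive.archive_type() == ArchiveType::DynamicIndex);

    // Names that already carry a type extension are taken as-is.
    let manifest = BackupArchiveName::from_path("/some/snapshot/index.json.blob")?;
    assert!(manifest.archive_type() == ArchiveType::Blob);
    assert!(manifest.without_type_extension() == "index.json");

    Ok(())
}
```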
Signed-off-by: Christian Ebner --- changes since version 4: - rebased onto current master pbs-api-types/src/datastore.rs | 153 ++++++++++++++++++++++++++++++++- 1 file changed, 152 insertions(+), 1 deletion(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 3b9c206db..105984554 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,5 +1,7 @@ +use std::convert::{AsRef, TryFrom}; use std::fmt; use std::path::{Path, PathBuf}; +use std::str::FromStr; use anyhow::{bail, format_err, Error}; use const_format::concatcp; @@ -1645,7 +1647,7 @@ impl BackupGroupDeleteStats { } } -#[derive(PartialEq, Eq)] +#[derive(Clone, PartialEq, Eq)] /// Allowed variants of backup archives to be contained in a snapshot's manifest pub enum ArchiveType { FixedIndex, @@ -1664,4 +1666,153 @@ impl ArchiveType { }; Ok(archive_type) } + + pub fn extension(&self) -> &'static str { + match self { + ArchiveType::DynamicIndex => "didx", + ArchiveType::FixedIndex => "fidx", + ArchiveType::Blob => "blob", + } + } +} + +#[derive(Clone, PartialEq, Eq)] +/// Name of archive files contained in snapshot's manifest +pub struct BackupArchiveName { + // archive name including the `.fidx`, `.didx` or `.blob` archive type extension + name: String, + // archive type parsed based on given extension + ty: ArchiveType, +} + +impl fmt::Display for BackupArchiveName { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{name}", name = self.name) + } +} + +serde_plain::derive_deserialize_from_fromstr!(BackupArchiveName, "archive name"); + +impl FromStr for BackupArchiveName { + type Err = Error; + + fn from_str(name: &str) -> Result { + Self::try_from(name) + } +} + +serde_plain::derive_serialize_from_display!(BackupArchiveName); + +impl TryFrom<&str> for BackupArchiveName { + type Error = anyhow::Error; + + fn try_from(value: &str) -> Result { + let (name, ty) = Self::parse_archive_type(value)?; + Ok(Self { name, ty }) + } +} + +impl AsRef for BackupArchiveName { + fn as_ref(&self) -> &str { + &self.name + } +} + +impl BackupArchiveName { + pub fn from_path(path: impl AsRef) -> Result { + let path = path.as_ref(); + if path.as_os_str().as_encoded_bytes().last() == Some(&b'/') { + bail!("invalid archive name, got directory"); + } + let file_name = path + .file_name() + .ok_or_else(|| format_err!("invalid archive name"))?; + let name = file_name + .to_str() + .ok_or_else(|| format_err!("archive name not valid UTF-8"))?; + + Self::try_from(name) + } + + pub fn catalog() -> Self { + // Note: .pcat1 => Proxmox Catalog Format version 1 + Self { + name: "catalog.pcat1.didx".to_string(), + ty: ArchiveType::DynamicIndex, + } + } + + pub fn manifest() -> Self { + Self { + name: "index.json.blob".to_string(), + ty: ArchiveType::Blob, + } + } + + pub fn client_log() -> Self { + Self { + name: "client.log.blob".to_string(), + ty: ArchiveType::Blob, + } + } + + pub fn encrypted_key() -> Self { + Self { + name: "rsa-encrypted.key.blob".to_string(), + ty: ArchiveType::Blob, + } + } + + pub fn archive_type(&self) -> ArchiveType { + self.ty.clone() + } + + pub fn ends_with(&self, postfix: &str) -> bool { + self.name.ends_with(postfix) + } + + pub fn has_pxar_filename_extension(&self) -> bool { + self.name.ends_with(".pxar.didx") + || self.name.ends_with(".mpxar.didx") + || self.name.ends_with(".ppxar.didx") + } + + pub fn without_type_extension(&self) -> String { + self.name + .strip_suffix(&format!(".{ext}", ext = self.ty.extension())) + .unwrap() + .into() + } + + fn 
parse_archive_type(archive_name: &str) -> Result<(String, ArchiveType), Error> { + // Detect archive type via given server archive name type extension, if present + if let Ok(archive_type) = ArchiveType::from_path(archive_name) { + return Ok((archive_name.into(), archive_type)); + } + + // No server archive name type extension in archive name, map based on extension + let archive_type = match Path::new(archive_name) + .extension() + .and_then(|ext| ext.to_str()) + { + Some("pxar") => ArchiveType::DynamicIndex, + Some("mpxar") => ArchiveType::DynamicIndex, + Some("ppxar") => ArchiveType::DynamicIndex, + Some("pcat1") => ArchiveType::DynamicIndex, + Some("img") => ArchiveType::FixedIndex, + Some("json") => ArchiveType::Blob, + Some("key") => ArchiveType::Blob, + Some("log") => ArchiveType::Blob, + _ => bail!("failed to parse archive type for '{archive_name}'"), + }; + + Ok(( + format!("{archive_name}.{ext}", ext = archive_type.extension()), + archive_type, + )) + } +} + +impl ApiType for BackupArchiveName { + const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; } -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 11:30:10 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:30:10 +0100 Subject: [pbs-devel] [PATCH v5 proxmox-backup 4/5] client: drop unused parse_archive_type helper In-Reply-To: <20241122103011.165010-1-c.ebner@proxmox.com> References: <20241122103011.165010-1-c.ebner@proxmox.com> Message-ID: <20241122103011.165010-5-c.ebner@proxmox.com> Parsing of the type based on the archive name extension is now handled by `BackupArchiveName`. Signed-off-by: Christian Ebner --- changes since version 4: - no changes proxmox-backup-client/src/main.rs | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index a155f56f0..581bc245b 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -1380,18 +1380,6 @@ async fn dump_image( Ok(()) } -fn parse_archive_type(name: &str) -> (String, ArchiveType) { - if name.ends_with(".didx") || name.ends_with(".fidx") || name.ends_with(".blob") { - (name.into(), ArchiveType::from_path(name).unwrap()) - } else if has_pxar_filename_extension(name, false) { - (format!("{}.didx", name), ArchiveType::DynamicIndex) - } else if name.ends_with(".img") { - (format!("{}.fidx", name), ArchiveType::FixedIndex) - } else { - (format!("{}.blob", name), ArchiveType::Blob) - } -} - #[api( input: { properties: { -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 11:30:11 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:30:11 +0100 Subject: [pbs-devel] [PATCH v5 proxmox-backup 5/5] api types: add unit tests for backup archive name parsing In-Reply-To: <20241122103011.165010-1-c.ebner@proxmox.com> References: <20241122103011.165010-1-c.ebner@proxmox.com> Message-ID: <20241122103011.165010-6-c.ebner@proxmox.com> Signed-off-by: Christian Ebner --- changes since version 4: - no changes pbs-api-types/src/datastore.rs | 64 ++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 105984554..df5c22482 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1816,3 +1816,67 @@ impl BackupArchiveName { impl ApiType for BackupArchiveName { const API_SCHEMA: Schema = BACKUP_ARCHIVE_NAME_SCHEMA; } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_invalid_backup_archive_names() { + let 
invalid_archive_names = ["/invalid/", "/invalid/..", "/invalid/archive-name.invalid"]; + + for archive_name in invalid_archive_names { + assert!(BackupArchiveName::from_path(archive_name).is_err()); + } + } + + #[test] + fn test_valid_didx_backup_archive_names() { + let valid_archive_names = [ + "/valid/archive-name.pxar", + "/valid/archive-name.pxar.didx", + "/valid/archive-name.mpxar", + "/valid/archive-name.mpxar.didx", + "/valid/archive-name.ppxar", + "/valid/archive-name.ppxar.didx", + "/valid/archive-name.pcat1", + "/valid/archive-name.pcat1.didx", + ]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref().ends_with(".didx")); + assert!(archive.archive_type() == ArchiveType::DynamicIndex); + } + } + + #[test] + fn test_valid_fidx_backup_archive_names() { + let valid_archive_names = ["/valid/archive-name.img", "/valid/archive-name.img.fidx"]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref() == "archive-name.img.fidx"); + assert!(archive.without_type_extension() == "archive-name.img"); + assert!(archive.archive_type() == ArchiveType::FixedIndex); + } + } + + #[test] + fn test_valid_blob_backup_archive_names() { + let valid_archive_names = [ + "/valid/index.json", + "/valid/index.json.blob", + "/valid/rsa-encrypted.key", + "/valid/rsa-encrypted.key.blob", + "/valid/archive-name.log", + "/valid/archive-name.log.blob", + ]; + + for archive_name in valid_archive_names { + let archive = BackupArchiveName::from_path(archive_name).unwrap(); + assert!(archive.as_ref().ends_with(".blob")); + assert!(archive.archive_type() == ArchiveType::Blob); + } + } +} -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 11:30:07 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:30:07 +0100 Subject: [pbs-devel] [PATCH v5 proxmox-backup 1/5] datastore: move `ArchiveType` to api types In-Reply-To: <20241122103011.165010-1-c.ebner@proxmox.com> References: <20241122103011.165010-1-c.ebner@proxmox.com> Message-ID: <20241122103011.165010-2-c.ebner@proxmox.com> Moving the `ArchiveType` to avoid crate dependencies on `pbs-datastore`. In preparation for introducing a dedicated `BackupArchiveName` api type, which allows setting the corresponding archive type variant when parsing the archive name based on its filename.
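As a quick sketch of what the moved type provides (mirroring the code below; the surrounding function and error type are illustrative only):

```rust
use pbs_api_types::ArchiveType;

fn example() -> Result<(), anyhow::Error> {
    // The variant is derived purely from the server archive type extension:
    assert!(ArchiveType::from_path("root.pxar.didx")? == ArchiveType::DynamicIndex);
    assert!(ArchiveType::from_path("disk.img.fidx")? == ArchiveType::FixedIndex);
    assert!(ArchiveType::from_path("index.json.blob")? == ArchiveType::Blob);
    // anything else is rejected:
    assert!(ArchiveType::from_path("catalog.pcat1").is_err());
    Ok(())
}
```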
Signed-off-by: Christian Ebner --- changes since version 4: - rebased onto current master pbs-api-types/src/datastore.rs | 23 ++++++++++++++++++++++- pbs-client/src/backup_writer.rs | 4 ++-- pbs-datastore/src/datastore.rs | 7 +++---- pbs-datastore/src/manifest.rs | 24 +----------------------- pbs-datastore/src/snapshot_reader.rs | 4 ++-- proxmox-backup-client/src/main.rs | 12 +++++------- src/api2/backup/mod.rs | 3 +-- src/api2/reader/mod.rs | 7 +++---- src/api2/tape/restore.rs | 10 +++++----- src/backup/verify.rs | 7 ++++--- src/server/pull.rs | 9 ++++----- src/server/push.rs | 4 ++-- 12 files changed, 54 insertions(+), 60 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 3d2b0eabe..3b9c206db 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -1,5 +1,5 @@ use std::fmt; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use anyhow::{bail, format_err, Error}; use const_format::concatcp; @@ -1644,3 +1644,24 @@ impl BackupGroupDeleteStats { self.protected_snapshots += 1; } } + +#[derive(PartialEq, Eq)] +/// Allowed variants of backup archives to be contained in a snapshot's manifest +pub enum ArchiveType { + FixedIndex, + DynamicIndex, + Blob, +} + +impl ArchiveType { + pub fn from_path(archive_name: impl AsRef) -> Result { + let archive_name = archive_name.as_ref(); + let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) { + Some("didx") => ArchiveType::DynamicIndex, + Some("fidx") => ArchiveType::FixedIndex, + Some("blob") => ArchiveType::Blob, + _ => bail!("unknown archive type: {archive_name:?}"), + }; + Ok(archive_type) + } +} diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs index 685510da3..2ffd0b9ba 100644 --- a/pbs-client/src/backup_writer.rs +++ b/pbs-client/src/backup_writer.rs @@ -13,12 +13,12 @@ use tokio::io::AsyncReadExt; use tokio::sync::{mpsc, oneshot}; use tokio_stream::wrappers::ReceiverStream; -use pbs_api_types::{BackupDir, BackupNamespace}; +use pbs_api_types::{ArchiveType, BackupDir, BackupNamespace}; use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder}; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME}; use pbs_datastore::{CATALOG_NAME, PROXMOX_BACKUP_PROTOCOL_ID_V1}; use pbs_tools::crypt_config::CryptConfig; diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 7c37e522a..2755fed8c 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -18,9 +18,9 @@ use proxmox_sys::process_locker::ProcessLockSharedGuard; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, - DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus, MaintenanceMode, - MaintenanceType, Operation, UPID, + ArchiveType, Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, ChunkOrder, + DataStoreConfig, DatastoreFSyncLevel, DatastoreTuning, GarbageCollectionStatus, + MaintenanceMode, MaintenanceType, Operation, UPID, }; use crate::backup_info::{BackupDir, BackupGroup}; @@ -29,7 +29,6 @@ use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter}; use crate::fixed_index::{FixedIndexReader, FixedIndexWriter}; use 
crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive}; use crate::index::IndexFile; -use crate::manifest::ArchiveType; use crate::task_tracking::{self, update_active_operations}; use crate::DataBlob; diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index c3df01427..823c85003 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -1,11 +1,9 @@ -use std::path::Path; - use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; -use pbs_api_types::{BackupType, CryptMode, Fingerprint}; +use pbs_api_types::{ArchiveType, BackupType, CryptMode, Fingerprint}; use pbs_tools::crypt_config::CryptConfig; pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; @@ -56,26 +54,6 @@ pub struct BackupManifest { pub signature: Option, } -#[derive(PartialEq, Eq)] -pub enum ArchiveType { - FixedIndex, - DynamicIndex, - Blob, -} - -impl ArchiveType { - pub fn from_path(archive_name: impl AsRef) -> Result { - let archive_name = archive_name.as_ref(); - let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) { - Some("didx") => ArchiveType::DynamicIndex, - Some("fidx") => ArchiveType::FixedIndex, - Some("blob") => ArchiveType::Blob, - _ => bail!("unknown archive type: {:?}", archive_name), - }; - Ok(archive_type) - } -} - impl BackupManifest { pub fn new(snapshot: pbs_api_types::BackupDir) -> Self { Self { diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs index f9c772079..432701ea0 100644 --- a/pbs-datastore/src/snapshot_reader.rs +++ b/pbs-datastore/src/snapshot_reader.rs @@ -8,13 +8,13 @@ use nix::dir::Dir; use proxmox_sys::fs::lock_dir_noblock_shared; -use pbs_api_types::{print_store_and_ns, BackupNamespace, Operation}; +use pbs_api_types::{print_store_and_ns, ArchiveType, BackupNamespace, Operation}; use crate::backup_info::BackupDir; use crate::dynamic_index::DynamicIndexReader; use crate::fixed_index::FixedIndexReader; use crate::index::IndexFile; -use crate::manifest::{ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use crate::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use crate::DataStore; /// Helper to access the contents of a datastore backup snapshot diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index e4034aa99..f6fb3555e 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -25,10 +25,10 @@ use pxar::accessor::aio::Accessor; use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ - Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, ClientRateLimitConfig, - CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, RateLimitConfig, - SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, - BACKUP_TYPE_SCHEMA, + ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, + ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PruneJobOptions, PruneListItem, + RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, + BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, }; use pbs_client::catalog_shell::Shell; use pbs_client::pxar::{ErrorHandler as PxarErrorHandler, MetadataArchiveReader, PxarPrevRef}; @@ -54,9 +54,7 @@ use pbs_datastore::chunk_store::verify_chunk_size; use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader, LocalDynamicReadAt}; 
use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ - ArchiveType, BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, -}; +use pbs_datastore::manifest::{BackupManifest, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::CATALOG_NAME; use pbs_key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig}; diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index ea0d0292e..92e79a267 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -19,13 +19,12 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, + ArchiveType, Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::ArchiveType; use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1}; use pbs_tools::json::{required_array_param, required_integer_param, required_string_param}; diff --git a/src/api2/reader/mod.rs b/src/api2/reader/mod.rs index 23051653e..50f80de43 100644 --- a/src/api2/reader/mod.rs +++ b/src/api2/reader/mod.rs @@ -19,13 +19,12 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ - Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, - PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, + ArchiveType, Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, + DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::ArchiveType; use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1}; use pbs_tools::json::required_string_param; diff --git a/src/api2/tape/restore.rs b/src/api2/tape/restore.rs index f7481bacc..a180a4b02 100644 --- a/src/api2/tape/restore.rs +++ b/src/api2/tape/restore.rs @@ -19,10 +19,10 @@ use proxmox_uuid::Uuid; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode, - NotificationMode, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, - DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, - PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, + parse_ns_and_snapshot, print_ns_and_snapshot, ArchiveType, Authid, BackupDir, BackupNamespace, + CryptMode, NotificationMode, Operation, TapeRestoreNamespace, Userid, + DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, + PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, }; use pbs_client::pxar::tools::handle_root_with_optional_format_version_prelude; @@ -30,7 +30,7 @@ use pbs_config::CachedUserInfo; use pbs_datastore::dynamic_index::DynamicIndexReader; use 
pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ArchiveType, BackupManifest, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::{BackupManifest, MANIFEST_BLOB_NAME}; use pbs_datastore::{DataBlob, DataStore}; use pbs_tape::{ BlockReadError, MediaContentHeader, TapeRead, PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0, diff --git a/src/backup/verify.rs b/src/backup/verify.rs index 6ef7e8eb3..fee6ecf5f 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -11,12 +11,13 @@ use proxmox_sys::fs::lock_dir_noblock_shared; use proxmox_worker_task::WorkerTaskContext; use pbs_api_types::{ - print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode, - SnapshotVerifyState, VerifyState, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY, UPID, + print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupNamespace, BackupType, + CryptMode, SnapshotVerifyState, VerifyState, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY, + UPID, }; use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo}; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ArchiveType, BackupManifest, FileInfo}; +use pbs_datastore::manifest::{BackupManifest, FileInfo}; use pbs_datastore::{DataBlob, DataStore, StoreProgress}; use crate::tools::parallel_handler::ParallelHandler; diff --git a/src/server/pull.rs b/src/server/pull.rs index 08b55956c..62c27917c 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -11,8 +11,9 @@ use proxmox_human_byte::HumanByte; use tracing::info; use pbs_api_types::{ - print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + print_store_and_ns, ArchiveType, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, + Operation, RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, + PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -20,9 +21,7 @@ use pbs_datastore::data_blob::DataBlob; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ - ArchiveType, BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME, -}; +use pbs_datastore::manifest::{BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{check_backup_owner, DataStore, StoreProgress}; use pbs_tools::sha::sha256; diff --git a/src/server/push.rs b/src/server/push.rs index 4c489531c..288792e03 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -10,7 +10,7 @@ use tokio_stream::wrappers::ReceiverStream; use tracing::{info, warn}; use pbs_api_types::{ - print_store_and_ns, ApiVersion, ApiVersionInfo, Authid, BackupDir, BackupGroup, + print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ, PRIV_REMOTE_DATASTORE_BACKUP, PRIV_REMOTE_DATASTORE_MODIFY, @@ -22,7 +22,7 @@ use pbs_datastore::data_blob::ChunkInfo; use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::index::IndexFile; -use pbs_datastore::manifest::{ArchiveType, 
CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; +use pbs_datastore::manifest::{CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use pbs_datastore::read_chunk::AsyncReadChunk; use pbs_datastore::{DataStore, StoreProgress}; -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 11:33:39 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 11:33:39 +0100 Subject: [pbs-devel] [PATCH v4 proxmox-backup 0/5] introduce dedicated archive name api type In-Reply-To: <20241113105007.151258-1-c.ebner@proxmox.com> References: <20241113105007.151258-1-c.ebner@proxmox.com> Message-ID: superseded-by version 5: https://lore.proxmox.com/pbs-devel/20241122103011.165010-1-c.ebner at proxmox.com/T From f.gruenbichler at proxmox.com Fri Nov 22 11:37:30 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Fri, 22 Nov 2024 11:37:30 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v5 0/4] fix #3786: resync corrupt chunks in sync-job In-Reply-To: <20241122093919.59777-1-g.goller@proxmox.com> References: <20241122093919.59777-1-g.goller@proxmox.com> Message-ID: <173227185008.2118190.9441547307434136940@yuna.proxmox.com> w.r.t. the off-list discussion - I think resync-corrupt is okay as standalone option, it matches with the others like remove_vanished. but I noticed another thing that requires some more changes - we need to only allow resync-corrupt for pull syncs, not for push ones (for now - it's not impossible to implement it for push as well, but it requires some backend changes and thoughts about the priv implications). Quoting Gabriel Goller (2024-11-22 10:39:15) > Add an option `resync-corrupt` that resyncs corrupt snapshots when running > sync-job. This option checks if the local snapshot failed the last > verification and if it did, overwrites the local snapshot with the > remote one. > > This is quite useful, as we currently don't have an option to "fix" > broken chunks/snapshots in any way, even if a healthy version is on > another (e.g. offsite) instance. > > Important things to note are also: this has a slight performance > penalty, as all the manifests have to be looked through, and a > verification job has to be run beforehand, otherwise we do not know > if the snapshot is healthy. > > Note: This series was originally written by Shannon! I just picked it > up, rebased, and fixed the obvious comments on the last series. > > Changelog v5 (thanks @Fabian): > - rebase > - don't remove parsing error in verify_state helper > - add error logs on failures > > Changelog v4 (thanks @Fabian): > - make verify_state bubble up errors > - call verify_state helper everywhere we need the verify_state > - resync broken manifests (so resync when load_manifest fails) > > Changelog v3 (thanks @Fabian): > - filter out snapshots earlier in the pull_group function > - move verify_state to BackupManifest and fixed invocations > - reverted verify_state Option -> Result state (It doesn't matter if we get an > error, we get that quite often f.e.
in new backups) > - removed some unnecessary log lines > - removed some unnecessary imports and modifications > - rebase to current master > > Changelog v2 (thanks @Thomas): > - order git trailers > - adjusted schema description to include broken indexes > - change verify_state to return a Result<_,_> > - print error if verify_state is not able to read the state > - update docs on pull_snapshot function > - simplify logic by combining flags > - move log line out of loop to only print once that we resync the snapshot > > Changelog since RFC (Shannon's work): > - rename option from deep-sync to resync-corrupt > - rebase on latest master (and change implementation details, as a > lot has changed around sync-jobs) > > proxmox-backup: > > Gabriel Goller (4): > snapshot: add helper function to retrieve verify_state > fix #3786: api: add resync-corrupt option to sync jobs > fix #3786: ui/cli: add resync-corrupt option on sync-jobs > fix #3786: docs: add resync-corrupt option to sync-job > > docs/managing-remotes.rst | 6 +++ > pbs-api-types/src/jobs.rs | 10 +++++ > pbs-datastore/src/backup_info.rs | 9 +++- > pbs-datastore/src/manifest.rs | 14 +++++- > src/api2/admin/datastore.rs | 16 +++---- > src/api2/backup/mod.rs | 18 +++++--- > src/api2/config/sync.rs | 4 ++ > src/api2/pull.rs | 9 +++- > src/backup/verify.rs | 13 +++--- > src/bin/proxmox-backup-manager.rs | 16 ++++++- > src/server/pull.rs | 72 ++++++++++++++++++++++++------- > www/window/SyncJobEdit.js | 11 +++++ > 12 files changed, 155 insertions(+), 43 deletions(-) > > > Summary over all repositories: > 12 files changed, 155 insertions(+), 43 deletions(-) > > -- > Generated by git-murpp 0.7.1 > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From g.goller at proxmox.com Fri Nov 22 12:11:50 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 12:11:50 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] reuse-datastore: avoid creating another default prune job Message-ID: <20241122111150.162327-1-g.goller@proxmox.com> If a datastore with a default prune job is removed, the prune job is preserved as it is stored in /etc/proxmox-backup/prune.cfg. We also create a default prune job for every datastore - this means that when reusing a datastore that previously existed, you end up with duplicate prune jobs. Reported-by: Fabian Grünbichler Signed-off-by: Gabriel Goller --- This is a bit janky, because we rely on the default prune job being named `default-{datastore}`, but that shouldn't be an issue.
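For context, a small sketch of the id scheme this check relies on (the uuid value here is made up; `Uuid::generate()` produces it in the real code):

```rust
fn main() {
    // Default prune job ids are built as "default-{datastore}-{uuid}" and then
    // truncated to 32 characters, so an existing default job can be recognized
    // by its "default-{datastore}" prefix.
    let datastore = "tank";
    let mut id = format!("default-{}-{}", datastore, "1db85bdf-6ba8-4182-bbe1-a9f6e7");
    id.truncate(32);
    assert!(id.starts_with(&format!("default-{datastore}")));
}
```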
src/api2/config/datastore.rs | 41 ++++++++++++++++++++---------------- src/api2/config/prune.rs | 15 +++++++++++++ 2 files changed, 38 insertions(+), 18 deletions(-) diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 37d1528c70fb..cbe67cfc6ac5 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -23,7 +23,9 @@ use pbs_datastore::chunk_store::ChunkStore; use crate::api2::admin::{ prune::list_prune_jobs, sync::list_config_sync_jobs, verify::list_verification_jobs, }; -use crate::api2::config::prune::{delete_prune_job, do_create_prune_job}; +use crate::api2::config::prune::{ + default_prune_job_existing, delete_prune_job, do_create_prune_job, +}; use crate::api2::config::sync::delete_sync_job; use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs}; use crate::api2::config::verify::delete_verification_job; @@ -150,23 +152,26 @@ pub fn create_datastore( let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; - let prune_job_config = config.prune_schedule.as_ref().map(|schedule| { - let mut id = format!("default-{}-{}", config.name, Uuid::generate()); - id.truncate(32); - - PruneJobConfig { - id, - store: config.name.clone(), - comment: None, - disable: false, - schedule: schedule.clone(), - options: PruneJobOptions { - keep: config.keep.clone(), - max_depth: None, - ns: None, - }, - } - }); + let mut prune_job_config = None; + if !default_prune_job_existing(&config.name)? { + prune_job_config = config.prune_schedule.as_ref().map(|schedule| { + let mut id = format!("default-{}-{}", config.name, Uuid::generate()); + id.truncate(32); + + PruneJobConfig { + id, + store: config.name.clone(), + comment: None, + disable: false, + schedule: schedule.clone(), + options: PruneJobOptions { + keep: config.keep.clone(), + max_depth: None, + ns: None, + }, + } + }); + } // clearing prune settings in the datastore config, as they are now handled by prune jobs let config = DataStoreConfig { diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs index ce7b8ce565ce..dafb97e2f1e5 100644 --- a/src/api2/config/prune.rs +++ b/src/api2/config/prune.rs @@ -77,6 +77,21 @@ pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> { Ok(()) } +pub fn default_prune_job_existing(datastore: &str) -> Result { + let (section_config, _digest) = prune::config()?; + if section_config + .sections + .keys() + .filter(|s| s.starts_with(&format!("default-{datastore}"))) + .count() + > 0 + { + Ok(true) + } else { + Ok(false) + } +} + #[api( protected: true, input: { -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 12:18:10 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 12:18:10 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] reuse-datastore: avoid creating another default prune job In-Reply-To: <20241122111150.162327-1-g.goller@proxmox.com> References: <20241122111150.162327-1-g.goller@proxmox.com> Message-ID: <3cc8bcda-9bc6-4658-9c9f-cfad4ef38833@proxmox.com> On 11/22/24 12:11, Gabriel Goller wrote: > If a datastore with a default prune job is removed, the prune job is > preserved as it is stored in /etc/proxmox-backup/prune.cfg. We also > create a default prune job for every datastore - this means that when > reusing a datastore that previously existed, you end up with duplicate > prune jobs.
> > Reported-by: Fabian Grünbichler > Signed-off-by: Gabriel Goller > --- > > This is a bit janky, because we rely on the default prune job being > named `default-{datastore}`, but that shouldn't be an issue. > > src/api2/config/datastore.rs | 41 ++++++++++++++++++++---------------- > src/api2/config/prune.rs | 15 +++++++++++++ > 2 files changed, 38 insertions(+), 18 deletions(-) > > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index 37d1528c70fb..cbe67cfc6ac5 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -23,7 +23,9 @@ use pbs_datastore::chunk_store::ChunkStore; > use crate::api2::admin::{ > prune::list_prune_jobs, sync::list_config_sync_jobs, verify::list_verification_jobs, > }; > -use crate::api2::config::prune::{delete_prune_job, do_create_prune_job}; > +use crate::api2::config::prune::{ > + default_prune_job_existing, delete_prune_job, do_create_prune_job, > +}; > use crate::api2::config::sync::delete_sync_job; > use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs}; > use crate::api2::config::verify::delete_verification_job; > @@ -150,23 +152,26 @@ pub fn create_datastore( > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > > - let prune_job_config = config.prune_schedule.as_ref().map(|schedule| { > - let mut id = format!("default-{}-{}", config.name, Uuid::generate()); > - id.truncate(32); > - > - PruneJobConfig { > - id, > - store: config.name.clone(), > - comment: None, > - disable: false, > - schedule: schedule.clone(), > - options: PruneJobOptions { > - keep: config.keep.clone(), > - max_depth: None, > - ns: None, > - }, > - } > - }); > + let mut prune_job_config = None; > + if !default_prune_job_existing(&config.name)? { > + prune_job_config = config.prune_schedule.as_ref().map(|schedule| { > + let mut id = format!("default-{}-{}", config.name, Uuid::generate()); > + id.truncate(32); > + > + PruneJobConfig { > + id, > + store: config.name.clone(), > + comment: None, > + disable: false, > + schedule: schedule.clone(), > + options: PruneJobOptions { > + keep: config.keep.clone(), > + max_depth: None, > + ns: None, > + }, > + } > + }); > + } > > // clearing prune settings in the datastore config, as they are now handled by prune jobs > let config = DataStoreConfig { > diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs > index ce7b8ce565ce..dafb97e2f1e5 100644 > --- a/src/api2/config/prune.rs > +++ b/src/api2/config/prune.rs > @@ -77,6 +77,21 @@ pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> { > Ok(()) > } > > +pub fn default_prune_job_existing(datastore: &str) -> Result { > + let (section_config, _digest) = prune::config()?; > + if section_config > + .sections > + .keys() > + .filter(|s| s.starts_with(&format!("default-{datastore}"))) > + .count() > + > 0 > + { > + Ok(true) > + } else { > + Ok(false) > + } could be more compact?
no need for the if block: let has_default = section_config.sections.keys().filter(...).count() > 0; Ok(has_default) > +} > + > #[api( > protected: true, > input: { From g.goller at proxmox.com Fri Nov 22 13:15:46 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 13:15:46 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v5 0/4] fix #3786: resync corrupt chunks in sync-job In-Reply-To: <20241122093919.59777-1-g.goller@proxmox.com> References: <20241122093919.59777-1-g.goller@proxmox.com> Message-ID: Sent a new version! From g.goller at proxmox.com Fri Nov 22 13:16:13 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 13:16:13 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v6 0/4] fix #3786: resync corrupt chunks in sync-job Message-ID: <20241122121617.185615-1-g.goller@proxmox.com> Add an option `resync-corrupt` that resyncs corrupt snapshots when running sync-job. This option checks if the local snapshot failed the last verification and if it did, overwrites the local snapshot with the remote one. This is quite useful, as we currently don't have an option to "fix" broken chunks/snapshots in any way, even if a healthy version is on another (e.g. offsite) instance. Important things to note are also: this has a slight performance penalty, as all the manifests have to be looked through, and a verification job has to be run beforehand, otherwise we do not know if the snapshot is healthy. Note: This series was originally written by Shannon! I just picked it up, rebased, and fixed the obvious comments on the last series. Changelog v6 (thanks @Fabian): - rebase - only allow the resync-corrupt option on pull sync jobs - fix typo Changelog v5 (thanks @Fabian): - rebase - don't remove parsing error in verify_state helper - add error logs on failures Changelog v4 (thanks @Fabian): - make verify_state bubble up errors - call verify_state helper everywhere we need the verify_state - resync broken manifests (so resync when load_manifest fails) Changelog v3 (thanks @Fabian): - filter out snapshots earlier in the pull_group function - move verify_state to BackupManifest and fixed invocations - reverted verify_state Option -> Result state (It doesn't matter if we get an error, we get that quite often f.e.
in new backups) - removed some unnecessary log lines - removed some unnecessary imports and modifications - rebase to current master Changelog v2 (thanks @Thomas): - order git trailers - adjusted schema description to include broken indexes - change verify_state to return a Result<_,_> - print error if verify_state is not able to read the state - update docs on pull_snapshot function - simplify logic by combining flags - move log line out of loop to only print once that we resync the snapshot Changelog since RFC (Shannon's work): - rename option from deep-sync to resync-corrupt - rebase on latest master (and change implementation details, as a lot has changed around sync-jobs) proxmox-backup: Gabriel Goller (4): snapshot: add helper function to retrieve verify_state fix #3786: api: add resync-corrupt option to sync jobs fix #3786: ui/cli: add resync-corrupt option on sync-jobs fix #3786: docs: add resync-corrupt option to sync-job docs/managing-remotes.rst | 6 +++ pbs-api-types/src/jobs.rs | 10 +++++ pbs-datastore/src/backup_info.rs | 9 +++- pbs-datastore/src/manifest.rs | 14 +++++- src/api2/admin/datastore.rs | 16 +++---- src/api2/backup/mod.rs | 18 +++++--- src/api2/config/sync.rs | 4 ++ src/api2/pull.rs | 9 +++- src/backup/verify.rs | 13 +++--- src/bin/proxmox-backup-manager.rs | 16 ++++++- src/server/pull.rs | 72 ++++++++++++++++++++++++------- www/window/SyncJobEdit.js | 14 ++++++ 12 files changed, 158 insertions(+), 43 deletions(-) Summary over all repositories: 12 files changed, 158 insertions(+), 43 deletions(-) -- Generated by git-murpp 0.7.1 From g.goller at proxmox.com Fri Nov 22 13:16:16 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 13:16:16 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v6 3/4] fix #3786: ui/cli: add resync-corrupt option on sync-jobs In-Reply-To: <20241122121617.185615-1-g.goller@proxmox.com> References: <20241122121617.185615-1-g.goller@proxmox.com> Message-ID: <20241122121617.185615-4-g.goller@proxmox.com> Add the `resync-corrupt` option to the ui and the `proxmox-backup-manager` cli. It is listed in the `Advanced` section, because it slows the sync-job down and is useless if no verification job was run beforehand. 
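For example, a one-off pull with the new flag would look something like `proxmox-backup-manager pull <remote> <remote-store> <local-store> --resync-corrupt true` (the exact invocation shape here is inferred from the parameter wiring in the diff below, not spelled out by this patch).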
Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller Reviewed-by: Fabian Grünbichler --- src/bin/proxmox-backup-manager.rs | 16 ++++++++++++++-- www/window/SyncJobEdit.js | 14 ++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/bin/proxmox-backup-manager.rs b/src/bin/proxmox-backup-manager.rs index d887dc1d50a1..02ca0d028225 100644 --- a/src/bin/proxmox-backup-manager.rs +++ b/src/bin/proxmox-backup-manager.rs @@ -14,8 +14,8 @@ use pbs_api_types::percent_encoding::percent_encode_component; use pbs_api_types::{ BackupNamespace, GroupFilter, RateLimitConfig, SyncDirection, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, - REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, TRANSFER_LAST_SCHEMA, UPID_SCHEMA, - VERIFICATION_OUTDATED_AFTER_SCHEMA, + REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::{display_task_log, view_task_result}; use pbs_config::sync; @@ -307,6 +307,7 @@ async fn sync_datastore( group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, param: Value, sync_direction: SyncDirection, ) -> Result { @@ -343,6 +344,10 @@ async fn sync_datastore( args["transfer-last"] = json!(transfer_last) } + if let Some(resync) = resync_corrupt { + args["resync-corrupt"] = Value::from(resync); + } + let mut limit_json = json!(limit); let limit_map = limit_json .as_object_mut() @@ -405,6 +410,10 @@ async fn sync_datastore( schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + }, } } )] @@ -421,6 +430,7 @@ async fn pull_datastore( group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, param: Value, ) -> Result { sync_datastore( @@ -434,6 +444,7 @@ async fn pull_datastore( group_filter, limit, transfer_last, + resync_corrupt, param, SyncDirection::Pull, ) @@ -513,6 +524,7 @@ async fn push_datastore( group_filter, limit, transfer_last, + None, param, SyncDirection::Push, ) diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js index 0e648e7b3e50..6fe31dc0befd 100644 --- a/www/window/SyncJobEdit.js +++ b/www/window/SyncJobEdit.js @@ -358,6 +358,20 @@ Ext.define('PBS.window.SyncJobEdit', { deleteEmpty: '{!isCreate}', }, }, + { + fieldLabel: gettext('Re-sync corrupt snapshots'), + xtype: 'proxmoxcheckbox', + name: 'resync-corrupt', + autoEl: { + tag: 'div', + 'data-qtip': gettext('Re-sync snapshots, whose verification failed.'), + }, + cbind: { + disabled: '{syncDirectionPush}', + }, + uncheckedValue: false, + value: false, + }, ], }, { -- 2.39.5 From g.goller at proxmox.com Fri Nov 22 13:16:14 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 13:16:14 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v6 1/4] snapshot: add helper function to retrieve verify_state In-Reply-To: <20241122121617.185615-1-g.goller@proxmox.com> References: <20241122121617.185615-1-g.goller@proxmox.com> Message-ID: <20241122121617.185615-2-g.goller@proxmox.com> Add helper functions to retrieve the verify_state from the manifest of a snapshot. Replaced all the manual "verify_state" parsing with the helper function.
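For illustration, a minimal sketch of the call pattern this enables (the function name is hypothetical; treating an unreadable manifest as a resync candidate follows the cover letter's v4 changelog):

```rust
use pbs_api_types::VerifyState;
use pbs_datastore::BackupDir;

/// Decide whether a local snapshot should be re-synced based on its last
/// recorded verification result.
fn needs_resync(snapshot: &BackupDir) -> bool {
    match snapshot.verify_state() {
        Ok(Some(VerifyState::Failed)) => true, // last verification failed
        Ok(Some(VerifyState::Ok)) => false,    // last verification succeeded
        Ok(None) => false,                     // never verified, e.g. a fresh snapshot
        Err(_) => true,                        // manifest missing or unreadable
    }
}
```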
Suggested-by: Fabian Grünbichler Signed-off-by: Gabriel Goller --- pbs-datastore/src/backup_info.rs | 9 +++++++-- pbs-datastore/src/manifest.rs | 14 +++++++++++++- src/api2/admin/datastore.rs | 16 +++++++--------- src/api2/backup/mod.rs | 18 +++++++++++------- src/backup/verify.rs | 13 ++++++++----- 5 files changed, 46 insertions(+), 24 deletions(-) diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs index 62d12b1183df..a581d75757b4 100644 --- a/pbs-datastore/src/backup_info.rs +++ b/pbs-datastore/src/backup_info.rs @@ -8,8 +8,8 @@ use anyhow::{bail, format_err, Error}; use proxmox_sys::fs::{lock_dir_noblock, replace_file, CreateOptions}; use pbs_api_types::{ - Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, - BACKUP_FILE_REGEX, + Authid, BackupGroupDeleteStats, BackupNamespace, BackupType, GroupFilter, VerifyState, + BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, }; use pbs_config::{open_backup_lockfile, BackupLockGuard}; @@ -555,6 +555,11 @@ impl BackupDir { Ok(()) } + + /// Load the verify state from the manifest. + pub fn verify_state(&self) -> Result<Option<VerifyState>, anyhow::Error> { + Ok(self.load_manifest()?.0.verify_state()?.map(|svs| svs.state)) + } } impl AsRef<pbs_api_types::BackupDir> for BackupDir { diff --git a/pbs-datastore/src/manifest.rs b/pbs-datastore/src/manifest.rs index c3df014272a0..3013fab97221 100644 --- a/pbs-datastore/src/manifest.rs +++ b/pbs-datastore/src/manifest.rs @@ -5,7 +5,7 @@ use anyhow::{bail, format_err, Error}; use serde::{Deserialize, Serialize}; use serde_json::{json, Value}; -use pbs_api_types::{BackupType, CryptMode, Fingerprint}; +use pbs_api_types::{BackupType, CryptMode, Fingerprint, SnapshotVerifyState}; use pbs_tools::crypt_config::CryptConfig; pub const MANIFEST_BLOB_NAME: &str = "index.json.blob"; @@ -242,6 +242,18 @@ impl BackupManifest { let manifest: BackupManifest = serde_json::from_value(json)?; Ok(manifest) } + + /// Get the verify state of the snapshot + /// + /// Note: New snapshots, which have not been verified yet, do not have a status and this + /// function will return `Ok(None)`.
+ pub fn verify_state(&self) -> Result<Option<SnapshotVerifyState>, anyhow::Error> { + let verify = self.unprotected["verify_state"].clone(); + if verify.is_null() { + return Ok(None); + } + Ok(Some(serde_json::from_value::<SnapshotVerifyState>(verify)?)) + } } impl TryFrom for BackupManifest { diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 99b579f02c50..3624dba41199 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -537,15 +537,13 @@ unsafe fn list_snapshots_blocking( } }; - let verification = manifest.unprotected["verify_state"].clone(); - let verification: Option<SnapshotVerifyState> = - match serde_json::from_value(verification) { - Ok(verify) => verify, - Err(err) => { - eprintln!("error parsing verification state : '{}'", err); - None - } - }; + let verification: Option<SnapshotVerifyState> = match manifest.verify_state() { + Ok(verify) => verify, + Err(err) => { + eprintln!("error parsing verification state : '{}'", err); + None + } + }; let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum()); diff --git a/src/api2/backup/mod.rs b/src/api2/backup/mod.rs index 63c49f6537f7..dce40e5318b8 100644 --- a/src/api2/backup/mod.rs +++ b/src/api2/backup/mod.rs @@ -8,6 +8,7 @@ use hyper::http::request::Parts; use hyper::{Body, Request, Response, StatusCode}; use serde::Deserialize; use serde_json::{json, Value}; +use tracing::warn; use proxmox_rest_server::{H2Service, WorkerTask}; use proxmox_router::{http_err, list_subdirs_api_method}; @@ -19,9 +20,9 @@ use proxmox_sortable_macro::sortable; use proxmox_sys::fs::lock_dir_noblock_shared; use pbs_api_types::{ - Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState, - BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, - BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, + Authid, BackupNamespace, BackupType, Operation, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, + BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, + CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, }; use pbs_config::CachedUserInfo; use pbs_datastore::index::IndexFile; @@ -159,15 +160,18 @@ fn upgrade_to_backup_protocol( let info = backup_group.last_backup(true).unwrap_or(None); if let Some(info) = info { let (manifest, _) = info.backup_dir.load_manifest()?; - let verify = manifest.unprotected["verify_state"].clone(); - match serde_json::from_value::<SnapshotVerifyState>(verify) { - Ok(verify) => match verify.state { + match manifest.verify_state() { + Ok(Some(verify)) => match verify.state { VerifyState::Ok => Some(info), VerifyState::Failed => None, }, - Err(_) => { + Ok(None) => { // no verify state found, treat as valid Some(info) + }, + Err(err) => { + warn!("error parsing the snapshot manifest: {err:#}"); + Some(info) } } } else { diff --git a/src/backup/verify.rs b/src/backup/verify.rs index 6ef7e8eb3ebb..c1abe69a4fde 100644 --- a/src/backup/verify.rs +++ b/src/backup/verify.rs @@ -5,7 +5,7 @@ use std::time::Instant; use anyhow::{bail, format_err, Error}; use nix::dir::Dir; -use tracing::{error, info}; +use tracing::{error, info, warn}; use proxmox_sys::fs::lock_dir_noblock_shared; use proxmox_worker_task::WorkerTaskContext; @@ -553,10 +553,13 @@ pub fn verify_filter( return true; } - let raw_verify_state = manifest.unprotected["verify_state"].clone(); - match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) { - Err(_) => true, // no last verification, always include - Ok(last_verify) => { + match manifest.verify_state() { + Err(err) => { + warn!("error reading manifest: {err:#}");
+ true + } + Ok(None) => true, // no last verification, always include + Ok(Some(last_verify)) => { match outdated_after { None => false, // never re-verify if ignored and no max age Some(max_age) => { -- 2.39.5 From g.goller at proxmox.com Fri Nov 22 13:16:17 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 13:16:17 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v6 4/4] fix #3786: docs: add resync-corrupt option to sync-job In-Reply-To: <20241122121617.185615-1-g.goller@proxmox.com> References: <20241122121617.185615-1-g.goller@proxmox.com> Message-ID: <20241122121617.185615-5-g.goller@proxmox.com> Add short section explaining the `resync-corrupt` option on the sync-job. Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller Reviewed-by: Fabian Grünbichler --- docs/managing-remotes.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/managing-remotes.rst b/docs/managing-remotes.rst index a7fd5143d236..4a78a9310fa5 100644 --- a/docs/managing-remotes.rst +++ b/docs/managing-remotes.rst @@ -135,6 +135,12 @@ For mixing include and exclude filter, following rules apply: .. note:: The ``protected`` flag of remote backup snapshots will not be synced. +Enabling the advanced option 'resync-corrupt' will re-sync all snapshots that have +failed to verify during the last :ref:`maintenance_verification`. Hence, a verification +job needs to be run before a sync job with 'resync-corrupt' can be carried out. Be aware +that a 'resync-corrupt'-job needs to check the manifests of all snapshots in a datastore +and might take much longer than regular sync jobs. + Namespace Support ^^^^^^^^^^^^^^^^^ -- 2.39.5 From g.goller at proxmox.com Fri Nov 22 13:16:15 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Fri, 22 Nov 2024 13:16:15 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v6 2/4] fix #3786: api: add resync-corrupt option to sync jobs In-Reply-To: <20241122121617.185615-1-g.goller@proxmox.com> References: <20241122121617.185615-1-g.goller@proxmox.com> Message-ID: <20241122121617.185615-3-g.goller@proxmox.com> This option allows us to "fix" corrupt snapshots (and/or their chunks) by pulling them from another remote. When traversing the remote snapshots, we check whether each one exists locally, and if it does, whether its last verification failed. If the local snapshot is broken and the `resync-corrupt` option is turned on, we pull in the remote snapshot, overwriting the local one. This is very useful and has been requested a lot, as there is currently no way to "fix" corrupt chunks/snapshots even if the user has a healthy version of it on their offsite instance.
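Condensed into a pure function, the selection logic described above reads roughly as follows. This is a sketch with made-up types; the real filter in `pull_group` additionally tracks the skip-info counters and the transfer-last cutoff:

```
#[derive(Clone, Copy, PartialEq, Debug)]
enum VerifyState { Ok, Failed }

#[derive(Debug, PartialEq)]
enum Action {
    SkipAlreadySynced,
    Pull { corrupt: bool },
}

// local_state: None = no local snapshot or never verified,
// Some(Err(())) = manifest unreadable, Some(Ok(_)) = last verify result.
fn decide(
    resync_corrupt: bool,
    exists_locally: bool,
    local_state: Option<Result<VerifyState, ()>>,
    remote_newer_than_last_sync: bool,
) -> Action {
    if resync_corrupt && exists_locally {
        match local_state {
            // broken or unreadable locally -> re-pull and overwrite
            Some(Ok(VerifyState::Failed)) | Some(Err(())) => {
                return Action::Pull { corrupt: true };
            }
            _ => {}
        }
    }
    if remote_newer_than_last_sync {
        Action::Pull { corrupt: false }
    } else {
        Action::SkipAlreadySynced
    }
}

fn main() {
    // an already-synced snapshot that failed verification is re-pulled
    assert_eq!(
        decide(true, true, Some(Ok(VerifyState::Failed)), false),
        Action::Pull { corrupt: true }
    );
    // without the option it is skipped as already synced
    assert_eq!(
        decide(false, true, Some(Ok(VerifyState::Failed)), false),
        Action::SkipAlreadySynced
    );
    // a healthy, newer remote snapshot is pulled as usual
    assert_eq!(
        decide(true, true, Some(Ok(VerifyState::Ok)), true),
        Action::Pull { corrupt: false }
    );
}
```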
Originally-by: Shannon Sterz Signed-off-by: Gabriel Goller Reviewed-by: Fabian Grünbichler --- pbs-api-types/src/jobs.rs | 10 ++++++ src/api2/config/sync.rs | 4 +++ src/api2/pull.rs | 9 ++++- src/server/pull.rs | 72 ++++++++++++++++++++++++++++++--------- 4 files changed, 78 insertions(+), 17 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e8056beb00cb..52520811b560 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -536,6 +536,10 @@ impl SyncDirection { } } +pub const RESYNC_CORRUPT_SCHEMA: Schema = + BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") + .schema(); + #[api( properties: { id: { @@ -590,6 +594,10 @@ impl SyncDirection { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + } } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -623,6 +631,8 @@ pub struct SyncJobConfig { pub limit: RateLimitConfig, #[serde(skip_serializing_if = "Option::is_none")] pub transfer_last: Option<usize>, + #[serde(skip_serializing_if = "Option::is_none")] + pub resync_corrupt: Option<bool>, } impl SyncJobConfig { diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index 78eb7320566b..7ff6cae029d1 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -471,6 +471,9 @@ pub fn update_sync_job( if let Some(transfer_last) = update.transfer_last { data.transfer_last = Some(transfer_last); } + if let Some(resync_corrupt) = update.resync_corrupt { + data.resync_corrupt = Some(resync_corrupt); + } if update.limit.rate_in.is_some() { data.limit.rate_in = update.limit.rate_in; @@ -629,6 +632,7 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator ns: None, owner: Some(write_auth_id.clone()), comment: None, + resync_corrupt: None, remove_vanished: None, max_depth: None, group_filter: None, diff --git a/src/api2/pull.rs b/src/api2/pull.rs index d039dab59c65..d8ed1a7347b5 100644 --- a/src/api2/pull.rs +++ b/src/api2/pull.rs @@ -10,7 +10,7 @@ use pbs_api_types::{ Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA, GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA, - TRANSFER_LAST_SCHEMA, + RESYNC_CORRUPT_SCHEMA, TRANSFER_LAST_SCHEMA, }; use pbs_config::CachedUserInfo; use proxmox_rest_server::WorkerTask; @@ -87,6 +87,7 @@ impl TryFrom<&SyncJobConfig> for PullParameters { sync_job.group_filter.clone(), sync_job.limit.clone(), sync_job.transfer_last, + sync_job.resync_corrupt, ) } } @@ -132,6 +133,10 @@ impl TryFrom<&SyncJobConfig> for PullParameters { schema: TRANSFER_LAST_SCHEMA, optional: true, }, + "resync-corrupt": { + schema: RESYNC_CORRUPT_SCHEMA, + optional: true, + }, }, }, access: { @@ -156,6 +161,7 @@ async fn pull( group_filter: Option<Vec<GroupFilter>>, limit: RateLimitConfig, transfer_last: Option<usize>, + resync_corrupt: Option<bool>, rpcenv: &mut dyn RpcEnvironment, ) -> Result<String, Error> { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; @@ -193,6 +199,7 @@ async fn pull( group_filter, limit, transfer_last, + resync_corrupt, )?; // fixme: set to_stdout to false?
diff --git a/src/server/pull.rs b/src/server/pull.rs index 08b55956ce52..40d872d2487c 100644 --- a/src/server/pull.rs +++ b/src/server/pull.rs @@ -12,7 +12,8 @@ use tracing::info; use pbs_api_types::{ print_store_and_ns, Authid, BackupDir, BackupGroup, BackupNamespace, GroupFilter, Operation, - RateLimitConfig, Remote, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + RateLimitConfig, Remote, VerifyState, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_AUDIT, + PRIV_DATASTORE_BACKUP, }; use pbs_client::BackupRepository; use pbs_config::CachedUserInfo; @@ -55,6 +56,8 @@ pub(crate) struct PullParameters { group_filter: Vec, /// How many snapshots should be transferred at most (taking the newest N snapshots) transfer_last: Option, + /// Whether to re-sync corrupted snapshots + resync_corrupt: bool, } impl PullParameters { @@ -72,12 +75,14 @@ impl PullParameters { group_filter: Option>, limit: RateLimitConfig, transfer_last: Option, + resync_corrupt: Option, ) -> Result { if let Some(max_depth) = max_depth { ns.check_max_depth(max_depth)?; remote_ns.check_max_depth(max_depth)?; }; let remove_vanished = remove_vanished.unwrap_or(false); + let resync_corrupt = resync_corrupt.unwrap_or(false); let source: Arc = if let Some(remote) = remote { let (remote_config, _digest) = pbs_config::remote::config()?; @@ -116,6 +121,7 @@ impl PullParameters { max_depth, group_filter, transfer_last, + resync_corrupt, }) } } @@ -323,7 +329,7 @@ async fn pull_single_archive<'a>( /// /// Pulling a snapshot consists of the following steps: /// - (Re)download the manifest -/// -- if it matches, only download log and treat snapshot as already synced +/// -- if it matches and is not corrupt, only download log and treat snapshot as already synced /// - Iterate over referenced files /// -- if file already exists, verify contents /// -- if not, pull it from the remote @@ -332,6 +338,7 @@ async fn pull_snapshot<'a>( reader: Arc, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc>>, + corrupt: bool, ) -> Result { let mut sync_stats = SyncStats::default(); let mut manifest_name = snapshot.full_path(); @@ -352,7 +359,7 @@ async fn pull_snapshot<'a>( return Ok(sync_stats); } - if manifest_name.exists() { + if manifest_name.exists() && !corrupt { let manifest_blob = proxmox_lang::try_block!({ let mut manifest_file = std::fs::File::open(&manifest_name).map_err(|err| { format_err!("unable to open local manifest {manifest_name:?} - {err}") @@ -381,7 +388,7 @@ async fn pull_snapshot<'a>( let mut path = snapshot.full_path(); path.push(&item.filename); - if path.exists() { + if !corrupt && path.exists() { match ArchiveType::from_path(&item.filename)? 
{ ArchiveType::DynamicIndex => { let index = DynamicIndexReader::open(&path)?; @@ -443,6 +450,7 @@ async fn pull_snapshot_from<'a>( reader: Arc<dyn SyncSourceReader + 'a>, snapshot: &'a pbs_datastore::BackupDir, downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, + corrupt: bool, ) -> Result<SyncStats, Error> { let (_path, is_new, _snap_lock) = snapshot .datastore() @@ -451,7 +459,8 @@ async fn pull_snapshot_from<'a>( let sync_stats = if is_new { info!("sync snapshot {}", snapshot.dir()); - match pull_snapshot(reader, snapshot, downloaded_chunks).await { + // this snapshot is new, so it can never be corrupt + match pull_snapshot(reader, snapshot, downloaded_chunks, false).await { Err(err) => { if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir( snapshot.backup_ns(), @@ -468,8 +477,12 @@ async fn pull_snapshot_from<'a>( } } } else { + if corrupt { + info!("re-sync snapshot {} due to corruption", snapshot.dir()); + } else { + info!("re-sync snapshot {}", snapshot.dir()); + } - info!("re-sync snapshot {}", snapshot.dir()); - pull_snapshot(reader, snapshot, downloaded_chunks).await? + pull_snapshot(reader, snapshot, downloaded_chunks, corrupt).await? }; Ok(sync_stats) @@ -523,26 +536,52 @@ async fn pull_group( .last_successful_backup(&target_ns, group)? .unwrap_or(i64::MIN); - let list: Vec<BackupDir> = raw_list + // Filter remote BackupDirs to include in pull + // Also stores if the snapshot is corrupt (verification job failed) + let list: Vec<(BackupDir, bool)> = raw_list .into_iter() .enumerate() - .filter(|&(pos, ref dir)| { + .filter_map(|(pos, dir)| { source_snapshots.insert(dir.time); + // If resync_corrupt is set, check if the corresponding local snapshot failed + // verification + if params.resync_corrupt { + let local_dir = params + .target + .store + .backup_dir(target_ns.clone(), dir.clone()); + if let Ok(local_dir) = local_dir { + match local_dir.verify_state() { + Ok(Some(state)) => { + if state == VerifyState::Failed { + return Some((dir, true)); + } + } + Ok(None) => { + // The verify_state item was not found in the manifest, this means the + // snapshot is new. + } + Err(_) => { + // There was an error loading the manifest, probably better if we + // resync.
+ return Some((dir, true)); + } + } + } + } // Note: the snapshot represented by `last_sync_time` might be missing its backup log // or post-backup verification state if those were not yet available during the last // sync run, always resync it if last_sync_time > dir.time { already_synced_skip_info.update(dir.time); - return false; + return None; } - if pos < cutoff && last_sync_time != dir.time { transfer_last_skip_info.update(dir.time); - return false; + return None; } - true + Some((dir, false)) }) - .map(|(_, dir)| dir) .collect(); if already_synced_skip_info.count > 0 { @@ -561,7 +600,7 @@ async fn pull_group( let mut sync_stats = SyncStats::default(); - for (pos, from_snapshot) in list.into_iter().enumerate() { + for (pos, (from_snapshot, corrupt)) in list.into_iter().enumerate() { let to_snapshot = params .target .store @@ -571,7 +610,8 @@ async fn pull_group( .source .reader(source_namespace, &from_snapshot) .await?; - let result = pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone()).await; + let result = + pull_snapshot_from(reader, &to_snapshot, downloaded_chunks.clone(), corrupt).await; progress.done_snapshots = pos as u64 + 1; info!("percentage done: {progress}"); -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 13:26:04 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 13:26:04 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup] server: push: add error context to api calls and priv checks Message-ID: <20241122122604.335901-1-c.ebner@proxmox.com> Add an anyhow context to errors and display the full error context in the log output. Further, make it clear which errors stem from api calls by explicitly mentioning this in the context message. This also fixes incorrect error handling by placing the error context on the api result instead of the serde deserialization error for cases this was handled incorrectly. 
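The difference between the two error-handling variants is easiest to see in a small self-contained sketch (hypothetical function names, plain anyhow plus serde_json; not the actual push code):

```
use anyhow::{Context, Result};

fn remote_call() -> Result<serde_json::Value> {
    // stand-in for e.g. params.target.client.delete(...), which fails here
    anyhow::bail!("permission check failed")
}

// Incorrect: the context is attached to the deserialization step only,
// so a failing API call propagates without any hint of what was attempted.
fn remove_group_wrong() -> Result<u64> {
    let data = remote_call()?;
    serde_json::from_value(data).context("removing target group failed")
}

// Correct: the context sits on the API result itself.
fn remove_group_right() -> Result<u64> {
    let data =
        remote_call().context("failed to remove remote group, remote returned error")?;
    Ok(serde_json::from_value(data)?)
}

fn main() {
    // "{:#}" prints the whole anyhow context chain on one line:
    //   failed to remove remote group, remote returned error: permission check failed
    if let Err(err) = remove_group_right() {
        println!("{err:#}");
    }
    // the wrong variant only prints: permission check failed
    if let Err(err) = remove_group_wrong() {
        println!("{err:#}");
    }
}
```

This is also why the log statements switch from `{err}` to `{err:#}`: the default `Display` of an anyhow error shows only the outermost message, while the alternate form includes all attached contexts.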
Signed-off-by: Christian Ebner --- changes since version 1: - fix incorrect api result error handling - use anyhow context - show full error context in log output src/server/push.rs | 88 +++++++++++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 32 deletions(-) diff --git a/src/server/push.rs b/src/server/push.rs index 4c489531c..1914cad75 100644 --- a/src/server/push.rs +++ b/src/server/push.rs @@ -3,7 +3,7 @@ use std::collections::HashSet; use std::sync::{Arc, Mutex}; -use anyhow::{bail, format_err, Error}; +use anyhow::{bail, Context, Error}; use futures::stream::{self, StreamExt, TryStreamExt}; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -180,7 +180,12 @@ fn check_ns_remote_datastore_privs( // Fetch the list of namespaces found on target async fn fetch_target_namespaces(params: &PushParameters) -> Result, Error> { let api_path = params.target.datastore_api_path("namespace"); - let mut result = params.target.client.get(&api_path, None).await?; + let mut result = params + .target + .client + .get(&api_path, None) + .await + .context("Fetching remote namespaces failed, remote returned error")?; let namespaces: Vec = serde_json::from_value(result["data"].take())?; let mut namespaces: Vec = namespaces .into_iter() @@ -201,7 +206,7 @@ async fn remove_target_namespace( } check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) - .map_err(|err| format_err!("Pruning remote datastore namespaces not allowed - {err}"))?; + .context("Pruning remote datastore namespaces not allowed")?; let api_path = params.target.datastore_api_path("namespace"); @@ -214,13 +219,17 @@ async fn remove_target_namespace( args["error-on-protected"] = serde_json::to_value(false)?; } - let mut result = params.target.client.delete(&api_path, Some(args)).await?; + let mut result = params + .target + .client + .delete(&api_path, Some(args)) + .await + .context(format!( + "Failed to remove remote namespace {target_namespace}, remote returned error" + ))?; if params.target.supports_prune_delete_stats { - let data = result["data"].take(); - serde_json::from_value(data).map_err(|err| { - format_err!("removing target namespace {target_namespace} failed - {err}") - }) + Ok(serde_json::from_value(result["data"].take())?) 
} else { Ok(BackupGroupDeleteStats::default()) } @@ -235,7 +244,13 @@ async fn fetch_target_groups( let api_path = params.target.datastore_api_path("groups"); let args = Some(serde_json::json!({ "ns": target_namespace.name() })); - let mut result = params.target.client.get(&api_path, args).await?; + let mut result = params + .target + .client + .get(&api_path, args) + .await + .context("Failed to fetch remote groups, remote returned error")?; + let groups: Vec = serde_json::from_value(result["data"].take())?; let (mut owned, not_owned) = groups.into_iter().fold( @@ -262,7 +277,7 @@ async fn remove_target_group( backup_group: &BackupGroup, ) -> Result { check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) - .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; + .context("Pruning remote datastore contents not allowed")?; let api_path = params.target.datastore_api_path("groups"); @@ -273,12 +288,11 @@ async fn remove_target_group( args["error-on-protected"] = serde_json::to_value(false)?; } - let mut result = params.target.client.delete(&api_path, Some(args)).await?; + let mut result = params.target.client.delete(&api_path, Some(args)).await + .context(format!("Failed to remove remote group {backup_group}, remote returned error"))?; if params.target.supports_prune_delete_stats { - let data = result["data"].take(); - serde_json::from_value(data) - .map_err(|err| format_err!("removing target group {backup_group} failed - {err}")) + Ok(serde_json::from_value(result["data"].take())?) } else { Ok(BackupGroupDeleteStats::default()) } @@ -295,7 +309,7 @@ async fn check_or_create_target_namespace( // Sub-namespaces have to be created by creating parent components first. check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) - .map_err(|err| format_err!("Creating remote namespace not allowed - {err}"))?; + .context("Creating remote namespace not allowed")?; let mut parent = BackupNamespace::root(); for component in target_namespace.components() { @@ -310,12 +324,12 @@ async fn check_or_create_target_namespace( if !parent.is_root() { args["parent"] = serde_json::to_value(parent.clone())?; } - match params.target.client.post(&api_path, Some(args)).await { - Ok(_) => info!("Successfully created new namespace {current} on remote"), - Err(err) => { - bail!("Remote creation of namespace {current} failed, remote returned: {err}") - } - } + + params.target.client.post(&api_path, Some(args)).await + .context("Creation of remote namespace {current} failed, remote returned error")?; + + info!("Successfully created new namespace {current} on remote"); + existing_target_namespaces.push(current.clone()); parent = current; } @@ -377,7 +391,7 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result Result { errors = true; - info!("Encountered errors: {err}"); + info!("Encountered errors: {err:#}"); info!("Failed to sync {source_store_and_ns} into {target_store_and_ns}!"); } } @@ -453,7 +467,7 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result { - warn!("Encountered errors: {err}"); + warn!("Encountered errors: {err:#}"); warn!("Failed to remove vanished namespace {target_namespace} from remote!"); continue; } @@ -483,7 +497,7 @@ pub(crate) async fn push_namespace( let target_namespace = params.map_to_target(namespace)?; // Check if user is allowed to perform backups on remote datastore check_ns_remote_datastore_privs(params, &target_namespace, PRIV_REMOTE_DATASTORE_BACKUP) - 
.map_err(|err| format_err!("Pushing to remote namespace not allowed - {err}"))?; + .context("Pushing to remote namespace not allowed")?; let mut list: Vec = params .source @@ -529,7 +543,7 @@ pub(crate) async fn push_namespace( match push_group(params, namespace, &group, &mut progress).await { Ok(sync_stats) => stats.add(sync_stats), Err(err) => { - warn!("Encountered errors: {err}"); + warn!("Encountered errors: {err:#}"); warn!("Failed to push group {group} to remote!"); errors = true; } @@ -562,7 +576,7 @@ pub(crate) async fn push_namespace( })); } Err(err) => { - warn!("Encountered errors: {err}"); + warn!("Encountered errors: {err:#}"); warn!("Failed to remove vanished group {target_group} from remote!"); errors = true; continue; @@ -584,7 +598,12 @@ async fn fetch_target_snapshots( if !target_namespace.is_root() { args["ns"] = serde_json::to_value(target_namespace)?; } - let mut result = params.target.client.get(&api_path, Some(args)).await?; + let mut result = params + .target + .client + .get(&api_path, Some(args)) + .await + .context("Failed to fetch remote snapshots, remote returned error")?; let snapshots: Vec = serde_json::from_value(result["data"].take())?; Ok(snapshots) @@ -596,14 +615,19 @@ async fn forget_target_snapshot( snapshot: &BackupDir, ) -> Result<(), Error> { check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) - .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; + .context("Pruning remote datastore contents not allowed")?; let api_path = params.target.datastore_api_path("snapshots"); let mut args = serde_json::to_value(snapshot)?; if !target_namespace.is_root() { args["ns"] = serde_json::to_value(target_namespace)?; } - params.target.client.delete(&api_path, Some(args)).await?; + params + .target + .client + .delete(&api_path, Some(args)) + .await + .context("Failed to remove remote snapshot, remote returned error")?; Ok(()) } @@ -709,7 +733,7 @@ pub(crate) async fn push_group( ); } Err(err) => { - warn!("Encountered errors: {err}"); + warn!("Encountered errors: {err:#}"); warn!( "Failed to remove vanished snapshot {name} from remote!", name = snapshot.backup @@ -754,7 +778,7 @@ pub(crate) async fn push_snapshot( Ok((manifest, _raw_size)) => manifest, Err(err) => { // No manifest in snapshot or failed to read, warn and skip - log::warn!("Encountered errors: {err}"); + log::warn!("Encountered errors: {err:#}"); log::warn!("Failed to load manifest for '{snapshot}'!"); return Ok(stats); } -- 2.39.5 From c.ebner at proxmox.com Fri Nov 22 13:28:35 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Fri, 22 Nov 2024 13:28:35 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] improve push sync job log messages In-Reply-To: References: <20241121154337.471425-1-c.ebner@proxmox.com> Message-ID: superseded-by version 2: https://lore.proxmox.com/pbs-devel/20241122122604.335901-1-c.ebner at proxmox.com/T From t.lamprecht at proxmox.com Fri Nov 22 13:42:29 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Fri, 22 Nov 2024 13:42:29 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 12/29] api/api-types: refactor api endpoint version, add api types In-Reply-To: <9ccf5606-ff5a-4473-96b3-a9ea80ab0dcf@proxmox.com> References: <20241031121519.434337-1-c.ebner@proxmox.com> <20241031121519.434337-13-c.ebner@proxmox.com> <173089427968.79072.3773251895934605531@yuna.proxmox.com> <56e5b937-448e-4aa7-b285-f5cbad777bcb@proxmox.com> <13127ac6-d634-4ba4-b48a-9866110e35e1@proxmox.com> 
<65ce8683-8e27-4d4e-a2f3-9d05960f2e72@proxmox.com> <6bf543a9-1d7c-412e-9862-40e42ddf005e@proxmox.com> <9ccf5606-ff5a-4473-96b3-a9ea80ab0dcf@proxmox.com> Message-ID: <3082f27e-bea2-4dcb-a67c-be12a63e16eb@proxmox.com> On 21.11.24 at 17:15, Christian Ebner wrote: > On 11/21/24 17:01, Thomas Lamprecht wrote: >> On 21.11.24 at 10:58, Christian Ebner wrote: >>> On 11/21/24 10:23, Thomas Lamprecht wrote: >>> Well, that is something I did not consider at all! So with that >>> viewpoint, adding this to PBS specifically is surely not the best way. >>> As discussed with Fabian off list, version based matching will be the >>> best way forward here, and dropping the incompatibility check once EOL >>> is reached. >> >> If we add such a thing that you proposed we should definitely get the >> story somewhat straight w.r.t. how we want to handle this for all projects, >> and define when to define a feature and when not, with some extra care on >> the interfaces, as those are relatively set in stone. > > Regarding this, have we considered exposing the API schema to the > client, something like [0]? Well, if we do, I'd go for the JSON schema directly (i.e., like our API viewer consumes), and yes, IIRC there were some very informal/shallow talks about this once or twice in the last five years or so, but nothing came out of it. > Fetching the remote API schema and therefore knowing which parameters > are available would have not only covered the additional check, but also > allowed to see what response value to expect. As long as the schema is somewhat complete (a bit better for PBS/rust, not perfect for PVE) > > Although, I guess this boils down to the same set of maintenance burden > in the end, hard to maintain code because of too many condition checks. That API would be auto-generated and only the client side would take the burden of checking, and that would not be so bad. If done nicely and such that general tooling can work with it we would also help external API consumers a lot. But IMO a bigger project, at least if we want to have most use cases covered and implemented, honoring industry standards closely. From f.gruenbichler at proxmox.com Fri Nov 22 13:49:01 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Fri, 22 Nov 2024 13:49:01 +0100 Subject: [pbs-devel] applied-series: [PATCH v5 proxmox-backup 0/5] introduce dedicated archive name api type In-Reply-To: <20241122103011.165010-1-c.ebner@proxmox.com> References: <20241122103011.165010-1-c.ebner@proxmox.com> Message-ID: <173227974191.2118190.13647075496700342338@yuna.proxmox.com> with two follow-up patches squashed in (acked by Chris), in order to keep the "constant" names for manifest, log, catalog and key via LazyLock, and a third patch extending the tests slightly added. Quoting Christian Ebner (2024-11-22 11:30:06) > There is currently no dedicated api type for the archive names, given > as input parameters to several api methods. > > These patches introduce a dedicated type for archive names, in order > to collect the code for checks and eventual mappings into one > location and reduce possible unintentional misuse by passing > incorrect argument values to the functions and methods consuming > the archive names. > > Further, drop all archive name constants in favor of helper methods on > the api type to generate `BackupArchiveName` instances for them. This > allows for direct comparison with other `BackupArchiveName` instances.
> > As a positive side effect, the mapping now allows also for the server > archive type extensions to be optionally passed as input to several > commands, e.g. > ``` > proxmox-backup-client restore .pxar.didx > ``` > is now valid, being equal to > ``` > proxmox-backup-client restore > ``` > > Changes since version 4: > - Rebased onto current master > - Extended to newly introduced sync jobs in push direction > > Changes since version 3: > - Removed catchall fallback to blob type, reworked type parsing logic > - Removed archive name constants in favor of helper methods to generate > archive names for them > - Extended tests > > Changes since version 2: > - Rebased onto current master > - Amended commit messages > > Changes since version 1 (thanks @Gabriel): > - Rebased onto current master > - Added unit tests for archive name parsing > - Added missing check for invalid archive names ending with '/' > - Inlined variable names for format strings > - Import implemented traits at top > > Christian Ebner (5): > datastore: move `ArchiveType` to api types > api types: introduce `BackupArchiveName` type > client/server: use dedicated api type for all archive names > client: drop unused parse_archive_type helper > api types: add unit tests for backup archive name parsing > > pbs-api-types/src/datastore.rs | 238 ++++++++++++++++++++++++++- > pbs-client/src/backup_reader.rs | 18 +- > pbs-client/src/backup_writer.rs | 43 +++-- > pbs-client/src/pxar/tools.rs | 3 +- > pbs-client/src/tools/mod.rs | 28 ++-- > pbs-datastore/src/backup_info.rs | 22 +-- > pbs-datastore/src/datastore.rs | 7 +- > pbs-datastore/src/lib.rs | 3 - > pbs-datastore/src/manifest.rs | 55 +++---- > pbs-datastore/src/snapshot_reader.rs | 11 +- > proxmox-backup-client/src/catalog.rs | 35 ++-- > proxmox-backup-client/src/helper.rs | 7 +- > proxmox-backup-client/src/main.rs | 138 +++++++++------- > proxmox-backup-client/src/mount.rs | 33 ++-- > proxmox-file-restore/src/main.rs | 13 +- > src/api2/admin/datastore.rs | 70 ++++---- > src/api2/backup/mod.rs | 3 +- > src/api2/reader/mod.rs | 7 +- > src/api2/tape/restore.rs | 17 +- > src/backup/mod.rs | 3 - > src/backup/verify.rs | 7 +- > src/bin/proxmox_backup_debug/diff.rs | 16 +- > src/server/pull.rs | 24 +-- > src/server/push.rs | 31 ++-- > src/server/sync.rs | 11 +- > tests/prune.rs | 5 +- > 26 files changed, 539 insertions(+), 309 deletions(-) > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Fri Nov 22 14:14:08 2024 From: f.gruenbichler at proxmox.com (Fabian Grünbichler) Date: Fri, 22 Nov 2024 14:14:08 +0100 Subject: [pbs-devel] applied: [PATCH v2 proxmox-backup] server: push: add error context to api calls and priv checks In-Reply-To: <20241122122604.335901-1-c.ebner@proxmox.com> References: <20241122122604.335901-1-c.ebner@proxmox.com> Message-ID: <173228124842.2118190.6703201927866768772@yuna.proxmox.com> with cargo fmt + a missing `format!` in one context folded in Quoting Christian Ebner (2024-11-22 13:26:04) > Add an anyhow context to errors and display the full error context > in the log output. Further, make it clear which errors stem from api > calls by explicitly mentioning this in the context message. > > This also fixes incorrect error handling by placing the error context > on the api result instead of the serde deserialization error for > cases this was handled incorrectly.
> > Signed-off-by: Christian Ebner > --- > changes since version 1: > - fix incorrect api result error handling > - use anyhow context > - show full error context in log output > > src/server/push.rs | 88 +++++++++++++++++++++++++++++----------------- > 1 file changed, 56 insertions(+), 32 deletions(-) > > diff --git a/src/server/push.rs b/src/server/push.rs > index 4c489531c..1914cad75 100644 > --- a/src/server/push.rs > +++ b/src/server/push.rs > @@ -3,7 +3,7 @@ > use std::collections::HashSet; > use std::sync::{Arc, Mutex}; > > -use anyhow::{bail, format_err, Error}; > +use anyhow::{bail, Context, Error}; > use futures::stream::{self, StreamExt, TryStreamExt}; > use tokio::sync::mpsc; > use tokio_stream::wrappers::ReceiverStream; > @@ -180,7 +180,12 @@ fn check_ns_remote_datastore_privs( > // Fetch the list of namespaces found on target > async fn fetch_target_namespaces(params: &PushParameters) -> Result, Error> { > let api_path = params.target.datastore_api_path("namespace"); > - let mut result = params.target.client.get(&api_path, None).await?; > + let mut result = params > + .target > + .client > + .get(&api_path, None) > + .await > + .context("Fetching remote namespaces failed, remote returned error")?; > let namespaces: Vec = serde_json::from_value(result["data"].take())?; > let mut namespaces: Vec = namespaces > .into_iter() > @@ -201,7 +206,7 @@ async fn remove_target_namespace( > } > > check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) > - .map_err(|err| format_err!("Pruning remote datastore namespaces not allowed - {err}"))?; > + .context("Pruning remote datastore namespaces not allowed")?; > > let api_path = params.target.datastore_api_path("namespace"); > > @@ -214,13 +219,17 @@ async fn remove_target_namespace( > args["error-on-protected"] = serde_json::to_value(false)?; > } > > - let mut result = params.target.client.delete(&api_path, Some(args)).await?; > + let mut result = params > + .target > + .client > + .delete(&api_path, Some(args)) > + .await > + .context(format!( > + "Failed to remove remote namespace {target_namespace}, remote returned error" > + ))?; > > if params.target.supports_prune_delete_stats { > - let data = result["data"].take(); > - serde_json::from_value(data).map_err(|err| { > - format_err!("removing target namespace {target_namespace} failed - {err}") > - }) > + Ok(serde_json::from_value(result["data"].take())?) 
> } else { > Ok(BackupGroupDeleteStats::default()) > } > @@ -235,7 +244,13 @@ async fn fetch_target_groups( > let api_path = params.target.datastore_api_path("groups"); > let args = Some(serde_json::json!({ "ns": target_namespace.name() })); > > - let mut result = params.target.client.get(&api_path, args).await?; > + let mut result = params > + .target > + .client > + .get(&api_path, args) > + .await > + .context("Failed to fetch remote groups, remote returned error")?; > + > let groups: Vec = serde_json::from_value(result["data"].take())?; > > let (mut owned, not_owned) = groups.into_iter().fold( > @@ -262,7 +277,7 @@ async fn remove_target_group( > backup_group: &BackupGroup, > ) -> Result { > check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) > - .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; > + .context("Pruning remote datastore contents not allowed")?; > > let api_path = params.target.datastore_api_path("groups"); > > @@ -273,12 +288,11 @@ async fn remove_target_group( > args["error-on-protected"] = serde_json::to_value(false)?; > } > > - let mut result = params.target.client.delete(&api_path, Some(args)).await?; > + let mut result = params.target.client.delete(&api_path, Some(args)).await > + .context(format!("Failed to remove remote group {backup_group}, remote returned error"))?; > > if params.target.supports_prune_delete_stats { > - let data = result["data"].take(); > - serde_json::from_value(data) > - .map_err(|err| format_err!("removing target group {backup_group} failed - {err}")) > + Ok(serde_json::from_value(result["data"].take())?) > } else { > Ok(BackupGroupDeleteStats::default()) > } > @@ -295,7 +309,7 @@ async fn check_or_create_target_namespace( > // Sub-namespaces have to be created by creating parent components first. 
> > check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_MODIFY) > - .map_err(|err| format_err!("Creating remote namespace not allowed - {err}"))?; > + .context("Creating remote namespace not allowed")?; > > let mut parent = BackupNamespace::root(); > for component in target_namespace.components() { > @@ -310,12 +324,12 @@ async fn check_or_create_target_namespace( > if !parent.is_root() { > args["parent"] = serde_json::to_value(parent.clone())?; > } > - match params.target.client.post(&api_path, Some(args)).await { > - Ok(_) => info!("Successfully created new namespace {current} on remote"), > - Err(err) => { > - bail!("Remote creation of namespace {current} failed, remote returned: {err}") > - } > - } > + > + params.target.client.post(&api_path, Some(args)).await > + .context("Creation of remote namespace {current} failed, remote returned error")?; > + > + info!("Successfully created new namespace {current} on remote"); > + > existing_target_namespaces.push(current.clone()); > parent = current; > } > @@ -377,7 +391,7 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result ) > .await > { > - warn!("Encountered error: {err}"); > + warn!("Encountered error: {err:#}"); > warn!("Failed to sync {source_store_and_ns} into {target_store_and_ns}!"); > errors = true; > continue; > @@ -404,7 +418,7 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result } > Err(err) => { > errors = true; > - info!("Encountered errors: {err}"); > + info!("Encountered errors: {err:#}"); > info!("Failed to sync {source_store_and_ns} into {target_store_and_ns}!"); > } > } > @@ -453,7 +467,7 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result } > } > Err(err) => { > - warn!("Encountered errors: {err}"); > + warn!("Encountered errors: {err:#}"); > warn!("Failed to remove vanished namespace {target_namespace} from remote!"); > continue; > } > @@ -483,7 +497,7 @@ pub(crate) async fn push_namespace( > let target_namespace = params.map_to_target(namespace)?; > // Check if user is allowed to perform backups on remote datastore > check_ns_remote_datastore_privs(params, &target_namespace, PRIV_REMOTE_DATASTORE_BACKUP) > - .map_err(|err| format_err!("Pushing to remote namespace not allowed - {err}"))?; > + .context("Pushing to remote namespace not allowed")?; > > let mut list: Vec = params > .source > @@ -529,7 +543,7 @@ pub(crate) async fn push_namespace( > match push_group(params, namespace, &group, &mut progress).await { > Ok(sync_stats) => stats.add(sync_stats), > Err(err) => { > - warn!("Encountered errors: {err}"); > + warn!("Encountered errors: {err:#}"); > warn!("Failed to push group {group} to remote!"); > errors = true; > } > @@ -562,7 +576,7 @@ pub(crate) async fn push_namespace( > })); > } > Err(err) => { > - warn!("Encountered errors: {err}"); > + warn!("Encountered errors: {err:#}"); > warn!("Failed to remove vanished group {target_group} from remote!"); > errors = true; > continue; > @@ -584,7 +598,12 @@ async fn fetch_target_snapshots( > if !target_namespace.is_root() { > args["ns"] = serde_json::to_value(target_namespace)?; > } > - let mut result = params.target.client.get(&api_path, Some(args)).await?; > + let mut result = params > + .target > + .client > + .get(&api_path, Some(args)) > + .await > + .context("Failed to fetch remote snapshots, remote returned error")?; > let snapshots: Vec = serde_json::from_value(result["data"].take())?; > > Ok(snapshots) > @@ -596,14 +615,19 @@ async fn forget_target_snapshot( > snapshot: 
&BackupDir, > ) -> Result<(), Error> { > check_ns_remote_datastore_privs(params, target_namespace, PRIV_REMOTE_DATASTORE_PRUNE) > - .map_err(|err| format_err!("Pruning remote datastore contents not allowed - {err}"))?; > + .context("Pruning remote datastore contents not allowed")?; > > let api_path = params.target.datastore_api_path("snapshots"); > let mut args = serde_json::to_value(snapshot)?; > if !target_namespace.is_root() { > args["ns"] = serde_json::to_value(target_namespace)?; > } > - params.target.client.delete(&api_path, Some(args)).await?; > + params > + .target > + .client > + .delete(&api_path, Some(args)) > + .await > + .context("Failed to remove remote snapshot, remote returned error")?; > > Ok(()) > } > @@ -709,7 +733,7 @@ pub(crate) async fn push_group( > ); > } > Err(err) => { > - warn!("Encountered errors: {err}"); > + warn!("Encountered errors: {err:#}"); > warn!( > "Failed to remove vanished snapshot {name} from remote!", > name = snapshot.backup > @@ -754,7 +778,7 @@ pub(crate) async fn push_snapshot( > Ok((manifest, _raw_size)) => manifest, > Err(err) => { > // No manifest in snapshot or failed to read, warn and skip > - log::warn!("Encountered errors: {err}"); > + log::warn!("Encountered errors: {err:#}"); > log::warn!("Failed to load manifest for '{snapshot}'!"); > return Ok(stats); > } > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From h.laimer at proxmox.com Fri Nov 22 15:46:49 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:49 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 02/25] maintenance: make is_offline more generic In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-3-h.laimer@proxmox.com> ... and add MaintenanceType::Delete to it. We also want to clear any cache entries if we are deleting the datastore, not just if it is marked as offline. Signed-off-by: Hannes Laimer --- * new in v14 pbs-api-types/src/maintenance.rs | 7 +++---- pbs-datastore/src/datastore.rs | 5 +++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 1e3413dca..a7b8b078d 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -77,10 +77,9 @@ pub struct MaintenanceMode { } impl MaintenanceMode { - /// Used for deciding whether the datastore is cleared from the internal cache after the last - /// task finishes, so all open files are closed.
- pub fn is_offline(&self) -> bool { - self.ty == MaintenanceType::Offline + /// Used for deciding whether the datastore is cleared from the internal cache + pub fn clear_from_cache(&self) -> bool { + self.ty == MaintenanceType::Offline || self.ty == MaintenanceType::Delete } pub fn check(&self, operation: Option) -> Result<(), Error> { diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 2755fed8c..2bf2b8437 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -116,7 +116,8 @@ impl Drop for DataStore { && pbs_config::datastore::config() .and_then(|(s, _)| s.lookup::("datastore", self.name())) .map_or(false, |c| { - c.get_maintenance_mode().map_or(false, |m| m.is_offline()) + c.get_maintenance_mode() + .map_or(false, |m| m.clear_from_cache()) }); if remove_from_cache { @@ -216,7 +217,7 @@ impl DataStore { let datastore: DataStoreConfig = config.lookup("datastore", name)?; if datastore .get_maintenance_mode() - .map_or(false, |m| m.is_offline()) + .map_or(false, |m| m.clear_from_cache()) { // the datastore drop handler does the checking if tasks are running and clears the // cache entry, so we just have to trigger it here -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:50 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:50 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 03/26] maintenance: add 'Unmount' maintenance type In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-4-h.laimer@proxmox.com> From: Dietmar Maurer Signed-off-by: Dietmar Maurer --- pbs-api-types/src/datastore.rs | 3 +++ pbs-api-types/src/maintenance.rs | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index b722c9ab7..ba75ebaba 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -401,6 +401,9 @@ impl DataStoreConfig { match current_type { Some(MaintenanceType::ReadOnly) => { /* always OK */ } Some(MaintenanceType::Offline) => { /* always OK */ } + Some(MaintenanceType::Unmount) => { + bail!("datastore is being unmounted"); + } Some(MaintenanceType::Delete) => { match new_type { Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ } diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index a7b8b078d..3c9aa8190 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -38,7 +38,6 @@ pub enum Operation { /// Maintenance type. pub enum MaintenanceType { // TODO: - // - Add "unmounting" once we got pluggable datastores // - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate // operation, so that one can enable a mode where nothing new can be added but stuff can be // cleaned @@ -48,6 +47,8 @@ pub enum MaintenanceType { Offline, /// The datastore is being deleted. Delete, + /// The (removable) datastore is being unmounted. 
+ Unmount, } serde_plain::derive_display_from_serialize!(MaintenanceType); serde_plain::derive_fromstr_from_deserialize!(MaintenanceType); @@ -79,7 +80,9 @@ pub struct MaintenanceMode { impl MaintenanceMode { /// Used for deciding whether the datastore is cleared from the internal cache pub fn clear_from_cache(&self) -> bool { - self.ty == MaintenanceType::Offline || self.ty == MaintenanceType::Delete + self.ty == MaintenanceType::Offline + || self.ty == MaintenanceType::Delete + || self.ty == MaintenanceType::Unmount } pub fn check(&self, operation: Option<Operation>) -> Result<(), Error> { @@ -93,6 +96,8 @@ impl MaintenanceMode { if let Some(Operation::Lookup) = operation { return Ok(()); + } else if self.ty == MaintenanceType::Unmount { + bail!("datastore is being unmounted"); } else if self.ty == MaintenanceType::Offline { bail!("offline maintenance mode: {}", message); } else if self.ty == MaintenanceType::ReadOnly { -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:54 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:54 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 07/25] api: add check for nested datastores on creation In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-8-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- * new in v14, and not removable datastore specific src/api2/config/datastore.rs | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 420f8ddd0..75e1a1a56 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -81,6 +81,20 @@ pub(crate) fn do_create_datastore( bail!("cannot create datastore in root path"); } + for store in config.convert_to_typed_array::<DataStoreConfig>("datastore")? { + if store.backing_device != datastore.backing_device { + continue; + } + if store.path.starts_with(&datastore.path) || datastore.path.starts_with(&store.path) { + param_bail!( + "path", + "nested datastores not allowed: '{}' already in '{}'", + store.name, + store.path + ); + } + } + let need_unmount = datastore.backing_device.is_some(); if need_unmount { do_mount_device(datastore.clone())?; -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:56 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:56 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 09/26] bin: manager: add (un)mount command In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-10-h.laimer@proxmox.com> We can't just directly delegate these commands to the API endpoints since both mounting and unmounting are done in a worker, and that one would be killed when the parent ends. In this case that would be the CLI process, which basically ends right after spawning the worker.
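The pattern can be sketched with a plain thread standing in for the task worker (hypothetical names; the real code calls the mount/unmount API handler and then blocks on `wait_for_local_worker` before the CLI process may exit):

```
use std::thread;
use std::time::Duration;

// The API handler only spawns the job and returns; the actual (un)mount
// happens in the worker. If the short-lived CLI process were the one
// backing that worker, exiting would kill the job half-way through.
fn spawn_worker() -> thread::JoinHandle<Result<(), String>> {
    thread::spawn(|| {
        thread::sleep(Duration::from_millis(100)); // pretend to mount
        Ok(())
    })
}

// What the manager command does instead: trigger the job, then wait for
// it locally so the worker outlives the command invocation.
fn mount_command() -> Result<(), String> {
    let worker = spawn_worker();
    worker.join().map_err(|_| "worker panicked".to_string())?
}

fn main() {
    mount_command().expect("mount failed");
    println!("datastore mounted");
}
```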
Signed-off-by: Hannes Laimer --- pbs-config/src/datastore.rs | 14 ++++ src/bin/proxmox_backup_manager/datastore.rs | 74 +++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/pbs-config/src/datastore.rs b/pbs-config/src/datastore.rs index dc5bb3da9..396dcb371 100644 --- a/pbs-config/src/datastore.rs +++ b/pbs-config/src/datastore.rs @@ -62,6 +62,20 @@ pub fn complete_datastore_name(_arg: &str, _param: &HashMap) -> } } +pub fn complete_removable_datastore_name( + _arg: &str, + _param: &HashMap, +) -> Vec { + match config() { + Ok((data, _digest)) => data + .sections + .into_iter() + .filter_map(|(name, (_, c))| c.get("backing-device").map(|_| name)) + .collect(), + Err(_) => Vec::new(), + } +} + pub fn complete_acl_path(_arg: &str, _param: &HashMap) -> Vec { let mut list = vec![ String::from("/"), diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs index 3a349451f..32a55fb9c 100644 --- a/src/bin/proxmox_backup_manager/datastore.rs +++ b/src/bin/proxmox_backup_manager/datastore.rs @@ -42,6 +42,34 @@ fn list_datastores(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result Result<(), Error> { + param["node"] = "localhost".into(); + + let info = &api2::admin::datastore::API_METHOD_MOUNT; + let result = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + Ok(()) +} + #[api( input: { properties: { @@ -101,6 +129,34 @@ async fn create_datastore(mut param: Value) -> Result { Ok(Value::Null) } +#[api( + protected: true, + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, +)] +/// Unmount a removable datastore. 
+async fn unmount_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + param["node"] = "localhost".into(); + + let info = &api2::admin::datastore::API_METHOD_UNMOUNT; + let result = match info.handler { + ApiHandler::Async(handler) => (handler)(param, info, rpcenv).await?, + _ => unreachable!(), + }; + + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + Ok(()) +} + #[api( protected: true, input: { @@ -191,6 +247,15 @@ async fn update_datastore(name: String, mut param: Value) -> Result<(), Error> { pub fn datastore_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) + .insert( + "mount", + CliCommand::new(&API_METHOD_MOUNT_DATASTORE) + .arg_param(&["store"]) + .completion_cb( + "store", + pbs_config::datastore::complete_removable_datastore_name, + ), + ) .insert( "show", CliCommand::new(&API_METHOD_SHOW_DATASTORE) @@ -201,6 +266,15 @@ pub fn datastore_commands() { "create", CliCommand::new(&API_METHOD_CREATE_DATASTORE).arg_param(&["name", "path"]), ) + .insert( + "unmount", + CliCommand::new(&API_METHOD_UNMOUNT_DATASTORE) + .arg_param(&["store"]) + .completion_cb( + "store", + pbs_config::datastore::complete_removable_datastore_name, + ), + ) .insert( "update", CliCommand::new(&API_METHOD_UPDATE_DATASTORE) -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:48 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:48 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 01/25] pbs-api-types: add backing-device to DataStoreConfig In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-2-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- changes since v13: * drop get_mount_point * update DATASTORE_DIR_NAME_SCHEMA description pbs-api-types/src/datastore.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 711051d05..b722c9ab7 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -45,7 +45,7 @@ const_regex!
{ pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); -pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") +pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or an absolute on-device path for removable datastores.") .min_length(1) .max_length(4096) .schema(); @@ -163,6 +163,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .minimum(1) .schema(); +/// Base directory where datastores are mounted +pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; + #[api] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -237,7 +240,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore schema: DATASTORE_SCHEMA, }, path: { - schema: DIR_NAME_SCHEMA, + schema: DATASTORE_DIR_NAME_SCHEMA, }, "notify-user": { optional: true, @@ -276,6 +279,12 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), type: String, }, + "backing-device": { + description: "The UUID of the filesystem partition for removable datastores.", + optional: true, + format: &proxmox_schema::api_types::UUID_FORMAT, + type: String, + } } )] #[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] @@ -323,6 +332,11 @@ pub struct DataStoreConfig { /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " #[serde(skip_serializing_if = "Option::is_none")] pub maintenance_mode: Option, + + /// The UUID of the device (for removable datastores) + #[updater(skip)] + #[serde(skip_serializing_if = "Option::is_none")] + pub backing_device: Option<String>, } #[api] @@ -357,12 +371,17 @@ impl DataStoreConfig { notification_mode: None, tuning: None, maintenance_mode: None, + backing_device: None, } } /// Returns the absolute path to the datastore content. pub fn absolute_path(&self) -> String { - self.path.clone() + if self.backing_device.is_some() { + format!("{DATASTORE_MOUNT_DIR}/{}", self.name) + } else { + self.path.clone() + } } pub fn get_maintenance_mode(&self) -> Option { -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:58 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:58 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 11/25] datastore: handle deletion of removable datastore properly In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-12-h.laimer@proxmox.com> Data deletion is only possible if the datastore is mounted; we won't attempt to mount it just for the purpose of deleting data. Signed-off-by: Hannes Laimer --- changes since v13: * log warn!
on errors in cleanup * also unmount without destroy_data pbs-datastore/src/datastore.rs | 4 +++- src/api2/config/datastore.rs | 39 +++++++++++++++++++++++++++++++++- 2 files changed, 41 insertions(+), 2 deletions(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 6a9fc2dc0..adf29f183 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -1535,7 +1535,9 @@ impl DataStore { // weird, but ok } Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => { - warn!("Cannot delete datastore directory (is it a mount point?).") + if datastore_config.backing_device.is_none() { + warn!("Cannot delete datastore directory (is it a mount point?).") + } } Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => { warn!("Datastore directory not empty, not deleting.") diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 75e1a1a56..5c2fd2573 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -1,4 +1,4 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use ::serde::{Deserialize, Serialize}; use anyhow::{bail, format_err, Error}; @@ -30,6 +30,7 @@ use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_bac use crate::api2::config::verify::delete_verification_job; use pbs_config::CachedUserInfo; +use pbs_datastore::get_datastore_mount_status; use proxmox_rest_server::WorkerTask; use crate::server::jobstate; @@ -561,6 +562,15 @@ pub async fn delete_datastore( http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name); } + let store_config: DataStoreConfig = config.lookup("datastore", &name)?; + + if destroy_data && get_datastore_mount_status(&store_config) == Some(false) { + http_bail!( + BAD_REQUEST, + "cannot destroy data on '{name}' unless the datastore is mounted" + ); + } + if !keep_job_configs { for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { delete_verification_job(job.config.id, None, rpcenv)? 
@@ -591,6 +601,18 @@ pub async fn delete_datastore( let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) + { + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); + let _ = proxmox_daemon::command_socket::send_raw( + sock, + &format!( + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", + name.clone() + ), + ) + .await; + }; let upid = WorkerTask::new_thread( "delete-datastore", @@ -610,6 +632,21 @@ pub async fn delete_datastore( warn!("failed to notify after datastore removal: {err}"); } + // cleanup for removable datastores + // - unmount + // - remove mount dir, if destroy_data + if store_config.backing_device.is_some() { + let mount_point = store_config.absolute_path(); + if get_datastore_mount_status(&store_config) == Some(true) { + let _ = unmount_by_mountpoint(Path::new(&mount_point)) + .inspect_err(|e| warn!("could not unmount device after deletion: {e}")); + } + if destroy_data { + let _ = std::fs::remove_dir(&mount_point) + .inspect_err(|e| warn!("could not remove directory after deletion: {e}")); + } + } + Ok(()) }, )?; -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:06 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:06 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 19/26] ui: maintenance: fix disable msg field if no type is selected In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-20-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/window/MaintenanceOptions.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js index 1ee92542e..527c36987 100644 --- a/www/window/MaintenanceOptions.js +++ b/www/window/MaintenanceOptions.js @@ -56,12 +56,17 @@ Ext.define('PBS.window.MaintenanceOptions', { fieldLabel: gettext('Maintenance Type'), value: '__default__', deleteEmpty: true, + listeners: { + change: (field, newValue) => { + Ext.getCmp('message-field').setDisabled(newValue === '__default__'); + }, + }, }, { xtype: 'proxmoxtextfield', + id: 'message-field', name: 'maintenance-msg', fieldLabel: gettext('Description'), - // FIXME: disable if maintenance type is none }, ], }, -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:03 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:03 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 16/26] ui: tree: render unmounted datastores correctly In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-17-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/NavigationTree.js | 17 +++++++++++++---- www/css/ext6-pbs.css | 8 ++++++++ www/datastore/DataStoreListSummary.js | 1 + 3 files changed, 22 insertions(+), 4 deletions(-) diff --git a/www/NavigationTree.js b/www/NavigationTree.js index 53c8daff9..dd03fbd62 100644 --- a/www/NavigationTree.js +++ b/www/NavigationTree.js @@ -266,14 +266,23 @@ Ext.define('PBS.view.main.NavigationTree', { while (name.localeCompare(getChildTextAt(j)) > 0 && (j+1) < list.childNodes.length) { j++; } - - let [qtip, iconCls] = ['', 'fa fa-database']; + let mainIcon = `fa fa-${records[i].data.mount-status !== 'nonremovable' ? 
'plug' : 'database'}`; + let [qtip, iconCls] = ['', mainIcon]; const maintenance = records[i].data.maintenance; + + const removable_not_mounted = records[i].data['mount-status'] === 'notmounted'; + if (removable_not_mounted) { + iconCls = `${mainIcon} pmx-tree-icon-custom unplugged`; + qtip = gettext('Removable datastore not mounted'); + } if (maintenance) { const [type, message] = PBS.Utils.parseMaintenanceMode(maintenance); qtip = `${type}${message ? ': ' + message : ''}`; - let maintenanceTypeCls = type === 'delete' ? 'destroying' : 'maintenance'; - iconCls = `fa fa-database pmx-tree-icon-custom ${maintenanceTypeCls}`; + let maintenanceTypeCls = 'maintenance'; + if (type === 'delete') { + maintenanceTypeCls = 'destroying'; + } + iconCls = `${mainIcon} pmx-tree-icon-custom ${maintenanceTypeCls}`; } if (getChildTextAt(j).localeCompare(name) !== 0) { diff --git a/www/css/ext6-pbs.css b/www/css/ext6-pbs.css index c33ce6845..706e681e9 100644 --- a/www/css/ext6-pbs.css +++ b/www/css/ext6-pbs.css @@ -271,6 +271,10 @@ span.snapshot-comment-column { content: "\ "; } +.x-treelist-item-icon.fa-plug, .pmx-tree-icon-custom.fa-plug { + font-size: 12px; +} + /* datastore maintenance */ .pmx-tree-icon-custom.maintenance:after { content: "\f0ad"; @@ -290,6 +294,10 @@ span.snapshot-comment-column { color: #888; } +.pmx-tree-icon-custom.unplugged:before { + color: #888; +} + /*' PBS specific icons */ .pbs-icon-tape { diff --git a/www/datastore/DataStoreListSummary.js b/www/datastore/DataStoreListSummary.js index b908034d8..f7ea83e7b 100644 --- a/www/datastore/DataStoreListSummary.js +++ b/www/datastore/DataStoreListSummary.js @@ -22,6 +22,7 @@ Ext.define('PBS.datastore.DataStoreListSummary', { stillbad: 0, deduplication: 1.0, error: "", + removable: false, maintenance: '', }, }, -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:07 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:07 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 20/26] ui: render 'unmount' maintenance mode correctly In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-21-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/Utils.js | 4 +++- www/window/MaintenanceOptions.js | 10 ++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/www/Utils.js b/www/Utils.js index 7756e9b5d..6bae9b709 100644 --- a/www/Utils.js +++ b/www/Utils.js @@ -775,7 +775,7 @@ Ext.define('PBS.Utils', { let extra = ''; if (activeTasks !== undefined) { - const conflictingTasks = activeTasks.write + (type === 'offline' ? activeTasks.read : 0); + const conflictingTasks = activeTasks.write + (type === 'offline' || type === 'unmount' ? 
activeTasks.read : 0);
 
         if (conflictingTasks > 0) {
             extra += '| ';
@@ -795,6 +795,8 @@ Ext.define('PBS.Utils', {
                 break;
             case 'offline': modeText = gettext("Offline");
                 break;
+            case 'unmount': modeText = gettext("Unmounting");
+                break;
         }
         return `${modeText} ${extra}`;
     },
diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js
index 527c36987..d7348cb4f 100644
--- a/www/window/MaintenanceOptions.js
+++ b/www/window/MaintenanceOptions.js
@@ -52,6 +52,7 @@ Ext.define('PBS.window.MaintenanceOptions', {
         items: [
             {
                 xtype: 'pbsMaintenanceType',
+                id: 'type-field',
                 name: 'maintenance-type',
                 fieldLabel: gettext('Maintenance Type'),
                 value: '__default__',
@@ -85,6 +86,15 @@ Ext.define('PBS.window.MaintenanceOptions', {
             };
         }
 
+        let unmounting = options['maintenance-type'] === 'unmount';
+        let defaultType = options['maintenance-type'] === '__default__';
+        if (unmounting) {
+            options['maintenance-type'] = '';
+        }
+
         me.callParent([options]);
+
+        Ext.ComponentManager.get('type-field').setDisabled(unmounting);
+        Ext.ComponentManager.get('message-field').setDisabled(unmounting || defaultType);
     },
 });
-- 
2.39.5

From h.laimer at proxmox.com  Fri Nov 22 15:46:57 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:46:57 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 10/25] add auto-mounting
 for removable datastores
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <20241122144713.299130-11-h.laimer@proxmox.com>

If a device houses multiple datastores, none of them will be mounted
automatically. If a device only contains a single datastore, it will be
mounted automatically. The reason for not mounting multiple datastores
automatically is that we don't know which one is actually wanted, and
since mounting all of them means all of them also have to be unmounted
manually, it made sense to have the user choose which one to mount.
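
Roughly, the decision boils down to the following sketch (simplified;
`should_auto_mount` is an illustrative name only, the actual logic lives
in the `uuid-mount` manager command below):

```
use pbs_api_types::DataStoreConfig;

// Auto-mount only when the plugged-in device backs exactly one configured
// datastore; with zero or several candidates, do nothing and let the user
// decide which datastore to mount.
fn should_auto_mount<'a>(stores: &'a [DataStoreConfig], uuid: &str) -> Option<&'a DataStoreConfig> {
    let mut matching = stores
        .iter()
        .filter(|store| store.backing_device.as_deref() == Some(uuid));
    match (matching.next(), matching.next()) {
        (Some(single), None) => Some(single),
        _ => None,
    }
}
```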
Signed-off-by: Hannes Laimer --- changes since v13: * skip API alltogether and use mounting wrapper directly * load datastore config directly debian/proxmox-backup-server.install | 1 + debian/proxmox-backup-server.udev | 3 ++ etc/Makefile | 1 + etc/removable-device-attach at .service | 8 ++++ src/bin/proxmox_backup_manager/datastore.rs | 53 ++++++++++++++++++++- 5 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 etc/removable-device-attach at .service diff --git a/debian/proxmox-backup-server.install b/debian/proxmox-backup-server.install index 79757eadb..ff581e3dd 100644 --- a/debian/proxmox-backup-server.install +++ b/debian/proxmox-backup-server.install @@ -4,6 +4,7 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/ etc/proxmox-backup-daily-update.timer /lib/systemd/system/ etc/proxmox-backup-proxy.service /lib/systemd/system/ etc/proxmox-backup.service /lib/systemd/system/ +etc/removable-device-attach at .service /lib/systemd/system/ usr/bin/pmt usr/bin/pmtx usr/bin/proxmox-tape diff --git a/debian/proxmox-backup-server.udev b/debian/proxmox-backup-server.udev index afdfb2bc7..e21b8bc71 100644 --- a/debian/proxmox-backup-server.udev +++ b/debian/proxmox-backup-server.udev @@ -16,3 +16,6 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg" LABEL="persistent_storage_tape_end" + +# triggers the mounting of a removable device +ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}" \ No newline at end of file diff --git a/etc/Makefile b/etc/Makefile index 42f639f62..26e91684e 100644 --- a/etc/Makefile +++ b/etc/Makefile @@ -2,6 +2,7 @@ include ../defines.mk UNITS := \ proxmox-backup-daily-update.timer \ + removable-device-attach at .service DYNAMIC_UNITS := \ proxmox-backup-banner.service \ diff --git a/etc/removable-device-attach at .service b/etc/removable-device-attach at .service new file mode 100644 index 000000000..e10d1ea3c --- /dev/null +++ b/etc/removable-device-attach at .service @@ -0,0 +1,8 @@ +[Unit] +Description=Try to mount the removable device of a datastore with uuid '%i'. +After=proxmox-backup-proxy.service +Requires=proxmox-backup-proxy.service + +[Service] +Type=simple +ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs index 32a55fb9c..bcfdae786 100644 --- a/src/bin/proxmox_backup_manager/datastore.rs +++ b/src/bin/proxmox_backup_manager/datastore.rs @@ -9,7 +9,7 @@ use proxmox_backup::api2; use proxmox_backup::api2::config::datastore::DeletableProperty; use proxmox_backup::client_helpers::connect_to_localhost; -use anyhow::Error; +use anyhow::{format_err, Error}; use serde_json::Value; #[api( @@ -244,6 +244,53 @@ async fn update_datastore(name: String, mut param: Value) -> Result<(), Error> { Ok(()) } +#[api( + protected: true, + input: { + properties: { + uuid: { + type: String, + description: "The UUID of the device that should be mounted", + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + }, + }, +)] +/// Try mounting a removable datastore given the UUID. 
+async fn uuid_mount(param: Value, _rpcenv: &mut dyn RpcEnvironment) -> Result { + let uuid = param["uuid"] + .as_str() + .ok_or_else(|| format_err!("uuid has to be specified"))?; + + let (config, _digest) = pbs_config::datastore::config()?; + let list: Vec = config.convert_to_typed_array("datastore")?; + let matching_stores: Vec = list + .into_iter() + .filter(|store: &DataStoreConfig| { + store + .backing_device + .clone() + .map_or(false, |device| device.eq(&uuid)) + }) + .collect(); + + if matching_stores.len() != 1 { + return Ok(Value::Null); + } + + if let Some(store) = matching_stores.get(0) { + api2::admin::datastore::do_mount_device(store.clone())?; + return Ok(Value::Null); + } + + // we don't want to fail for UUIDs that are not associated with datastores, as that produces + // quite some noise in the logs, given this is check for every device that is plugged in. + Ok(Value::Null) +} + pub fn datastore_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) @@ -289,6 +336,10 @@ pub fn datastore_commands() -> CommandLineInterface { pbs_config::datastore::complete_calendar_event, ), ) + .insert( + "uuid-mount", + CliCommand::new(&API_METHOD_UUID_MOUNT).arg_param(&["uuid"]), + ) .insert( "remove", CliCommand::new(&API_METHOD_DELETE_DATASTORE) -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:09 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:09 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 22/25] api: node: include removable datastores in directory list In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-23-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- changes since v13: * drop check with get_mount_point src/api2/node/disks/directory.rs | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index b6006b47c..11d07af42 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -45,6 +45,8 @@ pub struct DatastoreMountInfo { pub path: String, /// The mounted device. 
pub device: String, + /// This is removable + pub removable: bool, /// File system type pub filesystem: Option, /// Mount options @@ -61,7 +63,7 @@ pub struct DatastoreMountInfo { } }, returns: { - description: "List of systemd datastore mount units.", + description: "List of removable-datastore devices and systemd datastore mount units.", type: Array, items: { type: DatastoreMountInfo, @@ -100,6 +102,28 @@ pub fn list_datastore_mounts() -> Result, Error> { path: data.Where, filesystem: data.Type, options: data.Options, + removable: false, + }); + } + + let (config, _digest) = pbs_config::datastore::config()?; + let store_list: Vec = config.convert_to_typed_array("datastore")?; + + for item in store_list + .into_iter() + .filter(|store| store.backing_device.is_some()) + { + let Some(backing_device) = item.backing_device.as_deref() else { + continue; + }; + list.push(DatastoreMountInfo { + unitfile: "datastore config".to_string(), + name: item.name.clone(), + device: format!("/dev/disk/by-uuid/{backing_device}"), + path: item.absolute_path(), + filesystem: None, + options: None, + removable: true, }); } -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:11 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:11 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 24/26] ui: support create removable datastore through directory creation In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-25-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 2 ++ www/DirectoryList.js | 13 +++++++++++++ www/window/CreateDirectory.js | 14 ++++++++++++++ 3 files changed, 29 insertions(+) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index ff817b253..2f7cc7a27 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -150,6 +150,8 @@ pub fn list_datastore_mounts() -> Result, Error> { "removable-datastore": { description: "The added datastore is removable.", type: bool, + optional: true, + default: false, }, filesystem: { type: FileSystemType, diff --git a/www/DirectoryList.js b/www/DirectoryList.js index adefa9abf..25921a623 100644 --- a/www/DirectoryList.js +++ b/www/DirectoryList.js @@ -121,6 +121,19 @@ Ext.define('PBS.admin.Directorylist', { ], columns: [ + { + text: '', + flex: 0, + width: 35, + dataIndex: 'removable', + renderer: function(_text, _, row) { + if (row.data.removable) { + return ``; + } else { + return ''; + } + }, + }, { text: gettext('Path'), dataIndex: 'path', diff --git a/www/window/CreateDirectory.js b/www/window/CreateDirectory.js index 6aabe21ab..38d6979d9 100644 --- a/www/window/CreateDirectory.js +++ b/www/window/CreateDirectory.js @@ -43,6 +43,20 @@ Ext.define('PBS.window.CreateDirectory', { name: 'add-datastore', fieldLabel: gettext('Add as Datastore'), value: '1', + listeners: { + change(field, newValue, _oldValue) { + let form = field.up('form'); + let rmBox = form.down('[name=removable-datastore]'); + + rmBox.setDisabled(!newValue); + rmBox.setValue(false); + }, + }, + }, + { + xtype: 'proxmoxcheckbox', + name: 'removable-datastore', + fieldLabel: gettext('is removable'), }, ], }); -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:08 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:08 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 21/25] api: node: allow creation of removable datastore through directory 
endpoint In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-22-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- changes since v13: * use one worker with few conditionals instead of two src/api2/node/disks/directory.rs | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 7f5402207..b6006b47c 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -123,6 +123,11 @@ pub fn list_datastore_mounts() -> Result, Error> { description: "Configure a datastore using the directory.", type: bool, optional: true, + default: false, + }, + "removable-datastore": { + description: "The added datastore is removable.", + type: bool, }, filesystem: { type: FileSystemType, @@ -141,7 +146,8 @@ pub fn list_datastore_mounts() -> Result, Error> { pub fn create_datastore_disk( name: String, disk: String, - add_datastore: Option, + add_datastore: bool, + removable_datastore: bool, filesystem: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { @@ -156,7 +162,6 @@ pub fn create_datastore_disk( } let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); - // check if the default path exists already. // bail if it is not empty or another filesystem mounted on top let default_path = std::path::PathBuf::from(&mount_point); @@ -183,7 +188,6 @@ pub fn create_datastore_disk( move |_worker| { info!("create datastore '{name}' on disk {disk}"); - let add_datastore = add_datastore.unwrap_or(false); let filesystem = filesystem.unwrap_or(FileSystemType::Ext4); let manager = DiskManage::new(); @@ -196,18 +200,24 @@ pub fn create_datastore_disk( let uuid = get_fs_uuid(&partition)?; let uuid_path = format!("/dev/disk/by-uuid/{}", uuid); - let mount_unit_name = - create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?; + if !removable_datastore { + let mount_unit_name = + create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?; - crate::tools::systemd::reload_daemon()?; - crate::tools::systemd::enable_unit(&mount_unit_name)?; - crate::tools::systemd::start_unit(&mount_unit_name)?; + crate::tools::systemd::reload_daemon()?; + crate::tools::systemd::enable_unit(&mount_unit_name)?; + crate::tools::systemd::start_unit(&mount_unit_name)?; + } if add_datastore { let lock = pbs_config::datastore::lock_config()?; - let datastore: DataStoreConfig = - serde_json::from_value(json!({ "name": name, "path": mount_point }))?; - + let datastore: DataStoreConfig = if removable_datastore { + serde_json::from_value( + json!({ "name": name, "path": format!("/{name}"), "backing-device": uuid }), + )? + } else { + serde_json::from_value(json!({ "name": name, "path": mount_point }))? + }; let (config, _digest) = pbs_config::datastore::config()?; if config.sections.contains_key(&datastore.name) { -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:12 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:12 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 25/26] bin: debug: add inspect device command In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-26-h.laimer@proxmox.com> ... 
to get information about (removable) datastores a device contains Signed-off-by: Hannes Laimer --- src/bin/proxmox_backup_debug/inspect.rs | 149 ++++++++++++++++++++++++ 1 file changed, 149 insertions(+) diff --git a/src/bin/proxmox_backup_debug/inspect.rs b/src/bin/proxmox_backup_debug/inspect.rs index 28a472b0f..17df09be2 100644 --- a/src/bin/proxmox_backup_debug/inspect.rs +++ b/src/bin/proxmox_backup_debug/inspect.rs @@ -331,6 +331,151 @@ fn inspect_file( Ok(()) } +/// Return the count of VM, CT and host backup groups and the count of namespaces +/// as this tuple (vm, ct, host, ns) +fn get_basic_ds_info(path: String) -> Result<(i64, i64, i64, i64), Error> { + let mut vms = 0; + let mut cts = 0; + let mut hosts = 0; + let mut ns = 0; + let mut walker = WalkDir::new(path).into_iter(); + + while let Some(entry_result) = walker.next() { + let entry = entry_result?; + if !entry.file_type().is_dir() { + continue; + } + + let Some(name) = entry.path().file_name().and_then(|a| a.to_str()) else { + continue; + }; + + if name == ".chunks" { + walker.skip_current_dir(); + continue; + } + + let dir_count = std::fs::read_dir(entry.path())? + .filter_map(Result::ok) + .filter(|entry| entry.path().is_dir()) + .count() as i64; + + match name { + "ns" => ns += dir_count, + "vm" => { + vms += dir_count; + walker.skip_current_dir(); + } + "ct" => { + cts += dir_count; + walker.skip_current_dir(); + } + "host" => { + hosts += dir_count; + walker.skip_current_dir(); + } + _ => { + // root or ns dir + } + } + } + + Ok((vms, cts, hosts, ns)) +} + +#[api( + input: { + properties: { + device: { + description: "Device path, usually /dev/...", + type: String, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Inspect a device for possible datastores on it +fn inspect_device(device: String, param: Value) -> Result<(), Error> { + let output_format = get_output_format(¶m); + let tmp_mount_path = format!( + "{}/{:x}", + pbs_buildcfg::rundir!("/mount"), + proxmox_uuid::Uuid::generate() + ); + + let default_options = proxmox_sys::fs::CreateOptions::new(); + proxmox_sys::fs::create_path( + &tmp_mount_path, + Some(default_options.clone()), + Some(default_options.clone()), + )?; + let mut mount_cmd = std::process::Command::new("mount"); + mount_cmd.arg(device.clone()); + mount_cmd.arg(tmp_mount_path.clone()); + proxmox_sys::command::run_command(mount_cmd, None)?; + + let mut walker = WalkDir::new(tmp_mount_path.clone()).into_iter(); + + let mut stores = Vec::new(); + + let mut ds_count = 0; + while let Some(entry_result) = walker.next() { + let entry = entry_result?; + + if entry.file_type().is_dir() + && entry + .file_name() + .to_str() + .map_or(false, |name| name == ".chunks") + { + let store_path = entry + .path() + .to_str() + .and_then(|n| n.strip_suffix("/.chunks")); + + if let Some(store_path) = store_path { + ds_count += 1; + let (vm, ct, host, ns) = get_basic_ds_info(store_path.to_string())?; + stores.push(json!({ + "path": store_path.strip_prefix(&tmp_mount_path).unwrap_or("???"), + "vm-count": vm, + "ct-count": ct, + "host-count": host, + "ns-count": ns, + })); + }; + + walker.skip_current_dir(); + } + } + + let mut umount_cmd = std::process::Command::new("umount"); + umount_cmd.arg(tmp_mount_path.clone()); + proxmox_sys::command::run_command(umount_cmd, None)?; + std::fs::remove_dir(std::path::Path::new(&tmp_mount_path))?; + + if output_format == "text" { + println!("Device containes {} stores", ds_count); + println!("---------------"); + for s in stores { + println!( 
+ "Datastore at {} | VM: {}, CT: {}, HOST: {}, NS: {}", + s["path"], s["vm-count"], s["ct-count"], s["host-count"], s["ns-count"] + ); + } + } else { + format_and_print_result( + &json!({"store_count": stores.len(), "stores": stores}), + &output_format, + ); + } + + Ok(()) +} + pub fn inspect_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert( @@ -340,6 +485,10 @@ pub fn inspect_commands() -> CommandLineInterface { .insert( "file", CliCommand::new(&API_METHOD_INSPECT_FILE).arg_param(&["file"]), + ) + .insert( + "device", + CliCommand::new(&API_METHOD_INSPECT_DEVICE).arg_param(&["device"]), ); cmd_def.into() -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:52 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:52 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 05/25] api: admin: add (un)mount endpoint for removable datastores In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-6-h.laimer@proxmox.com> Removable datastores can be mounted unless - they are already - their device is not present For unmounting the maintenance mode is set to `unmount`, which prohibits the starting of any new tasks envolving any IO, this mode is unset either - on completion of the unmount - on abort of the unmount tasks If the unmounting itself should fail, the maintenance mode stays in place and requires manual intervention by unsetting it in the config file directly. This is intentional, as unmounting should not fail, and if it should the situation should be looked at. Signed-off-by: Hannes Laimer --- changes since v13: * improve logging * fix racy unmount * (manually) changing maintenance during unmount will prevent unmounting and result in failed unmount task src/api2/admin/datastore.rs | 294 ++++++++++++++++++++++++++++++++++-- 1 file changed, 283 insertions(+), 11 deletions(-) diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 3b863c06b..85522345e 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -4,7 +4,7 @@ use std::collections::HashSet; use std::ffi::OsStr; use std::ops::Deref; use std::os::unix::ffi::OsStrExt; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::sync::Arc; use anyhow::{bail, format_err, Error}; @@ -14,7 +14,7 @@ use hyper::{header, Body, Response, StatusCode}; use serde::Deserialize; use serde_json::{json, Value}; use tokio_stream::wrappers::ReceiverStream; -use tracing::{info, warn}; +use tracing::{debug, info, warn}; use proxmox_async::blocking::WrappedReaderStream; use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; @@ -30,6 +30,7 @@ use proxmox_sys::fs::{ file_read_firstline, file_read_optional_string, replace_file, CreateOptions, }; use proxmox_time::CalendarEvent; +use proxmox_worker_task::WorkerTaskContext; use pxar::accessor::aio::Accessor; use pxar::EntryKind; @@ -38,13 +39,13 @@ use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, - JobScheduleStatus, KeepOptions, Operation, PruneJobOptions, SnapshotListItem, - SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, 
DATASTORE_SCHEMA, - IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, - VERIFICATION_OUTDATED_AFTER_SCHEMA, + JobScheduleStatus, KeepOptions, MaintenanceMode, MaintenanceType, Operation, PruneJobOptions, + SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, + CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, + MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; use pbs_config::CachedUserInfo; @@ -59,8 +60,8 @@ use pbs_datastore::index::IndexFile; use pbs_datastore::manifest::BackupManifest; use pbs_datastore::prune::compute_prune_info; use pbs_datastore::{ - check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, - StoreProgress, + check_backup_owner, ensure_datastore_is_mounted, task_tracking, BackupDir, BackupGroup, + DataStore, LocalChunkReader, StoreProgress, }; use pbs_tools::json::required_string_param; use proxmox_rest_server::{formatter, WorkerTask}; @@ -2394,6 +2395,275 @@ pub async fn set_backup_owner( .await? } +/// Here we +/// +/// 1. mount the removable device to `/mount/` +/// 2. bind mount `/mount//` to `/mnt/datastore/` +/// 3. unmount `/mount/` +/// +/// leaving us with the datastore being mounted directly with its name under /mnt/datastore/... +/// +/// The reason for the randomized device mounting paths is to avoid two tasks trying to mount to +/// the same path, this is *very* unlikely since the device is only mounted really shortly, but +/// technically possible. 
+pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> { + if let Some(uuid) = datastore.backing_device.as_ref() { + let mount_point = datastore.absolute_path(); + if pbs_datastore::get_datastore_mount_status(&datastore) == Some(true) { + bail!("device is already mounted at '{}'", mount_point); + } + let tmp_mount_path = format!( + "{}/{:x}", + pbs_buildcfg::rundir!("/mount"), + proxmox_uuid::Uuid::generate() + ); + + let default_options = proxmox_sys::fs::CreateOptions::new(); + proxmox_sys::fs::create_path( + &tmp_mount_path, + Some(default_options.clone()), + Some(default_options.clone()), + )?; + + info!("temporarily mounting '{uuid}' to '{}'", tmp_mount_path); + crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path)) + .map_err(|e| format_err!("mounting to tmp path failed: {e}"))?; + + let full_store_path = format!( + "{tmp_mount_path}/{}", + datastore.path.trim_start_matches('/') + ); + let backup_user = pbs_config::backup_user()?; + let options = CreateOptions::new() + .owner(backup_user.uid) + .group(backup_user.gid); + + proxmox_sys::fs::create_path( + &mount_point, + Some(default_options.clone()), + Some(options.clone()), + ) + .map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?; + + // can't be created before it is mounted, so we have to do it here + proxmox_sys::fs::create_path( + &full_store_path, + Some(default_options.clone()), + Some(options.clone()), + ) + .map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?; + + info!( + "bind mount '{}'({}) to '{}'", + datastore.name, datastore.path, mount_point + ); + if let Err(err) = + crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point)) + { + debug!("unmounting '{}'", tmp_mount_path); + let _ = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)) + .inspect_err(|e| warn!("unmounting from tmp path '{tmp_mount_path} failed: {e}'")); + let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)) + .inspect_err(|e| warn!("removing tmp path '{tmp_mount_path} failed: {e}'")); + return Err(format_err!( + "Datastore '{}' cound not be mounted: {}.", + datastore.name, + err + )); + } + + debug!("unmounting '{}'", tmp_mount_path); + let _ = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)) + .map_err(|e| format_err!("unmounting from tmp path '{tmp_mount_path} failed: {e}'")); + let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)) + .map_err(|e| format_err!("removing tmp path '{tmp_mount_path} failed: {e}'")); + + Ok(()) + } else { + Err(format_err!( + "Datastore '{}' cannot be mounted because it is not removable.", + datastore.name + )) + } +} + +#[api( + protected: true, + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + } + }, + returns: { + schema: UPID_SCHEMA, + }, + access: { + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), + }, +)] +/// Mount removable datastore. 
+pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let (section_config, _digest) = pbs_config::datastore::config()?; + let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; + + if datastore.backing_device.is_none() { + bail!("datastore '{store}' is not removable"); + } + + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + + let upid = WorkerTask::new_thread( + "mount-device", + Some(store), + auth_id.to_string(), + to_stdout, + move |_worker| do_mount_device(datastore), + )?; + + Ok(json!(upid)) +} + +fn expect_maintanance_unmounting( + store: &str, +) -> Result<(pbs_config::BackupLockGuard, DataStoreConfig), Error> { + let lock = pbs_config::datastore::lock_config()?; + let (section_config, _digest) = pbs_config::datastore::config()?; + let store_config: DataStoreConfig = section_config.lookup("datastore", store)?; + + if store_config + .get_maintenance_mode() + .map_or(true, |m| m.ty != MaintenanceType::Unmount) + { + bail!("maintenance mode is not 'Unmount'"); + } + + Ok((lock, store_config)) +} + +fn unset_maintenance( + _lock: pbs_config::BackupLockGuard, + mut config: DataStoreConfig, +) -> Result<(), Error> { + let (mut section_config, _digest) = pbs_config::datastore::config()?; + config.maintenance_mode = None; + section_config.set_data(&config.name, "datastore", &config)?; + pbs_config::datastore::save_config(§ion_config)?; + Ok(()) +} + +fn do_unmount_device( + datastore: DataStoreConfig, + worker: Option<&dyn WorkerTaskContext>, +) -> Result<(), Error> { + if datastore.backing_device.is_none() { + bail!("can't unmount non-removable datastore"); + } + let mount_point = datastore.absolute_path(); + + let mut active_operations = task_tracking::get_active_operations(&datastore.name)?; + let mut old_status = String::new(); + let mut aborted = false; + while active_operations.read + active_operations.write > 0 { + if let Some(worker) = worker { + if worker.abort_requested() { + aborted = true; + break; + } + let status = format!( + "cannot unmount yet, still {} read and {} write operations active", + active_operations.read, active_operations.write + ); + if status != old_status { + info!("{status}"); + old_status = status; + } + } + std::thread::sleep(std::time::Duration::from_secs(1)); + active_operations = task_tracking::get_active_operations(&datastore.name)?; + } + + if aborted { + let _ = expect_maintanance_unmounting(&datastore.name) + .inspect_err(|e| warn!("maintenance mode was not as expected: {e}")) + .and_then(|(lock, config)| { + unset_maintenance(lock, config) + .inspect_err(|e| warn!("could not reset maintenance mode: {e}")) + }); + bail!("aborted, due to user request"); + } else { + let (lock, config) = expect_maintanance_unmounting(&datastore.name)?; + crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?; + let _ = unset_maintenance(lock, config) + .inspect_err(|e| warn!("could not reset maintenance mode: {e}")); + } + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + store: { schema: DATASTORE_SCHEMA }, + }, + }, + returns: { + schema: UPID_SCHEMA, + }, + access: { + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true), + } +)] +/// Unmount a removable device that is associated with the datastore +pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let _lock = pbs_config::datastore::lock_config()?; + let (mut section_config, _digest) = 
pbs_config::datastore::config()?; + let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; + + if datastore.backing_device.is_none() { + bail!("datastore '{store}' is not removable"); + } + + ensure_datastore_is_mounted(&datastore)?; + + datastore.set_maintenance_mode(Some(MaintenanceMode { + ty: MaintenanceType::Unmount, + message: None, + }))?; + section_config.set_data(&store, "datastore", &datastore)?; + pbs_config::datastore::save_config(§ion_config)?; + + drop(_lock); + + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) + { + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); + let _ = proxmox_daemon::command_socket::send_raw( + sock, + &format!( + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", + &store + ), + ) + .await; + } + + let upid = WorkerTask::new_thread( + "unmount-device", + Some(store), + auth_id.to_string(), + to_stdout, + move |worker| do_unmount_device(datastore, Some(&worker)), + )?; + + Ok(json!(upid)) +} + #[sortable] const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ ( @@ -2432,6 +2702,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ .get(&API_METHOD_LIST_GROUPS) .delete(&API_METHOD_DELETE_GROUP), ), + ("mount", &Router::new().post(&API_METHOD_MOUNT)), ( "namespace", // FIXME: move into datastore:: sub-module?! @@ -2466,6 +2737,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ .delete(&API_METHOD_DELETE_SNAPSHOT), ), ("status", &Router::new().get(&API_METHOD_STATUS)), + ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)), ( "upload-backup-log", &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG), -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:51 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:51 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 04/25] datastore: add helper for checking if a datastore is mounted In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-5-h.laimer@proxmox.com> ... at a specific location. Also adds two additional functions to get the mount status, and ensuring a removable datastore is mounted. 
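
A minimal usage sketch for the new helpers (the caller here is made up;
the signatures are the ones introduced by this patch):

```
use anyhow::Error;
use pbs_api_types::DataStoreConfig;
use pbs_datastore::{ensure_datastore_is_mounted, get_datastore_mount_status};

fn log_mount_state(store: &DataStoreConfig) -> Result<(), Error> {
    // None => not a removable datastore, Some(_) => removable and (un)mounted
    match get_datastore_mount_status(store) {
        None => println!("'{}' is not a removable datastore", store.name),
        Some(true) => println!("'{}' is mounted", store.name),
        Some(false) => println!("'{}' is currently not mounted", store.name),
    }
    // only bails for removable datastores that are currently not mounted
    ensure_datastore_is_mounted(store)
}
```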
Co-authored-by: Wolfgang Bumiller Signed-off-by: Hannes Laimer --- change since v13: * add two helpers to not have to have the same code in a amount a million places pbs-datastore/src/datastore.rs | 74 +++++++++++++++++++++++++++++ pbs-datastore/src/lib.rs | 4 +- src/server/metric_collection/mod.rs | 4 ++ 3 files changed, 81 insertions(+), 1 deletion(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 2bf2b8437..6a9fc2dc0 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -1,5 +1,6 @@ use std::collections::{HashMap, HashSet}; use std::io::{self, Write}; +use std::os::unix::ffi::OsStrExt; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; use std::sync::{Arc, LazyLock, Mutex}; @@ -14,6 +15,7 @@ use proxmox_schema::ApiType; use proxmox_sys::error::SysError; use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions}; use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard}; +use proxmox_sys::linux::procfs::MountInfo; use proxmox_sys::process_locker::ProcessLockSharedGuard; use proxmox_worker_task::WorkerTaskContext; @@ -46,6 +48,70 @@ pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> Ok(()) } +/// Check if a device with a given UUID is currently mounted at store_mount_point by +/// comparing the `st_rdev` values of `/dev/disk/by-uuid/` and the source device in +/// /proc/self/mountinfo. +/// +/// If we can't check if it is mounted, we treat that as not mounted, +/// returning false. +/// +/// Reasons it could fail other than not being mounted where expected: +/// - could not read /proc/self/mountinfo +/// - could not stat /dev/disk/by-uuid/ +/// - /dev/disk/by-uuid/ is not a block device +/// +/// Since these are very much out of our control, there is no real value in distinguishing +/// between them, so for this function they all are treated as 'device not mounted' +fn is_datastore_mounted_at(store_mount_point: String, device_uuid: &str) -> bool { + use nix::sys::stat::SFlag; + + let store_mount_point = Path::new(&store_mount_point); + + let dev_node = match nix::sys::stat::stat(format!("/dev/disk/by-uuid/{device_uuid}").as_str()) { + Ok(stat) if SFlag::from_bits_truncate(stat.st_mode) == SFlag::S_IFBLK => stat.st_rdev, + _ => return false, + }; + + let Ok(mount_info) = MountInfo::read() else { + return false; + }; + + for (_, entry) in mount_info { + let Some(source) = entry.mount_source else { + continue; + }; + + if entry.mount_point != store_mount_point || !source.as_bytes().starts_with(b"/") { + continue; + } + + if let Ok(stat) = nix::sys::stat::stat(source.as_os_str()) { + let sflag = SFlag::from_bits_truncate(stat.st_mode); + + if sflag == SFlag::S_IFBLK && stat.st_rdev == dev_node { + return true; + } + } + } + + false +} + +pub fn get_datastore_mount_status(config: &DataStoreConfig) -> Option { + let Some(ref device_uuid) = config.backing_device else { + return None; + }; + Some(is_datastore_mounted_at(config.absolute_path(), device_uuid)) +} + +pub fn ensure_datastore_is_mounted(config: &DataStoreConfig) -> Result<(), Error> { + match get_datastore_mount_status(config) { + Some(true) => Ok(()), + Some(false) => Err(format_err!("Datastore '{}' is not mounted", config.name)), + None => Ok(()), + } +} + /// Datastore Management /// /// A Datastore can store severals backups, and provides the @@ -156,6 +222,12 @@ impl DataStore { } } + if get_datastore_mount_status(&config) == Some(false) { + let mut datastore_cache = DATASTORE_MAP.lock().unwrap(); + 
datastore_cache.remove(&config.name);
+            bail!("datastore '{}' is not mounted", config.name);
+        }
+
         let mut datastore_cache = DATASTORE_MAP.lock().unwrap();
 
         let entry = datastore_cache.get(name);
@@ -259,6 +331,8 @@ impl DataStore {
     ) -> Result, Error> {
         let name = config.name.clone();
 
+        ensure_datastore_is_mounted(&config)?;
+
         let tuning: DatastoreTuning = serde_json::from_value(
             DatastoreTuning::API_SCHEMA
                 .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index 8050cf4d0..5014b6c09 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -201,7 +201,9 @@ pub use manifest::BackupManifest;
 pub use store_progress::StoreProgress;
 
 mod datastore;
-pub use datastore::{check_backup_owner, DataStore};
+pub use datastore::{
+    check_backup_owner, ensure_datastore_is_mounted, get_datastore_mount_status, DataStore,
+};
 
 mod hierarchy;
 pub use hierarchy::{
diff --git a/src/server/metric_collection/mod.rs b/src/server/metric_collection/mod.rs
index b95dba203..2ede8408f 100644
--- a/src/server/metric_collection/mod.rs
+++ b/src/server/metric_collection/mod.rs
@@ -176,6 +176,10 @@ fn collect_disk_stats_sync() -> (DiskStat, Vec) {
                 continue;
             }
 
+            if pbs_datastore::get_datastore_mount_status(&config) == Some(false) {
+                continue;
+            }
+
             datastores.push(gather_disk_stats(
                 disk_manager.clone(),
                 Path::new(&config.absolute_path()),
-- 
2.39.5

From h.laimer at proxmox.com  Fri Nov 22 15:46:47 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:46:47 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 00/26] add removable
 datastores
Message-ID: <20241122144713.299130-1-h.laimer@proxmox.com>

These patches add support for removable datastores. All removable
datastores have a backing-device (a UUID) associated with them.
Removable datastores work like normal ones, just that they can be
unplugged. It is possible to create a removable datastore, sync backups
onto it, unplug it and use it on a different PBS.

The datastore path specified is relative to the root of the used device.
Removable datastores are bind mounted to /mnt/datastore/. Multiple
datastores can be created on a single device, but only devices with a
single datastore on them will be auto-mounted.

When a removable datastore is deleted and 'destroy-data' is set, the
device has to be mounted. If 'destroy-data' is not set, the datastore
can be deleted even if the device is not present. Removable datastores
are automatically mounted when plugged in.
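
As a rough end-to-end example of the intended workflow (names and the
UUID are placeholders; passing `backing-device` on `datastore create` is
assumed to work like any other datastore config property):

```
# create a removable datastore, the path is relative to the device root
proxmox-backup-manager datastore create store1 /store1 --backing-device <UUID>

# cleanly unmount before unplugging; this waits for running tasks
proxmox-backup-manager datastore unmount store1

# after plugging the device back in elsewhere, mount it manually if needed
proxmox-backup-manager datastore mount store1
```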
v14: thanks @Fabian and @Maximiliano
* add two helper functions to get the mount status and to ensure a
  removable datastore is mounted, avoiding repeating things
* use enum for `mount_status` instead of Option
* fix problem with unmounting: now check for the unmounting maintenance
  mode before actually unmounting; manually changing the maintenance mode
  during unmounting will now prevent unmounting
* improve logging for mounting: add context and adjust logging levels
* improve uuid_mount function: load config file directly and call do_mount
  function directly without going through the API
* add logging for cleanup on ds deletion
* move check for nesting into do_create_datastore, and check for all
  datastores (not just removable)
* remove redundant check when deleting through directory
* use single worker when creating removable datastore through dir endpoint
* drop get_mount_point function
* ui: stop loading status after first failed attempt, prevents log spamming

v13: thanks @Fabian
* allow multiple datastores on devices
* replace `is_datastore_available` by a more specific function; it is now
  removable datastore specific and won't be called for normal ones
* replace removable/is_available in status structs with mount_state, which
  is `None` for normal datastores, as it makes it less ambiguous what is meant
* remove notion of 'available' from normal datastores and replace it with
  mounted/mount_status for removable ones, as it never really made sense in
  the first place
* abort of an unmount task will now reset the maintenance mode
* add check for race when setting maintenance at end of unmounting task
* improve documentation and commit messages
* remove unneeded tokio::spawn
* only auto-mount devices with a single datastore on them
* drop patch that added a flag for excluding used partitions
* make auto-mount service not dynamic
* add debug command to scan devices for datastores they may contain
* rebase onto master

v12: thanks @Wolfgang
* use bind mounts, so now /path/to/ds is mounted to /mnt/datastore/; this
  is a bit cleaner and allows for multiple datastores on a single device
  to be mounted individually, if we want to allow that in the future
* small code improvements

v11:
* rebase onto master

v10: thanks @Gabriel and @Wolfgang
* make is_datastore_available more robust
* fix a lot of wording
* drop format on uuid_mount command for UUID
* only gather_disk_stats if datastore is available
* overall code improvements
* ui: include model in partition selector
* rebased onto master

v9:
* change mount point to `/mnt/datastore/`
* update "Directory" list UI
* add `absolute_path()` from Dietmar's RFC
* update docs

v8:
* still depends on [1]
* paths for removable datastores are now relative to `/mnt/removable_datastore/`
* add support for creation of removable datastores through the
  "create directory" endpoint (last 3 patches)
* update datastore creation UI
* update docs

v7:
* depends on [1]
* improve logging when waiting for tasks
* drop `update-datastore-cache` refactoring
* fix some commit messages

[1] https://lists.proxmox.com/pipermail/pbs-devel/2024-April/008739.html

v6:
* remove 'drop' flag in datastore cache
* use maintenance-mode 'unmount' for unmounting process, only for the
  unmounting, not for being unmounted
* rename/simplify update-datastore-cache command
* ui: integrate new unmounting maintenance mode
* basically a mix of v3 and v4

v5: thanks @Dietmar and @Christian
* drop --force for unmount since it'll always fail if tasks are still
  running, and if there are none, normal unmount will work
* improve several commit messages
* improve error message wording
* add removable datastore section to docs
* add documentation for is_datastore_available

v4: thanks a lot @Dietmar and @Christian
* make check if mounted way faster
* don't keep track of mounting state
* drop Unplugged maintenance mode
* use UUID_FORMAT for uuid field
* a lot of small things, like use of bail!, inline format!, ...
* include improvement to cache handling

v3:
* remove lazy unmounting (since 9cba51ac782d04085c0af55128f32178e5132358 is applied)
* fix CLI (un)mount command, thanks @Gabriel
* add removable datastore CLI autocomplete helper
* rebase onto master
* move ui patches to the end

thanks @Lukas and @Thomas for the feedback

v2:
* fix datastore 'add' button in the UI
* some format!("{}", a) -> format!("{a}")
* replace `const` with `let` in js code
* change icon `fa-usb` -> `fa-plug`
* add some docs
* add JDoc for parseMaintenanceMode
* proxmox-schema dep bump

Dietmar Maurer (1):
  maintenance: add 'Unmount' maintenance type

Hannes Laimer (25):
  pbs-api-types: add backing-device to DataStoreConfig
  maintenance: make is_offline more generic
  datastore: add helper for checking if a datastore is mounted
  api: admin: add (un)mount endpoint for removable datastores
  api: removable datastore creation
  api: add check for nested datastores on creation
  pbs-api-types: add mount_status field to DataStoreListItem
  bin: manager: add (un)mount command
  add auto-mounting for removable datastores
  datastore: handle deletion of removable datastore properly
  docs: add removable datastores section
  ui: add partition selector form
  ui: add removable datastore creation support
  ui: add (un)mount button to summary
  ui: tree: render unmounted datastores correctly
  ui: utils: make parseMaintenanceMode more robust
  ui: add datastore status mask for unmounted removable datastores
  ui: maintenance: fix disable msg field if no type is selected
  ui: render 'unmount' maintenance mode correctly
  api: node: allow creation of removable datastore through directory
    endpoint
  api: node: include removable datastores in directory list
  node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR
  ui: support create removable datastore through directory creation
  bin: debug: add inspect device command
  api: disks: only return UUID of partitions if it actually is one

 debian/proxmox-backup-server.install        |   1 +
 debian/proxmox-backup-server.udev           |   3 +
 docs/storage.rst                            |  38 +++
 etc/Makefile                                |   1 +
 etc/removable-device-attach@.service        |   8 +
 pbs-api-types/src/datastore.rs              |  47 ++-
 pbs-api-types/src/maintenance.rs            |  12 +-
 pbs-config/src/datastore.rs                 |  14 +
 pbs-datastore/src/datastore.rs              |  83 +++++-
 pbs-datastore/src/lib.rs                    |   4 +-
 src/api2/admin/datastore.rs                 | 310 +++++++++++++++++++-
 src/api2/config/datastore.rs                | 107 ++++++-
 src/api2/node/disks/directory.rs            |  74 +++--
 src/api2/status/mod.rs                      |  30 +-
 src/bin/proxmox_backup_debug/inspect.rs     | 149 ++++++++++
 src/bin/proxmox_backup_manager/datastore.rs | 127 +++++++-
 src/server/metric_collection/mod.rs         |   4 +
 src/tools/disks/mod.rs                      |   5 +-
 www/DirectoryList.js                        |  13 +
 www/Makefile                                |   1 +
 www/NavigationTree.js                       |  17 +-
 www/Utils.js                                |  33 ++-
 www/css/ext6-pbs.css                        |  20 ++
 www/datastore/DataStoreListSummary.js       |   1 +
 www/datastore/Summary.js                    | 115 +++++++-
 www/form/PartitionSelector.js               |  81 +++++
 www/window/CreateDirectory.js               |  14 +
 www/window/DataStoreEdit.js                 |  37 +++
 www/window/MaintenanceOptions.js            |  17 +-
 29 files changed, 1274 insertions(+), 92 deletions(-)
 create mode 100644 etc/removable-device-attach@.service
 create mode 100644 www/form/PartitionSelector.js

-- 
2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:46:53 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:46:53 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 06/25] api: removable datastore creation In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-7-h.laimer@proxmox.com> Devices can contains multiple datastores. If the specified path already contains a datastore, `reuse datastore` has to be set so it'll be added without creating a chunckstore. Signed-off-by: Hannes Laimer --- change since v13: * cleanup src/api2/config/datastore.rs | 54 ++++++++++++++++++++++++++---------- 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 37d1528c7..420f8ddd0 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use ::serde::{Deserialize, Serialize}; -use anyhow::{bail, Error}; +use anyhow::{bail, format_err, Error}; use hex::FromHex; use serde_json::Value; use tracing::warn; @@ -21,7 +21,8 @@ use pbs_config::BackupLockGuard; use pbs_datastore::chunk_store::ChunkStore; use crate::api2::admin::{ - prune::list_prune_jobs, sync::list_config_sync_jobs, verify::list_verification_jobs, + datastore::do_mount_device, prune::list_prune_jobs, sync::list_config_sync_jobs, + verify::list_verification_jobs, }; use crate::api2::config::prune::{delete_prune_job, do_create_prune_job}; use crate::api2::config::sync::delete_sync_job; @@ -32,6 +33,7 @@ use pbs_config::CachedUserInfo; use proxmox_rest_server::WorkerTask; use crate::server::jobstate; +use crate::tools::disks::unmount_by_mountpoint; #[api( input: { @@ -73,37 +75,57 @@ pub(crate) fn do_create_datastore( datastore: DataStoreConfig, reuse_datastore: bool, ) -> Result<(), Error> { - let path: PathBuf = datastore.path.clone().into(); + let path: PathBuf = datastore.absolute_path().into(); if path.parent().is_none() { bail!("cannot create datastore in root path"); } + let need_unmount = datastore.backing_device.is_some(); + if need_unmount { + do_mount_device(datastore.clone())?; + }; + let tuning: DatastoreTuning = serde_json::from_value( DatastoreTuning::API_SCHEMA .parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?, )?; - if reuse_datastore { - ChunkStore::verify_chunkstore(&path)?; + let res = if reuse_datastore { + ChunkStore::verify_chunkstore(&path) } else { + let mut is_empty = true; if let Ok(dir) = std::fs::read_dir(&path) { for file in dir { let name = file?.file_name(); let name = name.to_str(); if !name.map_or(false, |name| name.starts_with('.') || name == "lost+found") { - bail!("datastore path is not empty"); + is_empty = false; + break; } } } - let backup_user = pbs_config::backup_user()?; - let _store = ChunkStore::create( - &datastore.name, - path, - backup_user.uid, - backup_user.gid, - tuning.sync_level.unwrap_or_default(), - )?; + if is_empty { + let backup_user = pbs_config::backup_user()?; + ChunkStore::create( + &datastore.name, + path.clone(), + backup_user.uid, + backup_user.gid, + tuning.sync_level.unwrap_or_default(), + ) + .map(|_| ()) + } else { + Err(format_err!("datastore path not empty")) + } + }; + + if res.is_err() { + if need_unmount { + let _ = unmount_by_mountpoint(&path) + .inspect_err(|e| warn!("could not unmount device: {e}")); + } + return res; } config.set_data(&datastore.name, "datastore", &datastore)?; @@ -147,6 +169,10 @@ pub fn 
From h.laimer at proxmox.com  Fri Nov 22 15:47:01 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:47:01 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 14/26] ui: add removable datastore creation support
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <20241122144713.299130-15-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer
---
 www/window/DataStoreEdit.js | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js
index b8e866df2..7b6aff1e7 100644
--- a/www/window/DataStoreEdit.js
+++ b/www/window/DataStoreEdit.js
@@ -63,6 +63,20 @@ Ext.define('PBS.DataStoreEdit', {
                     emptyText: gettext('An absolute path'),
                     validator: val => val?.trim() !== '/',
                 },
+                {
+                    xtype: 'pmxDisplayEditField',
+                    fieldLabel: gettext('Device'),
+                    name: 'backing-device',
+                    disabled: true,
+                    cbind: {
+                        editable: '{isCreate}',
+                    },
+                    editConfig: {
+                        xtype: 'pbsPartitionSelector',
+                        allowBlank: true,
+                    },
+                    emptyText: gettext('Device path'),
+                },
             ],
             column2: [
                 {
@@ -88,6 +102,29 @@ Ext.define('PBS.DataStoreEdit', {
                 },
             ],
             columnB: [
+                {
+                    xtype: 'checkbox',
+                    boxLabel: gettext('Removable datastore'),
+                    submitValue: false,
+                    listeners: {
+                        change: function(checkbox, isRemovable) {
+                            let inputPanel = checkbox.up('inputpanel');
+                            let pathField = inputPanel.down('[name=path]');
+                            let uuidField = inputPanel.down('pbsPartitionSelector[name=backing-device]');
+                            let uuidEditField = inputPanel.down('[name=backing-device]');
+
+                            uuidField.allowBlank = !isRemovable;
+                            uuidEditField.setDisabled(!isRemovable);
+                            uuidField.setDisabled(!isRemovable);
+                            uuidField.setValue('');
+                            if (isRemovable) {
+                                pathField.setFieldLabel(gettext('On device path'));
+                            } else {
+                                pathField.setFieldLabel(gettext('Backing Path'));
+                            }
+                        },
+                    },
+                },
                 {
                     xtype: 'textfield',
                     name: 'comment',
-- 
2.39.5
From h.laimer at proxmox.com  Fri Nov 22 15:46:59 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:46:59 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 12/25] docs: add removable datastores section
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <20241122144713.299130-13-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer
---
changes since v13:
* fix typo

 docs/storage.rst | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/docs/storage.rst b/docs/storage.rst
index f1e15d522..361af4420 100644
--- a/docs/storage.rst
+++ b/docs/storage.rst
@@ -165,6 +165,44 @@ following command creates a new datastore called ``store1`` on
 
   # proxmox-backup-manager datastore create store1 /backup/disk1/store1
 
+Removable Datastores
+^^^^^^^^^^^^^^^^^^^^
+Removable datastores have a ``backing-device`` associated with them; they can be
+mounted and unmounted. Other than that, they behave the same way a normal datastore
+would.
+
+They can be created on already correctly formatted partitions, which, as with normal
+datastores, should be either ``ext4`` or ``xfs``. It is also possible to create them
+on completely unused disks through "Administration" > "Disks / Storage" > "Directory";
+using this method, the disk will be partitioned and formatted automatically for the datastore.
+
+Devices with only one datastore on them will be mounted automatically. It is possible to create a
+removable datastore on one PBS and use it on multiple instances; the device just has to be added
+on each instance as a removable datastore by checking "reuse datastore" on creation.
+If the device already contains a datastore at the specified path, it'll just be added as
+a new datastore to the PBS instance and will be mounted whenever plugged in. Unmounting has
+to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
+
+A single device can house multiple datastores; the only limitation is that they are not
+allowed to be nested.
+
+.. code-block:: console
+
+  # proxmox-backup-manager datastore unmount store1
+
+Both unmounting through the UI and the CLI command above will wait for any running tasks
+to finish before unmounting the device.
+
+All removable datastores are mounted under /mnt/datastore/, and the specified path
+refers to the path on the device.
+
+All datastores present on a device can be listed using ``proxmox-backup-debug``.
+
+.. code-block:: console
+
+  # proxmox-backup-debug inspect device /dev/...
+
+
 
 Managing Datastores
 ^^^^^^^^^^^^^^^^^^^
-- 
2.39.5
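As a usage sketch tying the section above to the CLI side of this series: the mount/unmount subcommands come from the "bin: manager: add (un)mount command" patch, while the exact creation option name (--backing-device, taking a partition UUID) is an assumption derived from the backing-device config key added by this series, so the real invocation may differ:

  # assumed syntax: create a removable datastore on an already formatted partition
  # proxmox-backup-manager datastore create store1 <on-device-path> --backing-device <partition-uuid>
  # mount/unmount it explicitly; unmounting waits for running tasks to finish
  # proxmox-backup-manager datastore mount store1
  # proxmox-backup-manager datastore unmount store1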
From h.laimer at proxmox.com  Fri Nov 22 15:47:00 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:47:00 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 13/26] ui: add partition selector form
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <20241122144713.299130-14-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer
---
 www/Makefile                  |  1 +
 www/form/PartitionSelector.js | 81 +++++++++++++++++++++++++++++++++++
 2 files changed, 82 insertions(+)
 create mode 100644 www/form/PartitionSelector.js

diff --git a/www/Makefile b/www/Makefile
index d35e81283..3defe3428 100644
--- a/www/Makefile
+++ b/www/Makefile
@@ -49,6 +49,7 @@ JSSRC= \
 	form/NamespaceMaxDepth.js \
 	form/CalendarEvent.js \
 	form/PermissionPathSelector.js \
+	form/PartitionSelector.js \
 	form/GroupSelector.js \
 	form/GroupFilter.js \
 	form/VerifyOutdatedAfter.js \
diff --git a/www/form/PartitionSelector.js b/www/form/PartitionSelector.js
new file mode 100644
index 000000000..162dbe418
--- /dev/null
+++ b/www/form/PartitionSelector.js
@@ -0,0 +1,81 @@
+Ext.define('pbs-partition-list', {
+    extend: 'Ext.data.Model',
+    fields: ['name', 'uuid', 'filesystem', 'devpath', 'size', 'model'],
+    proxy: {
+        type: 'proxmox',
+        url: "/api2/json/nodes/localhost/disks/list?skipsmart=1&include-partitions=1",
+        reader: {
+            transform: (rawData) => rawData.data
+                .flatMap(disk => (disk.partitions ?? [])
+                    .map(part => ({ ...part, model: disk.model }))
+                    .filter(partition => partition.used === 'filesystem')),
+        },
+    },
+    idProperty: 'devpath',
+
+});
+
+Ext.define('PBS.form.PartitionSelector', {
+    extend: 'Proxmox.form.ComboGrid',
+    alias: 'widget.pbsPartitionSelector',
+
+    allowBlank: false,
+    autoSelect: false,
+    submitEmpty: false,
+    valueField: 'uuid',
+    displayField: 'devpath',
+
+    store: {
+        model: 'pbs-partition-list',
+        autoLoad: true,
+        sorters: 'devpath',
+    },
+    getSubmitData: function() {
+        let me = this;
+        let data = null;
+        if (!me.disabled && me.submitValue && !me.isFileUpload()) {
+            let val = me.getSubmitValue();
+            if (val !== undefined && val !== null && val !== '') {
+                data = {};
+                data[me.getName()] = val;
+            } else if (me.getDeleteEmpty()) {
+                data = {};
+                data.delete = me.getName();
+            }
+        }
+        return data;
+    },
+    listConfig: {
+        columns: [
+            {
+                header: gettext('Path'),
+                sortable: true,
+                dataIndex: 'devpath',
+                renderer: (v, metaData, rec) => Ext.String.htmlEncode(v),
+                flex: 1,
+            },
+            {
+                header: gettext('Filesystem'),
+                sortable: true,
+                dataIndex: 'filesystem',
+                flex: 1,
+            },
+            {
+                header: gettext('Size'),
+                sortable: true,
+                dataIndex: 'size',
+                renderer: Proxmox.Utils.format_size,
+                flex: 1,
+            },
+            {
+                header: gettext('Model'),
+                sortable: true,
+                dataIndex: 'model',
+                flex: 1,
+            },
+        ],
+        viewConfig: {
+            emptyText: 'No usable partitions present',
+        },
+    },
+});
-- 
2.39.5

From h.laimer at proxmox.com  Fri Nov 22 15:47:04 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:47:04 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 17/26] ui: utils: make parseMaintenanceMode more robust
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <20241122144713.299130-18-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer
---
 www/Utils.js | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/www/Utils.js b/www/Utils.js
index 4853be36c..7756e9b5d 100644
--- a/www/Utils.js
+++ b/www/Utils.js
@@ -740,14 +740,29 @@ Ext.define('PBS.Utils', {
 	return `${icon} ${value}`;
     },
 
-    // FIXME: this "parser" is brittle and relies on the order the arguments will appear in
+    /**
+     * Parses maintenance mode property string.
+     * Examples:
+     *   "offline,message=foo" -> ["offline", "foo"]
+     *   "offline"             -> ["offline", null]
+     *   "message=foo,offline" -> ["offline", "foo"]
+     *   null/undefined        -> [null, null]
+     *
+     * @param {string|null} mode - Maintenance mode string to parse.
+     * @return {Array} - Parsed maintenance mode values.
+     */
     parseMaintenanceMode: function(mode) {
-	let [type, message] = mode.split(/,(.+)/);
-	type = type.split("=").pop();
-	message = message ? message.split("=")[1]
-	    .replace(/^"(.*)"$/, '$1')
-	    .replaceAll('\\"', '"') : null;
-	return [type, message];
+	if (!mode) {
+	    return [null, null];
+	}
+	return mode.split(',').reduce(([m, msg], pair) => {
+	    const [key, value] = pair.split('=');
+	    if (key === 'message') {
+		return [m, value.replace(/^"(.*)"$/, '$1').replace(/\\"/g, '"')];
+	    } else {
+		return [value ?? 
key, msg]; + } + }, [null, null]); }, renderMaintenance: function(mode, activeTasks) { -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:02 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:02 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 15/26] ui: add (un)mount button to summary In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-16-h.laimer@proxmox.com> And only try to load datastore information if the datastore is available. Signed-off-by: Hannes Laimer --- changes since v13: * stop statusStore update on first failed request, start again on mount www/datastore/Summary.js | 94 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 92 insertions(+), 2 deletions(-) diff --git a/www/datastore/Summary.js b/www/datastore/Summary.js index a932b4e01..2d79a7951 100644 --- a/www/datastore/Summary.js +++ b/www/datastore/Summary.js @@ -309,7 +309,84 @@ Ext.define('PBS.DataStoreSummary', { model: 'pve-rrd-datastore', }); - me.callParent(); + me.statusStore = Ext.create('Proxmox.data.ObjectStore', { + url: `/api2/json/admin/datastore/${me.datastore}/status`, + interval: 1000, + }); + + let unmountBtn = Ext.create('Ext.Button', { + text: gettext('Unmount'), + hidden: true, + handler: () => { + Proxmox.Utils.API2Request({ + url: `/admin/datastore/${me.datastore}/unmount`, + method: 'POST', + failure: function(response) { + Ext.Msg.alert(gettext('Error'), response.htmlStatus); + }, + success: function(response, options) { + Ext.create('Proxmox.window.TaskViewer', { + upid: response.result.data, + }).show(); + }, + }); + }, + }); + + let mountBtn = Ext.create('Ext.Button', { + text: gettext('Mount'), + hidden: true, + handler: () => { + Proxmox.Utils.API2Request({ + url: `/admin/datastore/${me.datastore}/mount`, + method: 'POST', + failure: function(response) { + Ext.Msg.alert(gettext('Error'), response.htmlStatus); + }, + success: function(response, options) { + me.statusStore.startUpdate(); + Ext.create('Proxmox.window.TaskViewer', { + upid: response.result.data, + }).show(); + }, + }); + }, + }); + + Ext.apply(me, { + tbar: [unmountBtn, mountBtn, '->', { xtype: 'proxmoxRRDTypeSelector' }], + }); + + me.mon(me.statusStore, 'load', (s, records, success) => { + if (!success) { + me.statusStore.stopUpdate(); + me.down('pbsDataStoreInfo').fireEvent('deactivate'); + Proxmox.Utils.API2Request({ + url: `/config/datastore/${me.datastore}`, + success: response => { + let mode = response.result.data['maintenance-mode']; + let [type, _message] = PBS.Utils.parseMaintenanceMode(mode); + if (!response.result.data['backing-device']) { + return; + } + if (!type || type === 'read-only') { + unmountBtn.setDisabled(true); + mountBtn.setDisabled(false); + } else if (type === 'unmount') { + unmountBtn.setDisabled(true); + mountBtn.setDisabled(true); + } else { + unmountBtn.setDisabled(false); + mountBtn.setDisabled(false); + } + }, + }); + } else { + me.down('pbsDataStoreInfo').fireEvent('activate'); + unmountBtn.setDisabled(false); + mountBtn.setDisabled(true); + } + }); let sp = Ext.state.Manager.getProvider(); me.mon(sp, 'statechange', function(provider, key, value) { @@ -322,11 +399,17 @@ Ext.define('PBS.DataStoreSummary', { Proxmox.Utils.updateColumns(me); }); + me.callParent(); + Proxmox.Utils.API2Request({ url: `/config/datastore/${me.datastore}`, waitMsgTarget: me.down('pbsDataStoreInfo'), success: function(response) { - let path = 
Ext.htmlEncode(response.result.data.path); + let data = response.result.data; + let path = Ext.htmlEncode(data.path); + const removable = Object.prototype.hasOwnProperty.call(data, "backing-device"); + unmountBtn.setHidden(!removable); + mountBtn.setHidden(!removable); me.down('pbsDataStoreInfo').setTitle(`${me.datastore} (${path})`); me.down('pbsDataStoreNotes').setNotes(response.result.data.comment); }, @@ -344,6 +427,13 @@ Ext.define('PBS.DataStoreSummary', { let hasIoTicks = records?.some((rec) => rec?.data?.io_ticks !== undefined); me.down('#ioDelayChart').setVisible(!success || hasIoTicks); }, undefined, { single: true }); + me.on('afterrender', () => { + me.statusStore.startUpdate(); + }); + + me.on('destroy', () => { + me.statusStore.stopUpdate(); + }); me.query('proxmoxRRDChart').forEach((chart) => { chart.setStore(me.rrdstore); -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:05 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:05 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 18/26] ui: add datastore status mask for unmounted removable datastores In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-19-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/css/ext6-pbs.css | 12 ++++++++++++ www/datastore/Summary.js | 21 +++++++++++++-------- 2 files changed, 25 insertions(+), 8 deletions(-) diff --git a/www/css/ext6-pbs.css b/www/css/ext6-pbs.css index 706e681e9..891189ae3 100644 --- a/www/css/ext6-pbs.css +++ b/www/css/ext6-pbs.css @@ -261,6 +261,18 @@ span.snapshot-comment-column { content: "\f0ad"; } +.pbs-unplugged-mask div.x-mask-msg-text { + background: None; + padding: 12px 0 0; +} + +.pbs-unplugged-mask:before { + font-size: 3em; + display: flex; + justify-content: center; + content: "\f1e6"; +} + /* the small icons TODO move to proxmox-widget-toolkit */ .pmx-tree-icon-custom:after { position: relative; diff --git a/www/datastore/Summary.js b/www/datastore/Summary.js index 2d79a7951..c41c55423 100644 --- a/www/datastore/Summary.js +++ b/www/datastore/Summary.js @@ -61,16 +61,21 @@ Ext.define('PBS.DataStoreInfo', { Proxmox.Utils.API2Request({ url: `/config/datastore/${me.view.datastore}`, success: function(response) { - const config = response.result.data; - if (config['maintenance-mode']) { - const [_type, msg] = PBS.Utils.parseMaintenanceMode(config['maintenance-mode']); - me.view.el.mask( - `${gettext('Datastore is in maintenance mode')}${msg ? ': ' + msg : ''}`, - 'fa pbs-maintenance-mask', - ); - } else { + let maintenanceString = response.result.data['maintenance-mode']; + let removable = !!response.result.data['backing-device']; + if (!maintenanceString && !removable) { me.view.el.mask(gettext('Datastore is not available')); + return; } + + let [_type, msg] = PBS.Utils.parseMaintenanceMode(maintenanceString); + let isUnplugged = !maintenanceString && removable; + let maskMessage = isUnplugged + ? gettext('Datastore is not mounted') + : `${gettext('Datastore is in maintenance mode')}${msg ? ': ' + msg : ''}`; + + let maskIcon = isUnplugged ? 
'fa pbs-unplugged-mask' : 'fa pbs-maintenance-mask';
+                me.view.el.mask(maskMessage, maskIcon);
             },
         });
         return;
-- 
2.39.5

From h.laimer at proxmox.com  Fri Nov 22 15:46:55 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:46:55 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 08/25] pbs-api-types: add mount_status field to DataStoreListItem
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <20241122144713.299130-9-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer
---
changes since v13:
* drop commit message, seemed unnecessary, enum is pretty self-explanatory
* use enum instead of Option

 pbs-api-types/src/datastore.rs | 19 ++++++++++++++++-
 src/api2/admin/datastore.rs    | 38 ++++++++++++++++++++--------
 src/api2/status/mod.rs         | 30 +++++++++++++++++++++++----
 3 files changed, 66 insertions(+), 21 deletions(-)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index ba75ebaba..b445e10e5 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -178,6 +178,20 @@ pub enum ChunkOrder {
     Inode,
 }
 
+#[api]
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(rename_all = "lowercase")]
+/// Current mounting status of a datastore, useful for removable datastores.
+pub enum DataStoreMountStatus {
+    /// Removable datastore is currently mounted correctly.
+    Mounted,
+    /// Removable datastore is currently not mounted.
+    NotMounted,
+    /// Datastore is not removable, so there is no mount status.
+    #[default]
+    NonRemovable,
+}
+
 #[api]
 #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
 #[serde(rename_all = "lowercase")]
@@ -451,6 +465,7 @@ impl DataStoreConfig {
 pub struct DataStoreListItem {
     pub store: String,
     pub comment: Option<String>,
+    pub mount_status: DataStoreMountStatus,
     /// If the datastore is in maintenance mode, information about it
     #[serde(skip_serializing_if = "Option::is_none")]
     pub maintenance: Option<String>,
@@ -1456,6 +1471,7 @@ pub struct DataStoreStatusListItem {
     /// The available bytes of the underlying storage. (-1 on error)
     #[serde(skip_serializing_if = "Option::is_none")]
     pub avail: Option<u64>,
+    pub mount_status: DataStoreMountStatus,
     /// A list of usages of the past (last Month).
    #[serde(skip_serializing_if = "Option::is_none")]
     pub history: Option<Vec<Option<f64>>>,
@@ -1480,12 +1496,13 @@ pub struct DataStoreStatusListItem {
 }
 
 impl DataStoreStatusListItem {
-    pub fn empty(store: &str, err: Option<String>) -> Self {
+    pub fn empty(store: &str, err: Option<String>, mount_status: DataStoreMountStatus) -> Self {
         DataStoreStatusListItem {
             store: store.to_owned(),
             total: None,
             used: None,
             avail: None,
+            mount_status,
             history: None,
             history_start: None,
             history_delta: None,
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 85522345e..f41024e42 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -38,14 +38,15 @@ use pxar::EntryKind;
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName,
     BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode,
-    DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem,
-    JobScheduleStatus, KeepOptions, MaintenanceMode, MaintenanceType, Operation, PruneJobOptions,
-    SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
-    BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME,
-    CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME,
-    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID,
-    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    DataStoreConfig, DataStoreListItem, DataStoreMountStatus, DataStoreStatus,
+    GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, MaintenanceMode,
+    MaintenanceType, Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState,
+    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
+    IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
+    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
+    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
+    VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -1325,8 +1326,8 @@ pub fn get_datastore_list(
 
     let mut list = Vec::new();
 
-    for (store, (_, data)) in &config.sections {
-        let acl_path = &["datastore", store];
+    for (store, (_, data)) in config.sections {
+        let acl_path = &["datastore", &store];
         let user_privs = user_info.lookup_privs(&auth_id, acl_path);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
 
@@ -1337,15 +1338,20 @@ pub fn get_datastore_list(
             }
         }
 
+        let store_config: DataStoreConfig = serde_json::from_value(data)?;
+
+        let mount_status = match pbs_datastore::get_datastore_mount_status(&store_config) {
+            Some(true) => DataStoreMountStatus::Mounted,
+            Some(false) => DataStoreMountStatus::NotMounted,
+            None => DataStoreMountStatus::NonRemovable,
+        };
+
         if allowed || allow_id {
             list.push(DataStoreListItem {
                 store: store.clone(),
-                comment: if !allowed {
-                    None
-                } else {
-                    data["comment"].as_str().map(String::from)
-                },
-                maintenance: data["maintenance-mode"].as_str().map(String::from),
+                comment: store_config.comment.filter(|_| allowed),
+                mount_status,
+                maintenance: store_config.maintenance_mode,
             });
         }
     }
diff --git a/src/api2/status/mod.rs b/src/api2/status/mod.rs
index 113aa9852..5efde9c3d 100644
--- a/src/api2/status/mod.rs
+++ b/src/api2/status/mod.rs
@@ -10,11 +10,12 @@ use proxmox_schema::api;
 use proxmox_sortable_macro::sortable;
 
 use pbs_api_types::{
-    Authid, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+    Authid, DataStoreConfig, DataStoreMountStatus, DataStoreStatusListItem, Operation,
+    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
 };
 use pbs_config::CachedUserInfo;
 
-use pbs_datastore::DataStore;
+use pbs_datastore::{get_datastore_mount_status, DataStore};
 
 use crate::server::metric_collection::rrd::extract_rrd_data;
 use crate::tools::statistics::linear_regression;
@@ -51,10 +52,26 @@ pub async fn datastore_status(
     for (store, (_, _)) in &config.sections {
         let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
         let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0;
+
+        let store_config = config.lookup::<DataStoreConfig>("datastore", store)?;
+
+        let mount_status = match get_datastore_mount_status(&store_config) {
+            Some(true) => DataStoreMountStatus::Mounted,
+            Some(false) => {
+                list.push(DataStoreStatusListItem::empty(
+                    store,
+                    None,
+                    DataStoreMountStatus::NotMounted,
+                ));
+                continue;
+            }
+            None => DataStoreMountStatus::NonRemovable,
+        };
+
         if !allowed {
             if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) {
                 if can_access_any_namespace(datastore, &auth_id, &user_info) {
-                    list.push(DataStoreStatusListItem::empty(store, None));
+                    list.push(DataStoreStatusListItem::empty(store, None, mount_status));
                }
             }
             continue;
@@ -63,7 +80,11 @@ pub async fn datastore_status(
         let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) {
             Ok(datastore) => datastore,
             Err(err) => {
-                list.push(DataStoreStatusListItem::empty(store, Some(err.to_string())));
+                list.push(DataStoreStatusListItem::empty(
+                    store,
+                    Some(err.to_string()),
+                    mount_status,
+                ));
                 continue;
             }
         };
@@ -74,6 +95,7 @@ pub async fn datastore_status(
             total: Some(status.total),
             used: Some(status.used),
             avail: Some(status.available),
+            mount_status,
             history: None,
             history_start: None,
             history_delta: None,
-- 
2.39.5
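Both hunks above map the Option<bool> returned by get_datastore_mount_status() to the new enum with the same three-arm match. As a sketch, that mapping could live in a single conversion; this is a hypothetical From impl for illustration, not part of the patch:

    /// Hypothetical conversion mirroring the repeated match in the two hunks above;
    /// not part of the patch itself.
    #[derive(Debug, PartialEq)]
    enum DataStoreMountStatus {
        Mounted,
        NotMounted,
        NonRemovable,
    }

    impl From<Option<bool>> for DataStoreMountStatus {
        fn from(state: Option<bool>) -> Self {
            match state {
                Some(true) => DataStoreMountStatus::Mounted,     // removable and mounted
                Some(false) => DataStoreMountStatus::NotMounted, // removable, not mounted
                None => DataStoreMountStatus::NonRemovable,      // no backing device at all
            }
        }
    }

    fn main() {
        assert_eq!(DataStoreMountStatus::from(None), DataStoreMountStatus::NonRemovable);
        assert_eq!(DataStoreMountStatus::from(Some(true)), DataStoreMountStatus::Mounted);
    }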
From h.laimer at proxmox.com  Fri Nov 22 15:47:13 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Fri, 22 Nov 2024 15:47:13 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 26/26] api: disks: only return UUID of partitions if it actually is one
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <20241122144713.299130-27-h.laimer@proxmox.com>

Some filesystems like FAT don't include a concept of UUIDs. Instead,
tools like blkid derive these identifiers based on certain filesystem
metadata, such as volume serial numbers or other unique information.
This does however not follow the format specified in RFC 9562[1].
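The mismatch is easy to demonstrate with concrete values: blkid reports something like "A1B2-C3D4" for a FAT volume serial, which an RFC-9562-style pattern rejects while a real partition UUID passes. A small sketch of the filter this patch effectively applies, assuming pbs_api_types::UUID_REGEX is the usual 8-4-4-4-12 hex pattern (the exact definition may differ):

    use regex::Regex;

    fn main() {
        // assumed 8-4-4-4-12 hex pattern; the real pbs_api_types::UUID_REGEX may differ
        let uuid_re = Regex::new(
            r"^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$",
        )
        .unwrap();

        // a real partition type UUID matches and is kept ...
        assert!(uuid_re.is_match("c12a7328-f81f-11d2-ba4b-00a0c93ec93b"));
        // ... while a blkid-derived FAT "UUID" (volume serial) is filtered out
        assert!(!uuid_re.is_match("A1B2-C3D4"));
    }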
[1] https://datatracker.ietf.org/doc/html/rfc9562 Signed-off-by: Hannes Laimer --- src/tools/disks/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs index 6345fde7c..61aceccd6 100644 --- a/src/tools/disks/mod.rs +++ b/src/tools/disks/mod.rs @@ -898,7 +898,10 @@ fn get_partitions_info( let mut uuid = None; if let Some(devpath) = devpath.as_ref() { for info in lsblk_infos.iter().filter(|i| i.path.eq(devpath)) { - uuid = info.uuid.clone(); + uuid = info + .uuid + .clone() + .filter(|uuid| pbs_api_types::UUID_REGEX.is_match(uuid)); used = match info.partition_type.as_deref() { Some("21686148-6449-6e6f-744e-656564454649") => PartitionUsageType::BIOS, Some("c12a7328-f81f-11d2-ba4b-00a0c93ec93b") => PartitionUsageType::EFI, -- 2.39.5 From h.laimer at proxmox.com Fri Nov 22 15:47:10 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Fri, 22 Nov 2024 15:47:10 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 23/26] node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <20241122144713.299130-24-h.laimer@proxmox.com> ... since they do have the same value. Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 11d07af42..ff817b253 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -11,8 +11,8 @@ use proxmox_schema::api; use proxmox_section_config::SectionConfigData; use pbs_api_types::{ - DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, - PRIV_SYS_MODIFY, UPID_SCHEMA, + DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_MOUNT_DIR, DATASTORE_SCHEMA, NODE_SCHEMA, + PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA, }; use crate::tools::disks::{ @@ -23,8 +23,6 @@ use crate::tools::systemd::{self, types::*}; use proxmox_rest_server::WorkerTask; -const BASE_MOUNT_DIR: &str = "/mnt/datastore/"; - #[api( properties: { "filesystem": { @@ -91,7 +89,7 @@ pub fn list_datastore_mounts() -> Result, Error> { let name = data .Where - .strip_prefix(BASE_MOUNT_DIR) + .strip_prefix(DATASTORE_MOUNT_DIR) .unwrap_or(&data.Where) .to_string(); @@ -185,7 +183,7 @@ pub fn create_datastore_disk( bail!("disk '{}' is already in use.", disk); } - let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); + let mount_point = format!("{}/{}", DATASTORE_MOUNT_DIR, &name); // check if the default path exists already. // bail if it is not empty or another filesystem mounted on top let default_path = std::path::PathBuf::from(&mount_point); @@ -193,7 +191,7 @@ pub fn create_datastore_disk( match std::fs::metadata(&default_path) { Err(_) => {} // path does not exist Ok(stat) => { - let basedir_dev = std::fs::metadata(BASE_MOUNT_DIR)?.st_dev(); + let basedir_dev = std::fs::metadata(DATASTORE_MOUNT_DIR)?.st_dev(); if stat.st_dev() != basedir_dev { bail!("path {default_path:?} already exists and is mountpoint"); } @@ -278,7 +276,7 @@ pub fn create_datastore_disk( )] /// Remove a Filesystem mounted under `/mnt/datastore/`. 
pub fn delete_datastore_disk(name: String) -> Result<(), Error> {
-    let path = format!("{}{}", BASE_MOUNT_DIR, name);
+    let path = format!("{}/{}", DATASTORE_MOUNT_DIR, name);
     // path of datastore cannot be changed
     let (config, _) = pbs_config::datastore::config()?;
     let datastores: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?;
-- 
2.39.5

From c.ebner at proxmox.com  Fri Nov 22 17:54:06 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Fri, 22 Nov 2024 17:54:06 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: sync job: fix source group filters based on sync direction
Message-ID: <20241122165406.676851-1-c.ebner@proxmox.com>

Fix switching the source for group filters based on the sync job's
sync direction.

The helper to set the local namespace for the group filters was
introduced in commit 43a92c8c ("ui: group filter: allow to set
namespace for local datastore"), but never used because it was lost
during subsequent iterations of reworking the patch series.

The switching is corrected by:
- correctly initializing the local store and namespace for the group
  filter of sync jobs in push direction in the controller init.
- fixing an incorrect check for the sync direction in the remote
  datastore selector change listener.
- conditionally switching namespace to be set for the group filter in
  the remote and local namespace selector change listeners.

Reported-by: Lukas Wagner
Signed-off-by: Christian Ebner
---
 www/window/SyncJobEdit.js | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index 0e648e7b3..4f46eacc4 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -69,6 +69,16 @@ Ext.define('PBS.window.SyncJobEdit', {
 	    let nsSelector = view.down('pbsNamespaceSelector[name=ns]');
 	    nsSelector.setDatastore(value);
 	},
+
+	init: function() {
+	    let view = this.getView();
+	    if (view.syncDirectionPush) {
+		let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
+		let localStore = view.down('field[name=store]').getValue();
+		view.down('pbsGroupFilter').setLocalDatastore(localStore);
+		view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs);
+	    }
+	}
     },
 
     setValues: function(values) {
@@ -134,6 +144,11 @@ Ext.define('PBS.window.SyncJobEdit', {
 		    let me = this;
 		    let view = me.up('pbsSyncJobEdit');
 
+		    if (view.syncDirectionPush) {
+			let localStore = view.down('field[name=store]').getValue();
+			view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs);
+		    }
+
 		    let remoteNs = view.down('pbsRemoteNamespaceSelector[name=remote-ns]').getValue();
 		    let maxDepthField = view.down('field[name=max-depth]');
 		    maxDepthField.setLimit(localNs, remoteNs);
@@ -268,7 +283,8 @@ Ext.define('PBS.window.SyncJobEdit', {
 		    remoteNamespaceField.setRemote(remote);
 		    remoteNamespaceField.setRemoteStore(value);
 
-		    if (!me.syncDirectionPush) {
+		    let view = me.up('pbsSyncJobEdit');
+		    if (!view.syncDirectionPush) {
 			me.up('tabpanel').down('pbsGroupFilter').setRemoteDatastore(remote, value);
 		    } else {
 			let localStore = me.up('pbsSyncJobEdit').down('field[name=store]').getValue();
@@ -293,7 +309,10 @@ Ext.define('PBS.window.SyncJobEdit', {
 
 		    let remote = view.down('field[name=remote]').getValue();
 		    let remoteStore = view.down('field[name=remote-store]').getValue();
-		    me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
+
+		    if (!view.syncDirectionPush) {
+			me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
+		    }
 
 		    let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
 		    let maxDepthField = view.down('field[name=max-depth]');
-- 
2.39.5
From c.ebner at proxmox.com  Fri Nov 22 18:39:17 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Fri, 22 Nov 2024 18:39:17 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] server: push: log encountered empty backup groups during sync
Message-ID: <20241122173917.728728-1-c.ebner@proxmox.com>

Log also empty backup groups with no snapshots encountered during the
sync, so the log output contains this additional information as well,
reducing possible confusion.

Nevertheless, continue with the regular logic, so that pruning of
vanished snapshots is honored.

Example output in the sync job's task log:

```
2024-11-22T18:32:40+01:00: Syncing datastore 'datastore', root namespace into datastore 'push-target-store', namespace 'test'
2024-11-22T18:32:40+01:00: Found 2 groups to sync (out of 2 total)
2024-11-22T18:32:40+01:00: skipped: 1 snapshot(s) (2024-11-22T13:40:18Z) - older than the newest snapshot present on sync target
2024-11-22T18:32:40+01:00: Group 'vm/200' contains no snapshots to sync to remote
2024-11-22T18:32:40+01:00: Finished syncing root namespace, current progress: 1 groups, 0 snapshots
```

Reported-by: Fabian Grünbichler
Signed-off-by: Christian Ebner
---
 src/server/push.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/server/push.rs b/src/server/push.rs
index 082a6f49d..99757a3cc 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -664,6 +664,10 @@ pub(crate) async fn push_group(
     let mut snapshots: Vec<BackupDir> = params.source.list_backup_dirs(namespace, group).await?;
     snapshots.sort_unstable_by(|a, b| a.time.cmp(&b.time));
 
+    if snapshots.is_empty() {
+        info!("Group '{group}' contains no snapshots to sync to remote");
+    }
+
     let total_snapshots = snapshots.len();
     let cutoff = params
         .transfer_last
-- 
2.39.5

From h.laimer at proxmox.com  Mon Nov 25 07:19:46 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Mon, 25 Nov 2024 07:19:46 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14 00/26] add removable datastores
In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com>
References: <20241122144713.299130-1-h.laimer@proxmox.com>
Message-ID: <988ca7c5-db7d-4ff3-acd2-737f4e846b43@proxmox.com>

On 11/22/24 15:46, Hannes Laimer wrote:
> These patches add support for removable datastores. All removable
> datastores have a backing-device(a UUID) associated with them. Removable
> datastores work like normal ones, just that they can be unplugged. It is
> possible to create a removable datastore, sync backups onto it, unplug
> it and use it on a different PBS.
>
> The datastore path specified is relative to the root of the used device.
> Removable datastores are bind mounted to /mnt/datastore/.
> Multiple datastores can be created on a single device, but only device with
> a single datastore on them will be auto-mounted.
>
> When a removable datastore is deleted and 'destroy-data' is set, the
> device has to be mounted. If 'destroy-data' is not set the datastore
> can be deleted even if the device is not present. Removable datastores
> are automatically mounted when plugged in.
> > v14: thanks @Fabian and @Maximiliano > * add two functions to get mount status, or ensure mounted for rm.ds., > avoiding repeating things > * use enum for `mount_status` instead of Option > * fix problme with unmounting, now check for unmounting maintenance mode before actually unmounting, > manually chaning the maintenance mode during unmounting will not prevent unmounting ^ *now > * improve logging for mounting: add context and adjust logging levels > * improve uuid_mount function: load config file directly and call do_mount function directly without > going through the API > * add logging for cleanup on ds deletion > * move check for nesting into do_create_datastore, and check fir all datastore(not just removable) > * remove redundant check when deleting through directory > * use single worker when creating removable datastore through dir endpoint > * drop get_mount_point function > * ui: stop loading status after first failed attempt, prevents logs spamming > > v13: thanks @Fabian > * allow multiple datastore on devices > * replace `is_datastore_available` by a more specific function, it is now > removable datastore specific and won't be called for normal ones > * replace removable/is_available in status structs with mount_state, > which is `None` for normal datastore as it makes it > less ambiguous what is meant > * remove notion of 'available' from normal datastores and replace it with > mounted/mount_status for removable ones, as it never really made sense > in the first place > * abort of an unmount task will now reset the maintanance mode > * add check for race when setting maintenance at end of unmounting task > * improve documentation and commit messages > * remove not needed tokio::spawn > * only auto mount devices with single datastore on them > * drop ptach that added flag for excluding used partitions > * make auto mount service not dynamic > * add debug command to scan devices for datastores they may contain > * rebase onto master > > v12: thanks @Wolfgang > * use bind mounts, so now > /path/to/ds is mounted to /mnt/datastore/ > this is a bit cleaner and allows for multiple datastores > on a single device to be mounted individually, if we > want to allow that in the future > * small code improvements > > > v11: > * rebase onto master > > v10: thanks @Gabriel and @Wolfgang > * make is_datastore_available more robust > * fix a lot of wording > * drop format on uuid_mount command for UUID > * only gather_disk_stats if datastore is available > * overall code improvements > * ui: include model in partition selector > * rebased onto master > > v9: > * change mount point to `/mnt/datastore/` > * update "Directory" list UI > * add `absolute_path()` from Dietmar's RFC > * update docs > > v8: > * still depends on [1] > * paths for removable datastores are now relative to > `/mnt/removable_datastore/` > * add support for creation of removable datastore through the > "create directory" endpoint (last 3 patches) > * update datastore creation UI > * update docs > > v7: > * depends on [1] > * improve logging when waiting for tasks > * drop `update-datatore-cache` refactoring > * fix some commit messages > > [1] https://lists.proxmox.com/pipermail/pbs-devel/2024-April/008739.html > > v6: > * remove 'drop' flag in datastore cache > * use maintenance-mode 'unmount' for unmounting process, only for the > unmounting not for being unmounted > * rename/simplify update-datastore-cache command > * ui: integrate new unmounting maintenance mode > * basically a mix of v3 and v4 > > v5: thanks 
@Dietmar and @Christian > * drop --force for unmount since it'll always fail if tasks are still running, and if > there are not normal unount will work > * improve several commit messages > * improve error message wording > * add removable datastore section to docs > * add documentation for is_datastore_available > > v4: thanks a lot @Dietmar and @Christian > * make check if mounted wayyy faster > * don't keep track of mounting state > * drop Unplugged maintenance mode > * use UUID_FORMAT for uuid field > * a lot of small things, like use of bail!, inline format!, ... > * include improvement to cache handling > > v3: > * remove lazy unmounting (since 9cba51ac782d04085c0af55128f32178e5132358 is applied) > * fix CLI (un)mount command, thanks @Gabriel > * add removable datastore CLI autocomplete helper > * rebase onto master > * move ui patches to the end > > thanks @Lukas and @Thomas for the feedback > v2: > * fix datastore 'add' button in the UI > * some format!("{}", a) -> format!("{a}") > * replace `const` with `let` in js code > * change icon `fa-usb` -> `fa-plug` > * add some docs > * add JDoc for parseMaintenanceMode > * proxmox-schema dep bump > > Dietmar Maurer (1): > maintenance: add 'Unmount' maintenance type > > Hannes Laimer (25): > pbs-api-types: add backing-device to DataStoreConfig > maintenance: make is_offline more generic > datastore: add helper for checking if a datastore is mounted > api: admin: add (un)mount endpoint for removable datastores > api: removable datastore creation > api: add check for nested datastores on creation > pbs-api-types: add mount_status field to DataStoreListItem > bin: manager: add (un)mount command > add auto-mounting for removable datastores > datastore: handle deletion of removable datastore properly > docs: add removable datastores section > ui: add partition selector form > ui: add removable datastore creation support > ui: add (un)mount button to summary > ui: tree: render unmounted datastores correctly > ui: utils: make parseMaintenanceMode more robust > ui: add datastore status mask for unmounted removable datastores > ui: maintenance: fix disable msg field if no type is selected > ui: render 'unmount' maintenance mode correctly > api: node: allow creation of removable datastore through directory > endpoint > api: node: include removable datastores in directory list > node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR > ui: support create removable datastore through directory creation > bin: debug: add inspect device command > api: disks: only return UUID of partitions if it actually is one > > debian/proxmox-backup-server.install | 1 + > debian/proxmox-backup-server.udev | 3 + > docs/storage.rst | 38 +++ > etc/Makefile | 1 + > etc/removable-device-attach at .service | 8 + > pbs-api-types/src/datastore.rs | 47 ++- > pbs-api-types/src/maintenance.rs | 12 +- > pbs-config/src/datastore.rs | 14 + > pbs-datastore/src/datastore.rs | 83 +++++- > pbs-datastore/src/lib.rs | 4 +- > src/api2/admin/datastore.rs | 310 +++++++++++++++++++- > src/api2/config/datastore.rs | 107 ++++++- > src/api2/node/disks/directory.rs | 74 +++-- > src/api2/status/mod.rs | 30 +- > src/bin/proxmox_backup_debug/inspect.rs | 149 ++++++++++ > src/bin/proxmox_backup_manager/datastore.rs | 127 +++++++- > src/server/metric_collection/mod.rs | 4 + > src/tools/disks/mod.rs | 5 +- > www/DirectoryList.js | 13 + > www/Makefile | 1 + > www/NavigationTree.js | 17 +- > www/Utils.js | 33 ++- > www/css/ext6-pbs.css | 20 ++ > www/datastore/DataStoreListSummary.js | 1 + > 
> www/datastore/Summary.js | 115 +++++++-
> www/form/PartitionSelector.js | 81 +++++
> www/window/CreateDirectory.js | 14 +
> www/window/DataStoreEdit.js | 37 +++
> www/window/MaintenanceOptions.js | 17 +-
> 29 files changed, 1274 insertions(+), 92 deletions(-)
> create mode 100644 etc/removable-device-attach at .service
> create mode 100644 www/form/PartitionSelector.js
>

From h.laimer at proxmox.com  Mon Nov 25 09:47:52 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Mon, 25 Nov 2024 09:47:52 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v14] fixup! ui: tree: render unmounted datastores correctly
In-Reply-To: <20241122144713.299130-17-h.laimer@proxmox.com>
References: <20241122144713.299130-17-h.laimer@proxmox.com>
Message-ID: <20241125084752.28727-1-h.laimer@proxmox.com>

---
fixes small problem with the original patch where all datastores were
rendered with the plug icon in the UI

sent in reply to the original patch since it is a really minor change and
can just be squashed in

 www/NavigationTree.js | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/www/NavigationTree.js b/www/NavigationTree.js
index dd03fbd62..29ecae7fe 100644
--- a/www/NavigationTree.js
+++ b/www/NavigationTree.js
@@ -266,7 +266,8 @@ Ext.define('PBS.view.main.NavigationTree', {
 		while (name.localeCompare(getChildTextAt(j)) > 0 && (j+1) < list.childNodes.length) {
 		    j++;
 		}
-		let mainIcon = `fa fa-${records[i].data.mount-status !== 'nonremovable' ? 'plug' : 'database'}`;
+		const isRemovable = records[i].data['mount-status'] !== 'nonremovable';
+		let mainIcon = `fa fa-${isRemovable ? 'plug' : 'database'}`;
 		let [qtip, iconCls] = ['', mainIcon];
 		const maintenance = records[i].data.maintenance;
-- 
2.39.5

From g.goller at proxmox.com  Mon Nov 25 09:58:18 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Mon, 25 Nov 2024 09:58:18 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] reuse-datastore: avoid creating another default prune job
In-Reply-To: <3cc8bcda-9bc6-4658-9c9f-cfad4ef38833@proxmox.com>
References: <20241122111150.162327-1-g.goller@proxmox.com>
 <3cc8bcda-9bc6-4658-9c9f-cfad4ef38833@proxmox.com>
Message-ID:

On 22.11.2024 12:18, Christian Ebner wrote:
>On 11/22/24 12:11, Gabriel Goller wrote:
>>diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs
>>index ce7b8ce565ce..dafb97e2f1e5 100644
>>--- a/src/api2/config/prune.rs
>>+++ b/src/api2/config/prune.rs
>>@@ -77,6 +77,21 @@ pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> {
>>     Ok(())
>> }
>>+pub fn default_prune_job_existing(datastore: &str) -> Result<bool, Error> {
>>+    let (section_config, _digest) = prune::config()?;
>>+    if section_config
>>+        .sections
>>+        .keys()
>>+        .filter(|s| s.starts_with(&format!("default-{datastore}")))
>>+        .count()
>>+        > 0
>>+    {
>>+        Ok(true)
>>+    } else {
>>+        Ok(false)
>>+    }
>
>could be more compact? no need for the if block:
>
>let has_default = section_config.sections.keys().filter(...).count() >
>0;
>Ok(has_default)

Yep, corrected it in v2!
Thanks for looking at it!

>>+}
>> #[api(
>>     protected: true,
>>     input: {

From g.goller at proxmox.com  Mon Nov 25 09:59:53 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Mon, 25 Nov 2024 09:59:53 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2] reuse-datastore: avoid creating another default prune job
Message-ID: <20241125085953.19828-1-g.goller@proxmox.com>

If a datastore with a default prune job is removed, the prune job is
preserved as it is stored in /etc/proxmox-backup/prune.cfg.
We also create a default prune job for every datastore - this means
that when reusing a datastore that previously existed, you end up with
duplicate prune jobs.

Reported-by: Fabian Grünbichler
Signed-off-by: Gabriel Goller
---
v2, thanks @Christian:
- convert if-statement to inline condition

 src/api2/config/datastore.rs | 41 ++++++++++++++++++++----------
 src/api2/config/prune.rs     | 11 ++++++++++
 2 files changed, 34 insertions(+), 18 deletions(-)

diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 37d1528c70fb..cbe67cfc6ac5 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -23,7 +23,9 @@ use pbs_datastore::chunk_store::ChunkStore;
 use crate::api2::admin::{
     prune::list_prune_jobs, sync::list_config_sync_jobs, verify::list_verification_jobs,
 };
-use crate::api2::config::prune::{delete_prune_job, do_create_prune_job};
+use crate::api2::config::prune::{
+    default_prune_job_existing, delete_prune_job, do_create_prune_job,
+};
 use crate::api2::config::sync::delete_sync_job;
 use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
 use crate::api2::config::verify::delete_verification_job;
@@ -150,23 +152,26 @@ pub fn create_datastore(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
-    let prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
-        let mut id = format!("default-{}-{}", config.name, Uuid::generate());
-        id.truncate(32);
-
-        PruneJobConfig {
-            id,
-            store: config.name.clone(),
-            comment: None,
-            disable: false,
-            schedule: schedule.clone(),
-            options: PruneJobOptions {
-                keep: config.keep.clone(),
-                max_depth: None,
-                ns: None,
-            },
-        }
-    });
+    let mut prune_job_config = None;
+    if !default_prune_job_existing(&config.name)? {
+        prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
+            let mut id = format!("default-{}-{}", config.name, Uuid::generate());
+            id.truncate(32);
+
+            PruneJobConfig {
+                id,
+                store: config.name.clone(),
+                comment: None,
+                disable: false,
+                schedule: schedule.clone(),
+                options: PruneJobOptions {
+                    keep: config.keep.clone(),
+                    max_depth: None,
+                    ns: None,
+                },
+            }
+        });
+    }
 
     // clearing prune settings in the datastore config, as they are now handled by prune jobs
     let config = DataStoreConfig {
diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs
index ce7b8ce565ce..747371067225 100644
--- a/src/api2/config/prune.rs
+++ b/src/api2/config/prune.rs
@@ -77,6 +77,17 @@ pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> {
     Ok(())
 }
 
+pub fn default_prune_job_existing(datastore: &str) -> Result<bool, Error> {
+    let (section_config, _digest) = prune::config()?;
+    let has_default = section_config
+        .sections
+        .keys()
+        .filter(|s| s.starts_with(&format!("default-{datastore}")))
+        .count()
+        > 0;
+    Ok(has_default)
+}
+
 #[api(
     protected: true,
     input: {
-- 
2.39.5
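A minimal sketch of that key check against the prune.cfg section IDs. Note this sketch matches on `default-{datastore}-` with a trailing dash, which is an extra safeguard not in the patch itself (the patch matches on `default-{datastore}`), so that e.g. a datastore named "store1" does not pick up jobs belonging to "store12":

    use std::collections::BTreeMap;

    // Sketch only: the map keys stand in for the prune.cfg section IDs.
    fn has_default_prune_job(sections: &BTreeMap<String, ()>, datastore: &str) -> bool {
        // trailing '-' avoids prefix collisions between similarly named datastores
        let prefix = format!("default-{datastore}-");
        sections.keys().any(|id| id.starts_with(&prefix))
    }

    fn main() {
        let mut jobs = BTreeMap::new();
        jobs.insert("default-store1-61b1c58e6a11".to_string(), ());
        assert!(has_default_prune_job(&jobs, "store1"));
        assert!(!has_default_prune_job(&jobs, "store12")); // no job for store12
        assert!(!has_default_prune_job(&jobs, "store"));   // prefix of "store1", still no match
    }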
From d.csapak at proxmox.com  Mon Nov 25 10:06:18 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Mon, 25 Nov 2024 10:06:18 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: sync job: fix source group filters based on sync direction
In-Reply-To: <20241122165406.676851-1-c.ebner@proxmox.com>
References: <20241122165406.676851-1-c.ebner@proxmox.com>
Message-ID: <64fdcd0e-7608-4fe0-8c4d-4259a460c097@proxmox.com>

two issues here:

if i use the add button on the top level 'Datastore' sync pane, there is
no initial datastore selected, but the groups get loaded with
'/api2/json/admin/datastore//groups' which returns
the index for a datastore. This populates the group dropdown with
records that don't contain the relevant infos and will be displayed as
empty lines.

imho we should only trigger the initial load when the datastore is set

also even if i select a datastore here, the groups won't get reloaded,
only if i change the local namespace or the remote (!) namespace.

the second issue is inline:

On 11/22/24 17:54, Christian Ebner wrote:
> Fix switching the source for group filters based on the sync jobs
> sync direction.
>
> The helper to set the local namespace for the group filers was
> introduced in commit 43a92c8c ("ui: group filter: allow to set
> namespace for local datastore"), but never used because lost during
> subsequent iterations of reworking the patch series.
>
> The switching is corrected by:
> - correctly initializing the local store and namespace for the group
>   filer of sync jobs in push direction in the controller init.
> - fixing an incorrect check for the sync direction in the remote
>   datastore selector change listener.
> - conditionally switching namespace to be set for the group filter in
>   the remote and local namespace selector change listeners.
> > Reported-by: Lukas Wagner > Signed-off-by: Christian Ebner > --- > www/window/SyncJobEdit.js | 23 +++++++++++++++++++++-- > 1 file changed, 21 insertions(+), 2 deletions(-) > > diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js > index 0e648e7b3..4f46eacc4 100644 > --- a/www/window/SyncJobEdit.js > +++ b/www/window/SyncJobEdit.js > @@ -69,6 +69,16 @@ Ext.define('PBS.window.SyncJobEdit', { > let nsSelector = view.down('pbsNamespaceSelector[name=ns]'); > nsSelector.setDatastore(value); > }, > + > + init: function() { > + let view = this.getView(); > + if (view.syncDirectionPush) { > + let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue(); > + let localStore = view.down('field[name=store]').getValue(); > + view.down('pbsGroupFilter').setLocalDatastore(localStore); > + view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs); > + } > + } eslint complains about a missing trailing comma here ;) > }, > > setValues: function(values) { > @@ -134,6 +144,11 @@ Ext.define('PBS.window.SyncJobEdit', { > let me = this; > let view = me.up('pbsSyncJobEdit'); > > + if (view.syncDirectionPush) { > + let localStore = view.down('field[name=store]').getValue(); > + view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs); > + } > + > let remoteNs = view.down('pbsRemoteNamespaceSelector[name=remote-ns]').getValue(); > let maxDepthField = view.down('field[name=max-depth]'); > maxDepthField.setLimit(localNs, remoteNs); > @@ -268,7 +283,8 @@ Ext.define('PBS.window.SyncJobEdit', { > remoteNamespaceField.setRemote(remote); > remoteNamespaceField.setRemoteStore(value); > > - if (!me.syncDirectionPush) { > + let view = me.up('pbsSyncJobEdit'); > + if (!view.syncDirectionPush) { > me.up('tabpanel').down('pbsGroupFilter').setRemoteDatastore(remote, value); > } else { > let localStore = me.up('pbsSyncJobEdit').down('field[name=store]').getValue(); > @@ -293,7 +309,10 @@ Ext.define('PBS.window.SyncJobEdit', { > > let remote = view.down('field[name=remote]').getValue(); > let remoteStore = view.down('field[name=remote-store]').getValue(); > - me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs); > + > + if (!view.syncDirectionPush) { > + me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs); > + } > > let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue(); > let maxDepthField = view.down('field[name=max-depth]'); From c.ebner at proxmox.com Mon Nov 25 10:17:13 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 25 Nov 2024 10:17:13 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] ui: sync job: fix source group filters based on sync direction In-Reply-To: <64fdcd0e-7608-4fe0-8c4d-4259a460c097@proxmox.com> References: <20241122165406.676851-1-c.ebner@proxmox.com> <64fdcd0e-7608-4fe0-8c4d-4259a460c097@proxmox.com> Message-ID: <3308a39e-cfa2-46d5-82cb-b454d8301051@proxmox.com> On 11/25/24 10:06, Dominik Csapak wrote: > two issues here: > > if i use the add button on the top level 'Datastore' sync pane, there is > no initial datastore selected, but the groups get loaded with '/api2/ > json/admin/datastore//groups' which returns > the index for a datastore. This populates the group dropdown with > records that don't contain > the relevant infos, and will displayed as empty lines. > > imho we should only trigger the initial load only when the datastore is set Ah yes, thanks for noticing! I did overlook that way of setting the sync job on friday. 
> > also even if i select a datastore here, the groups won't get reloaded, > only if i change the local namespace or the remote (!) namespace. Yes, the namespace has to be set when changing the store as well, not just the datastore. Will send a new version covering also this case. > > the second issue is inline: > > On 11/22/24 17:54, Christian Ebner wrote: >> Fix switching the source for group filters based on the sync jobs >> sync direction. >> >> The helper to set the local namespace for the group filers was >> introduced in commit 43a92c8c ("ui: group filter: allow to set >> namespace for local datastore"), but never used because lost during >> subsequent iterations of reworking the patch series. >> >> The switching is corrected by: >> - correctly initializing the local store and namespace for the group >> ?? filer of sync jobs in push direction in the controller init. >> - fixing an incorrect check for the sync direction in the remote >> ?? datastore selector change listener. >> - conditionally switching namespace to be set for the group filter in >> ?? the remote and local namespace selector change listeners. >> >> Reported-by: Lukas Wagner >> Signed-off-by: Christian Ebner >> --- >> ? www/window/SyncJobEdit.js | 23 +++++++++++++++++++++-- >> ? 1 file changed, 21 insertions(+), 2 deletions(-) >> >> diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js >> index 0e648e7b3..4f46eacc4 100644 >> --- a/www/window/SyncJobEdit.js >> +++ b/www/window/SyncJobEdit.js >> @@ -69,6 +69,16 @@ Ext.define('PBS.window.SyncJobEdit', { >> ????????? let nsSelector = view.down('pbsNamespaceSelector[name=ns]'); >> ????????? nsSelector.setDatastore(value); >> ????? }, >> + >> +??? init: function() { >> +??????? let view = this.getView(); >> +??????? if (view.syncDirectionPush) { >> +??????? let localNs = >> view.down('pbsNamespaceSelector[name=ns]').getValue(); >> +??????? let localStore = view.down('field[name=store]').getValue(); >> +??????? view.down('pbsGroupFilter').setLocalDatastore(localStore); >> +??????? view.down('pbsGroupFilter').setLocalNamespace(localStore, >> localNs); >> +??????? } >> +??? } > > eslint complains about a missing trailing comma here ;) Acked, will be fixed in the upcoming version as well. > >> ????? }, >> ????? setValues: function(values) { >> @@ -134,6 +144,11 @@ Ext.define('PBS.window.SyncJobEdit', { >> ????????????????? let me = this; >> ????????????????? let view = me.up('pbsSyncJobEdit'); >> +??????????????? if (view.syncDirectionPush) { >> +??????????????????? let localStore = >> view.down('field[name=store]').getValue(); >> + >> view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs); >> +??????????????? } >> + >> ????????????????? let remoteNs = >> view.down('pbsRemoteNamespaceSelector[name=remote-ns]').getValue(); >> ????????????????? let maxDepthField = view.down('field[name=max-depth]'); >> ????????????????? maxDepthField.setLimit(localNs, remoteNs); >> @@ -268,7 +283,8 @@ Ext.define('PBS.window.SyncJobEdit', { >> ????????????????? remoteNamespaceField.setRemote(remote); >> ????????????????? remoteNamespaceField.setRemoteStore(value); >> -??????????????? if (!me.syncDirectionPush) { >> +??????????????? let view = me.up('pbsSyncJobEdit'); >> +??????????????? if (!view.syncDirectionPush) { >> >> me.up('tabpanel').down('pbsGroupFilter').setRemoteDatastore(remote, >> value); >> ????????????????? } else { >> ????????????????????? 
>>                      let localStore = me.up('pbsSyncJobEdit').down('field[name=store]').getValue();
>> @@ -293,7 +309,10 @@ Ext.define('PBS.window.SyncJobEdit', {
>>                  let remote = view.down('field[name=remote]').getValue();
>>                  let remoteStore = view.down('field[name=remote-store]').getValue();
>> -                me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
>> +
>> +                if (!view.syncDirectionPush) {
>> +                    me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
>> +                }
>>                  let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
>>                  let maxDepthField = view.down('field[name=max-depth]');
>
>

From c.ebner at proxmox.com  Mon Nov 25 10:23:37 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Mon, 25 Nov 2024 10:23:37 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: sync job: fix source group filters based on sync direction
In-Reply-To: <3308a39e-cfa2-46d5-82cb-b454d8301051@proxmox.com>
References: <20241122165406.676851-1-c.ebner@proxmox.com>
 <64fdcd0e-7608-4fe0-8c4d-4259a460c097@proxmox.com>
 <3308a39e-cfa2-46d5-82cb-b454d8301051@proxmox.com>
Message-ID:

On 11/25/24 10:17, Christian Ebner wrote:
> On 11/25/24 10:06, Dominik Csapak wrote:
>> two issues here:
>>
>> if i use the add button on the top level 'Datastore' sync pane, there
>> is no initial datastore selected, but the groups get loaded with
>> '/api2/json/admin/datastore//groups' which returns the index for a
>> datastore. This populates the group dropdown with records that don't
>> contain the relevant infos, and will be displayed as empty lines.
>>
>> imho we should only trigger the initial load when the datastore is set
>
> Ah yes, thanks for noticing! I did overlook that way of setting the
> sync job on friday.
>
>> also even if i select a datastore here, the groups won't get reloaded,
>> only if i change the local namespace or the remote (!) namespace.
>
> Yes, the namespace has to be set when changing the store as well, not
> just the datastore. Will send a new version covering also this case.

Correction, I meant the group filter datastore has to be set.

>
>> the second issue is inline:
>>
>> On 11/22/24 17:54, Christian Ebner wrote:
>>> Fix switching the source for group filters based on the sync jobs
>>> sync direction.
>>>
>>> The helper to set the local namespace for the group filters was
>>> introduced in commit 43a92c8c ("ui: group filter: allow to set
>>> namespace for local datastore"), but never used because lost during
>>> subsequent iterations of reworking the patch series.
>>>
>>> The switching is corrected by:
>>> - correctly initializing the local store and namespace for the group
>>>   filter of sync jobs in push direction in the controller init.
>>> - fixing an incorrect check for the sync direction in the remote
>>>   datastore selector change listener.
>>> - conditionally switching namespace to be set for the group filter in
>>>   the remote and local namespace selector change listeners.
>>>
>>> Reported-by: Lukas Wagner
>>> Signed-off-by: Christian Ebner
>>> ---
>>>  www/window/SyncJobEdit.js | 23 +++++++++++++++++++++--
>>>  1 file changed, 21 insertions(+), 2 deletions(-)
>>>
>>> diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
>>> index 0e648e7b3..4f46eacc4 100644
>>> --- a/www/window/SyncJobEdit.js
>>> +++ b/www/window/SyncJobEdit.js
>>> @@ -69,6 +69,16 @@ Ext.define('PBS.window.SyncJobEdit', {
>>>          let nsSelector = view.down('pbsNamespaceSelector[name=ns]');
>>>          nsSelector.setDatastore(value);
>>>      },
>>> +
>>> +    init: function() {
>>> +        let view = this.getView();
>>> +        if (view.syncDirectionPush) {
>>> +        let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
>>> +        let localStore = view.down('field[name=store]').getValue();
>>> +        view.down('pbsGroupFilter').setLocalDatastore(localStore);
>>> +        view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs);
>>> +        }
>>> +    }
>>
>> eslint complains about a missing trailing comma here ;)
>
> Acked, will be fixed in the upcoming version as well.
>
>>
>>>      },
>>>      setValues: function(values) {
>>> @@ -134,6 +144,11 @@ Ext.define('PBS.window.SyncJobEdit', {
>>>                  let me = this;
>>>                  let view = me.up('pbsSyncJobEdit');
>>> +                if (view.syncDirectionPush) {
>>> +                    let localStore = view.down('field[name=store]').getValue();
>>> +                    view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs);
>>> +                }
>>> +
>>>                  let remoteNs = view.down('pbsRemoteNamespaceSelector[name=remote-ns]').getValue();
>>>                  let maxDepthField = view.down('field[name=max-depth]');
>>> @@ -268,7 +283,8 @@ Ext.define('PBS.window.SyncJobEdit', {
>>>                  remoteNamespaceField.setRemote(remote);
>>>                  remoteNamespaceField.setRemoteStore(value);
>>> -                if (!me.syncDirectionPush) {
>>> +                let view = me.up('pbsSyncJobEdit');
>>> +                if (!view.syncDirectionPush) {
>>>                      me.up('tabpanel').down('pbsGroupFilter').setRemoteDatastore(remote, value);
>>>                  } else {
>>>                      let localStore = me.up('pbsSyncJobEdit').down('field[name=store]').getValue();
>>> @@ -293,7 +309,10 @@ Ext.define('PBS.window.SyncJobEdit', {
>>>                  let remote = view.down('field[name=remote]').getValue();
>>>                  let remoteStore = view.down('field[name=remote-store]').getValue();
>>> -                me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
>>> +
>>> +                if (!view.syncDirectionPush) {
>>> +                    me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
>>> +                }
>>>                  let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
>>>                  let maxDepthField = view.down('field[name=max-depth]');
>>
>>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel

From f.gruenbichler at proxmox.com  Mon Nov 25 10:41:50 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Mon, 25 Nov 2024 10:41:50 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] server: push: log encountered empty backup groups during sync
In-Reply-To: <20241122173917.728728-1-c.ebner@proxmox.com>
References: <20241122173917.728728-1-c.ebner@proxmox.com>
Message-ID: <1732527618.i47auxui82.astroid@yuna.none>

On November 22, 2024 6:39 pm, Christian Ebner wrote:
> Log also empty backup groups with no snapshots encountered during the
> sync so the log output contains this additional information as well,
> reducing possible confusion.
>
> Nevertheless, continue with the regular logic, so that pruning of
> vanished snapshots is honored.
>
> Exemplary output in the sync jobs task log:
> ```
> 2024-11-22T18:32:40+01:00: Syncing datastore 'datastore', root namespace into datastore 'push-target-store', namespace 'test'
> 2024-11-22T18:32:40+01:00: Found 2 groups to sync (out of 2 total)
> 2024-11-22T18:32:40+01:00: skipped: 1 snapshot(s) (2024-11-22T13:40:18Z) - older than the newest snapshot present on sync target
> 2024-11-22T18:32:40+01:00: Group 'vm/200' contains no snapshots to sync to remote
> 2024-11-22T18:32:40+01:00: Finished syncing root namespace, current progress: 1 groups, 0 snapshots
> ```
>
> Reported-by: Fabian Grünbichler
> Signed-off-by: Christian Ebner
> ---
>  src/server/push.rs | 4 ++++
>  1 file changed, 4 insertions(+)
>
> diff --git a/src/server/push.rs b/src/server/push.rs
> index 082a6f49d..99757a3cc 100644
> --- a/src/server/push.rs
> +++ b/src/server/push.rs
> @@ -664,6 +664,10 @@ pub(crate) async fn push_group(
>      let mut snapshots: Vec<BackupDir> = params.source.list_backup_dirs(namespace, group).await?;
>      snapshots.sort_unstable_by(|a, b| a.time.cmp(&b.time));
>
> +    if snapshots.is_empty() {
> +        info!("Group '{group}' contains no snapshots to sync to remote");
> +    }
> +
>      let total_snapshots = snapshots.len();
>      let cutoff = params
>          .transfer_last
> --
> 2.39.5
>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>

From c.ebner at proxmox.com  Mon Nov 25 11:10:36 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Mon, 25 Nov 2024 11:10:36 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2] reuse-datastore: avoid creating another default prune job
In-Reply-To: <20241125085953.19828-1-g.goller@proxmox.com>
References: <20241125085953.19828-1-g.goller@proxmox.com>
Message-ID:

On 11/25/24 09:59, Gabriel Goller wrote:
> If a datastore with a default prune job is removed, the prune job is
> preserved as it is stored in /etc/proxmox-backup/prune.cfg. We also
> create a default prune job for every datastore; this means that when
> reusing a datastore that previously existed, you end up with duplicate
> prune jobs.

Looking at this once more, I am not so sure anymore that this should
only check for the default prune job?

Why not check if there is any prune job configured at all for this
datastore, and only if there is none create the new default prune job?
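For context, prune jobs end up as plain sections in
/etc/proxmox-backup/prune.cfg, keyed by job id and only referencing the
datastore via their `store` property, so checking for "any prune job of
this datastore" means scanning all sections for a matching `store` value.
An illustrative excerpt (job ids and retention options made up):

```
prune: default-store1
	store store1
	schedule daily
	keep-last 3

prune: custom-weekly
	store store1
	schedule weekly
	keep-weekly 4
```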
From f.gruenbichler at proxmox.com  Mon Nov 25 11:37:41 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Mon, 25 Nov 2024 11:37:41 +0100
Subject: [pbs-devel] applied-series: [PATCH proxmox-backup v6 0/4] fix #3786: resync corrupt chunks in sync-job
In-Reply-To: <20241122121617.185615-1-g.goller@proxmox.com>
References: <20241122121617.185615-1-g.goller@proxmox.com>
Message-ID: <1732530754.o6m38usul0.astroid@yuna.none>

with some slight rebasing (context), and one follow-up patch:

Subject: [PATCH proxmox-backup] sync config: forbid setting resync_corrupt for push jobs

they don't support it (yet), so don't allow setting it in the backend
either.

Signed-off-by: Fabian Grünbichler
---
 src/api2/config/sync.rs | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index 7ff6cae02..afaa0d5e4 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -229,6 +229,10 @@ pub fn create_sync_job(
         bail!("source and target datastore can't be the same");
     }

+    if sync_direction == SyncDirection::Push && config.resync_corrupt.is_some() {
+        bail!("push jobs do not support resync-corrupt option");
+    }
+
     if let Some(max_depth) = config.max_depth {
         if let Some(ref ns) = config.ns {
             ns.check_max_depth(max_depth)?;
@@ -389,6 +393,10 @@ pub fn update_sync_job(
         http_bail!(NOT_FOUND, "job '{id}' does not exist.")
     };

+    if sync_direction == SyncDirection::Push && update.resync_corrupt.is_some() {
+        bail!("push jobs do not support resync-corrupt option");
+    }
+
     if let Some(delete) = delete {
         for delete_prop in delete {
             match delete_prop {
--
2.39.5

there's some more follow-up potential:
- if we successfully re-synced a corrupt snapshot, it would be great to
  set the verifystate accordingly, since we know the snapshot is good
  now?
- if we re-sync the last snapshot during regular sync, we reset the
  verifystate from the remote side.. should we maybe compare the
  manifests after dropping the verifystate from them?

but the whole question of "how to handle verifystate and other metadata
when syncing" is probably a bigger can of worms anyway, so feel free to
ignore that one for now ;)

On November 22, 2024 1:16 pm, Gabriel Goller wrote:
> Add an option `resync-corrupt` that resyncs corrupt snapshots when running
> sync-job. This option checks if the local snapshot failed the last
> verification and if it did, overwrites the local snapshot with the
> remote one.
>
> This is quite useful, as we currently don't have an option to "fix"
> broken chunks/snapshots in any way, even if a healthy version is on
> another (e.g. offsite) instance.
>
> Important things to note are also: this has a slight performance
> penalty, as all the manifests have to be looked through, and a
> verification job has to be run beforehand, otherwise we do not know
> if the snapshot is healthy.
>
> Note: This series was originally written by Shannon! I just picked it
> up, rebased, and fixed the obvious comments on the last series.
>
> Changelog v6 (thanks @Fabian):
> - rebase
> - only allow resync-chunks option on pull snapshots
> - fix typo
>
> Changelog v5 (thanks @Fabian):
> - rebase
> - don't remove parsing error in verify_state helper
> - add error logs on failures
>
> Changelog v4 (thanks @Fabian):
> - make verify_state bubble up errors
> - call verify_state helper everywhere we need the verify_state
> - resync broken manifests (so resync when load_manifest fails)
>
> Changelog v3 (thanks @Fabian):
> - filter out snapshots earlier in the pull_group function
> - move verify_state to BackupManifest and fixed invocations
> - reverted verify_state Option -> Result state (It doesn't matter if we get an
>   error, we get that quite often f.e. in new backups)
> - removed some unnecessary log lines
> - removed some unnecessary imports and modifications
> - rebase to current master
>
> Changelog v2 (thanks @Thomas):
> - order git trailers
> - adjusted schema description to include broken indexes
> - change verify_state to return a Result<_,_>
> - print error if verify_state is not able to read the state
> - update docs on pull_snapshot function
> - simplify logic by combining flags
> - move log line out of loop to only print once that we resync the snapshot
>
> Changelog since RFC (Shannon's work):
> - rename option from deep-sync to resync-corrupt
> - rebase on latest master (and change implementation details, as a
>   lot has changed around sync-jobs)
>
> proxmox-backup:
>
> Gabriel Goller (4):
>   snapshot: add helper function to retrieve verify_state
>   fix #3786: api: add resync-corrupt option to sync jobs
>   fix #3786: ui/cli: add resync-corrupt option on sync-jobs
>   fix #3786: docs: add resync-corrupt option to sync-job
>
>  docs/managing-remotes.rst         |  6 +++
>  pbs-api-types/src/jobs.rs         | 10 +++++
>  pbs-datastore/src/backup_info.rs  |  9 +++-
>  pbs-datastore/src/manifest.rs     | 14 +++++-
>  src/api2/admin/datastore.rs       | 16 +++----
>  src/api2/backup/mod.rs            | 18 +++++---
>  src/api2/config/sync.rs           |  4 ++
>  src/api2/pull.rs                  |  9 +++-
>  src/backup/verify.rs              | 13 +++---
>  src/bin/proxmox-backup-manager.rs | 16 ++++++-
>  src/server/pull.rs                | 72 ++++++++++++++++++++++++-------
>  www/window/SyncJobEdit.js         | 14 ++++++
>  12 files changed, 158 insertions(+), 43 deletions(-)
>
>
> Summary over all repositories:
>   12 files changed, 158 insertions(+), 43 deletions(-)
>
> --
> Generated by git-murpp 0.7.1
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>

From f.gruenbichler at proxmox.com  Mon Nov 25 11:56:13 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Mon, 25 Nov 2024 11:56:13 +0100
Subject: [pbs-devel] applied-series: [PATCH v3 proxmox-backup 1/2] docs: add security implications of prune and change detection mode
In-Reply-To: <20241114151551.407971-1-c.ebner@proxmox.com>
References: <20241114151551.407971-1-c.ebner@proxmox.com>
Message-ID: <1732532163.ggb994v280.astroid@yuna.none>

thanks!

On November 14, 2024 4:15 pm, Christian Ebner wrote:
> Users should be made aware that the data stored in chunks outlives
> the backup snapshots on pruning and that backups created using the
> change-detection-mode set to metadata might reference chunks
> containing files which have vanished since the previous backup, but
> might still be accessible when access to the chunks raw data is
> possible (client or server side).
>
> Reviewed-by: Gabriel Goller
> Signed-off-by: Christian Ebner
> ---
> changes since version 2:
> - s/Further/Moreover/ for second sentence starting with Further
> - fix formatting for metadata by using double backticks
> - Improve text flow based on suggestions
>
>  docs/maintenance.rst | 30 ++++++++++++++++++++++++++++--
>  1 file changed, 28 insertions(+), 2 deletions(-)
>
> diff --git a/docs/maintenance.rst b/docs/maintenance.rst
> index 4bb135e4e..601756246 100644
> --- a/docs/maintenance.rst
> +++ b/docs/maintenance.rst
> @@ -6,8 +6,34 @@ Maintenance Tasks
>  Pruning
>  -------
>
> -Prune lets you specify which backup snapshots you want to keep.
> -The following retention options are available:
> +Prune lets you specify which backup snapshots you want to keep, removing others.
> +When pruning a snapshot, only the snapshot metadata (manifest, indices, blobs,
> +log and notes) is removed. The chunks containing the actual backup data and
> +previously referenced by the pruned snapshot, have to be removed by a garbage
> +collection run.
> +
> +.. Caution:: Take into consideration that sensitive information stored in a
> +   given data chunk will outlive pruned snapshots and remain present in the
> +   datastore as long as referenced by at least one backup snapshot. Further,
> +   *even* if no snapshot references a given chunk, it will remain present until
> +   removed by the garbage collection.
> +
> +   Moreover, file-level backups created using the change detection mode
> +   ``metadata`` can reference backup chunks containing files which have vanished
> +   since the previous backup. These files might still be accessible by reading
> +   the chunks raw data (client or server side).
> +
> +   To remove chunks containing sensitive data, prune any snapshot made while the
> +   data was part of the backup input and run a garbage collection. Further, if
> +   using file-based backups with change detection mode ``metadata``,
> +   additionally prune all snapshots since the sensitive data was no longer part
> +   of the backup input and run a garbage collection.
> +
> +   The no longer referenced chunks will then be marked for deletion on the next
> +   garbage collection run and removed by a subsequent run after the grace
> +   period.
> +
> +The following retention options are available for pruning:
>
>  ``keep-last <N>``
>    Keep the last ``<N>`` backup snapshots.
> --
> 2.39.5
>
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>

From c.ebner at proxmox.com  Mon Nov 25 12:03:23 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Mon, 25 Nov 2024 12:03:23 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup] ui: sync job: fix source group filters based on sync direction
Message-ID: <20241125110323.169074-1-c.ebner@proxmox.com>

Fix switching the source for group filters based on the sync jobs
sync direction.

The helper to set the local namespace for the group filters was
introduced in commit 43a92c8c ("ui: group filter: allow to set
namespace for local datastore"), but never used because lost during
subsequent iterations of reworking the patch series.

The switching is corrected by:
- correctly initializing the local store and namespace for the group
  filter of sync jobs in push direction in the controller init, if a
  datastore is set.
- fixing an incorrect check for the sync direction in the remote
  datastore selector change listener.
- conditionally switching namespace to be set for the group filter in
  the remote and local namespace selector change listeners.
- conditionally switching datastore to be set for the group filter in
  the local datastore selector change listener.

Reported-by: Lukas Wagner
Signed-off-by: Christian Ebner
---
changes since version 1 (thanks @Dominik for catching the issues):
- only init group filters if there is a datastore given
- also switch group filters datastore when switching local datastore
- removed unneeded setLocalDatastore, as setLocalNamespace sets
  datastore and namespace
- fixed eslint issues
- updated commit message

 www/window/SyncJobEdit.js | 31 +++++++++++++++++++++++++++++--
 1 file changed, 29 insertions(+), 2 deletions(-)

diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index 0e648e7b3..2870f74fa 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -69,6 +69,14 @@ Ext.define('PBS.window.SyncJobEdit', {
         let nsSelector = view.down('pbsNamespaceSelector[name=ns]');
         nsSelector.setDatastore(value);
     },
+
+    init: function() {
+        let view = this.getView();
+        if (view.syncDirectionPush && view.datastore !== undefined) {
+            let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
+            view.down('pbsGroupFilter').setLocalNamespace(view.datastore, localNs);
+        }
+    },
 },

 setValues: function(values) {
@@ -121,6 +129,16 @@ Ext.define('PBS.window.SyncJobEdit', {
             xtype: 'pbsDataStoreSelector',
             allowBlank: false,
         },
+        listeners: {
+            change: function(field, localStore) {
+                let me = this;
+                let view = me.up('pbsSyncJobEdit');
+                if (view.syncDirectionPush) {
+                    let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
+                    view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs);
+                }
+            },
+        },
     },
     {
         xtype: 'pbsNamespaceSelector',
@@ -134,6 +152,11 @@ Ext.define('PBS.window.SyncJobEdit', {
             let me = this;
             let view = me.up('pbsSyncJobEdit');

+            if (view.syncDirectionPush) {
+                let localStore = view.down('field[name=store]').getValue();
+                view.down('pbsGroupFilter').setLocalNamespace(localStore, localNs);
+            }
+
             let remoteNs = view.down('pbsRemoteNamespaceSelector[name=remote-ns]').getValue();
             let maxDepthField = view.down('field[name=max-depth]');
             maxDepthField.setLimit(localNs, remoteNs);
@@ -268,7 +291,8 @@ Ext.define('PBS.window.SyncJobEdit', {
             remoteNamespaceField.setRemote(remote);
             remoteNamespaceField.setRemoteStore(value);

-            if (!me.syncDirectionPush) {
+            let view = me.up('pbsSyncJobEdit');
+            if (!view.syncDirectionPush) {
                 me.up('tabpanel').down('pbsGroupFilter').setRemoteDatastore(remote, value);
             } else {
                 let localStore = me.up('pbsSyncJobEdit').down('field[name=store]').getValue();
@@ -293,7 +317,10 @@ Ext.define('PBS.window.SyncJobEdit', {

             let remote = view.down('field[name=remote]').getValue();
             let remoteStore = view.down('field[name=remote-store]').getValue();
-            me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
+
+            if (!view.syncDirectionPush) {
+                me.up('tabpanel').down('pbsGroupFilter').setRemoteNamespace(remote, remoteStore, remoteNs);
+            }

             let localNs = view.down('pbsNamespaceSelector[name=ns]').getValue();
             let maxDepthField = view.down('field[name=max-depth]');
--
2.39.5

From d.csapak at proxmox.com  Mon Nov 25 12:15:32 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Mon, 25 Nov 2024 12:15:32 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/6] api: admin: sync: add direction to sync job status
In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com>
References:
 <20241125111537.1504618-1-d.csapak@proxmox.com>
Message-ID: <20241125111537.1504618-2-d.csapak@proxmox.com>

Signed-off-by: Dominik Csapak
---
 pbs-api-types/src/jobs.rs | 6 ++++++
 src/api2/admin/sync.rs    | 1 +
 2 files changed, 7 insertions(+)

diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index 52520811b..e18197fb1 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -660,6 +660,9 @@ impl SyncJobConfig {
         status: {
             type: JobScheduleStatus,
         },
+        direction: {
+            type: SyncDirection,
+        },
     },
 )]
 #[derive(Serialize, Deserialize, Clone, PartialEq)]
@@ -670,6 +673,9 @@ pub struct SyncJobStatus {
     pub config: SyncJobConfig,
     #[serde(flatten)]
     pub status: JobScheduleStatus,
+
+    /// The direction of the job
+    pub direction: SyncDirection,
 }

 /// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API

diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs
index 3a41aa2c7..479f1a958 100644
--- a/src/api2/admin/sync.rs
+++ b/src/api2/admin/sync.rs
@@ -84,6 +84,7 @@ pub fn list_config_sync_jobs(
         list.push(SyncJobStatus {
             config: job,
             status,
+            direction: sync_direction,
         });
     }
--
2.39.5

From d.csapak at proxmox.com  Mon Nov 25 12:15:36 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Mon, 25 Nov 2024 12:15:36 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 5/6] ui: sync jobs: change default sorting to 'store' -> 'direction' -> 'id'
In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com>
References: <20241125111537.1504618-1-d.csapak@proxmox.com>
Message-ID: <20241125111537.1504618-6-d.csapak@proxmox.com>

instead of just the id, which makes the list in the global datastore
view a bit easier to digest (since it's now sorted by store first)

Signed-off-by: Dominik Csapak
---
 www/config/SyncView.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/config/SyncView.js b/www/config/SyncView.js
index 7f68bf7cc..3471100b6 100644
--- a/www/config/SyncView.js
+++ b/www/config/SyncView.js
@@ -140,7 +140,7 @@ Ext.define('PBS.config.SyncJobView', {
         type: 'diff',
         autoDestroy: true,
         autoDestroyRstore: true,
-        sorters: 'id',
+        sorters: ['store', 'direction', 'id'],
         rstore: {
             type: 'update',
             storeid: 'pbs-sync-jobs-status',
--
2.39.5

From d.csapak at proxmox.com  Mon Nov 25 12:15:33 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Mon, 25 Nov 2024 12:15:33 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/6] api: admin: sync: add optional 'all' sync type for listing
In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com>
References:
Datastore Synchronization Job Management use anyhow::{bail, format_err, Error}; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; use serde_json::Value; use proxmox_router::{ @@ -23,6 +23,30 @@ use crate::{ server::sync::do_sync_job, }; +// FIXME: 4.x make 'all' the default +#[api()] +#[derive(Copy, Clone, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "kebab-case")] +/// The direction of the listed sync jobs: push, pull or all. +pub enum ListSyncDirection { + /// All directions + All, + /// Sync direction push + Push, + /// Sync direction pull + #[default] + Pull, +} + +impl From for ListSyncDirection { + fn from(value: SyncDirection) -> Self { + match value { + SyncDirection::Pull => ListSyncDirection::Pull, + SyncDirection::Push => ListSyncDirection::Push, + } + } +} + #[api( input: { properties: { @@ -31,7 +55,7 @@ use crate::{ optional: true, }, "sync-direction": { - type: SyncDirection, + type: ListSyncDirection, optional: true, }, }, @@ -49,7 +73,7 @@ use crate::{ /// List all configured sync jobs pub fn list_config_sync_jobs( store: Option, - sync_direction: Option, + sync_direction: Option, _param: Value, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { @@ -59,23 +83,27 @@ pub fn list_config_sync_jobs( let (config, digest) = sync::config()?; let sync_direction = sync_direction.unwrap_or_default(); - let job_config_iter = config - .convert_to_typed_array(sync_direction.as_config_type_str())? - .into_iter() - .filter(|job: &SyncJobConfig| { - if let Some(store) = &store { - &job.store == store - } else { - true - } - }) - .filter(|job: &SyncJobConfig| { - check_sync_job_read_access(&user_info, &auth_id, job, sync_direction) - }); - let mut list = Vec::new(); + let mut list = Vec::with_capacity(config.sections.len()); + for (_, (sync_type, job)) in config.sections.into_iter() { + let job: SyncJobConfig = serde_json::from_value(job)?; + let direction = SyncDirection::from_config_type_str(&sync_type)?; + + match &store { + Some(store) if &job.store != store => continue, + _ => {} + } + + match &sync_direction { + ListSyncDirection::Pull if direction != SyncDirection::Pull => continue, + ListSyncDirection::Push if direction != SyncDirection::Push => continue, + _ => {} + } + + if !check_sync_job_read_access(&user_info, &auth_id, &job, direction) { + continue; + } - for job in job_config_iter { let last_state = JobState::load("syncjob", &job.id) .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?; @@ -84,7 +112,7 @@ pub fn list_config_sync_jobs( list.push(SyncJobStatus { config: job, status, - direction: sync_direction, + direction, }); } diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 37d1528c7..8c307a233 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -526,9 +526,12 @@ pub async fn delete_datastore( delete_verification_job(job.config.id, None, rpcenv)? } for direction in [SyncDirection::Pull, SyncDirection::Push] { - for job in - list_config_sync_jobs(Some(name.clone()), Some(direction), Value::Null, rpcenv)? - { + for job in list_config_sync_jobs( + Some(name.clone()), + Some(direction.into()), + Value::Null, + rpcenv, + )? { delete_sync_job(job.config.id, None, rpcenv)? 
} } diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs index f156c8cfd..2081b7b75 100644 --- a/src/api2/config/notifications/mod.rs +++ b/src/api2/config/notifications/mod.rs @@ -155,7 +155,7 @@ pub fn get_values( } for direction in [SyncDirection::Pull, SyncDirection::Push] { - let sync_jobs = list_config_sync_jobs(None, Some(direction), param.clone(), rpcenv)?; + let sync_jobs = list_config_sync_jobs(None, Some(direction.into()), param.clone(), rpcenv)?; for job in sync_jobs { values.push(MatchableValue { field: "job-id".into(), -- 2.39.5 From d.csapak at proxmox.com Mon Nov 25 12:15:31 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 12:15:31 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/6] sync job ui improvements Message-ID: <20241125111537.1504618-1-d.csapak@proxmox.com> this series aims to improve the pull/push sync job ui a bit, by: * unifying both types into one list * adding a helpful tooltip for local owner/user * adding a filter for the sync jobs * adding a 'all' mode for listing all jobs on the /admin/sync api Dominik Csapak (6): api: admin: sync: add direction to sync job status api: admin: sync: add optional 'all' sync type for listing cli: manager: sync: add 'sync-direction' parameter to list ui: sync jobs: revert to single list for pull/push jobs ui: sync jobs: change default sorting to 'store' -> 'direction' -> 'id' ui: sync jobs: add search box pbs-api-types/src/jobs.rs | 6 ++ src/api2/admin/sync.rs | 65 ++++++++---- src/api2/config/datastore.rs | 9 +- src/api2/config/notifications/mod.rs | 2 +- src/bin/proxmox_backup_manager/sync.rs | 6 +- www/Makefile | 1 - www/config/SyncPullPushView.js | 61 ----------- www/config/SyncView.js | 134 ++++++++++++++++++++----- www/datastore/DataStoreList.js | 2 +- www/datastore/Panel.js | 2 +- 10 files changed, 178 insertions(+), 110 deletions(-) delete mode 100644 www/config/SyncPullPushView.js -- 2.39.5 From d.csapak at proxmox.com Mon Nov 25 12:15:34 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 12:15:34 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/6] cli: manager: sync: add 'sync-direction' parameter to list In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com> References: <20241125111537.1504618-1-d.csapak@proxmox.com> Message-ID: <20241125111537.1504618-4-d.csapak@proxmox.com> so one can list pull and push jobs Signed-off-by: Dominik Csapak --- not really happy with this, ideally we would also allow 'all' here, but then we'd have to show the type in the return value too, which is not really possible with the 'SyncJobConfig' type (as that get's deserialized) where the direction is part of the config section type alternatively we could here switch to the '/admin/sync' api from the '/config/sync' api which does contain the sync-direction (and also the status). 
then it'd be easy to show all types src/bin/proxmox_backup_manager/sync.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/bin/proxmox_backup_manager/sync.rs b/src/bin/proxmox_backup_manager/sync.rs index 005cce6f3..b08bfb58b 100644 --- a/src/bin/proxmox_backup_manager/sync.rs +++ b/src/bin/proxmox_backup_manager/sync.rs @@ -4,7 +4,7 @@ use serde_json::Value; use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; use proxmox_schema::api; -use pbs_api_types::JOB_ID_SCHEMA; +use pbs_api_types::{SyncDirection, JOB_ID_SCHEMA}; use proxmox_backup::api2; @@ -20,6 +20,10 @@ fn render_group_filter(value: &Value, _record: &Value) -> Result #[api( input: { properties: { + "sync-direction": { + type: SyncDirection, + optional: true, + }, "output-format": { schema: OUTPUT_FORMAT, optional: true, -- 2.39.5 From d.csapak at proxmox.com Mon Nov 25 12:15:37 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 12:15:37 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 6/6] ui: sync jobs: add search box In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com> References: <20241125111537.1504618-1-d.csapak@proxmox.com> Message-ID: <20241125111537.1504618-7-d.csapak@proxmox.com> filter by (remote) store, remote, id, owner, direction. Local store is only included on the globabl view not the datastore specific one. Signed-off-by: Dominik Csapak --- www/config/SyncView.js | 62 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/www/config/SyncView.js b/www/config/SyncView.js index 3471100b6..ca1f7ecd6 100644 --- a/www/config/SyncView.js +++ b/www/config/SyncView.js @@ -31,6 +31,40 @@ Ext.define('PBS.config.SyncJobView', { controller: { xclass: 'Ext.app.ViewController', + search: function(tf, value) { + let me = this; + let view = me.getView(); + let store = view.getStore(); + if (!value && value !== 0) { + store.clearFilter(); + tf.triggers.clear.setVisible(false); + return; + } + tf.triggers.clear.setVisible(true); + if (value.length < 2) return; + + store.clearFilter(); + + let fieldsToSearch = ['direction', 'id', 'remote', 'remote-store', 'owner']; + if (!view.datastore) { + fieldsToSearch.push('store'); + } + value = value.toLowerCase(); + + + store.addFilter(function(rec) { + let found = false; + for (const field of fieldsToSearch) { + let recValue = rec.data[field] ?? 
''; + if (recValue.toString().toLowerCase().indexOf(value) !== -1) { + found = true; + break; + } + } + return found; + }); + }, + addPullSyncJob: function() { this.addSyncJob('pull'); }, @@ -197,6 +231,34 @@ Ext.define('PBS.config.SyncJobView', { handler: 'runSyncJob', disabled: true, }, + '->', + { + xtype: 'tbtext', + html: gettext('Search'), + }, + { + xtype: 'textfield', + reference: 'searchbox', + emptyText: gettext('(remote) store, remote, id, owner, direction'), + minWidth: 300, + triggers: { + clear: { + cls: 'pmx-clear-trigger', + weight: -1, + hidden: true, + handler: function() { + this.triggers.clear.setVisible(false); + this.setValue(''); + }, + }, + }, + listeners: { + change: { + fn: 'search', + buffer: 500, + }, + }, + }, ], viewConfig: { -- 2.39.5 From d.csapak at proxmox.com Mon Nov 25 12:15:35 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 12:15:35 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 4/6] ui: sync jobs: revert to single list for pull/push jobs In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com> References: <20241125111537.1504618-1-d.csapak@proxmox.com> Message-ID: <20241125111537.1504618-5-d.csapak@proxmox.com> but add a separate column for the direction so one still sees the separate jobs. change the 'local owner/user' to a single column, but add a tooltip in the header to explain when it does what. This makes the 'SyncJobsPullPushView' unnecessary, so delete it. Signed-off-by: Dominik Csapak --- not really sure about the tooltip helptext for the owner/user, but did not come up with something better for now... www/Makefile | 1 - www/config/SyncPullPushView.js | 61 ----------------------------- www/config/SyncView.js | 70 +++++++++++++++++++++++----------- www/datastore/DataStoreList.js | 2 +- www/datastore/Panel.js | 2 +- 5 files changed, 50 insertions(+), 86 deletions(-) delete mode 100644 www/config/SyncPullPushView.js diff --git a/www/Makefile b/www/Makefile index d35e81283..609a0ba67 100644 --- a/www/Makefile +++ b/www/Makefile @@ -61,7 +61,6 @@ JSSRC= \ config/TrafficControlView.js \ config/ACLView.js \ config/SyncView.js \ - config/SyncPullPushView.js \ config/VerifyView.js \ config/PruneView.js \ config/GCView.js \ diff --git a/www/config/SyncPullPushView.js b/www/config/SyncPullPushView.js deleted file mode 100644 index 3460bc662..000000000 --- a/www/config/SyncPullPushView.js +++ /dev/null @@ -1,61 +0,0 @@ -Ext.define('PBS.config.SyncPullPush', { - extend: 'Ext.panel.Panel', - alias: 'widget.pbsSyncJobPullPushView', - title: gettext('Sync Jobs'), - - mixins: ['Proxmox.Mixin.CBind'], - - layout: { - type: 'vbox', - align: 'stretch', - multi: true, - bodyPadding: 5, - }, - defaults: { - collapsible: false, - margin: 5, - }, - scrollable: true, - items: [ - { - xtype: 'pbsSyncJobView', - itemId: 'syncJobsPull', - syncDirection: 'pull', - cbind: { - datastore: '{datastore}', - }, - minHeight: 125, // shows at least one line of content - }, - { - xtype: 'splitter', - performCollapse: false, - }, - { - xtype: 'pbsSyncJobView', - itemId: 'syncJobsPush', - syncDirection: 'push', - cbind: { - datastore: '{datastore}', - }, - flex: 1, - minHeight: 125, // shows at least one line of content - }, - ], - initComponent: function() { - let me = this; - - let subPanelIds = me.items.map(el => el.itemId).filter(id => !!id); - - me.callParent(); - - for (const itemId of subPanelIds) { - let component = me.getComponent(itemId); - component.relayEvents(me, ['activate', 'deactivate', 'destroy']); - } - }, - - cbindData: 
function(initialConfig) { - let me = this; - me.datastore = initialConfig.datastore ? initialConfig.datastore : undefined; - }, -}); diff --git a/www/config/SyncView.js b/www/config/SyncView.js index c8b2181c4..7f68bf7cc 100644 --- a/www/config/SyncView.js +++ b/www/config/SyncView.js @@ -26,26 +26,25 @@ Ext.define('PBS.config.SyncJobView', { stateful: true, stateId: 'grid-sync-jobs-v1', - title: gettext('Sync Jobs - Pull Direction'), - ownerHeader: gettext('Owner'), - - cbindData: function(initialConfig) { - let me = this; - if (me.syncDirection === 'push') { - me.title = gettext('Sync Jobs - Push Direction'); - me.ownerHeader = gettext('Local User'); - } - }, + title: gettext('Sync Jobs'), controller: { xclass: 'Ext.app.ViewController', - addSyncJob: function() { + addPullSyncJob: function() { + this.addSyncJob('pull'); + }, + + addPushSyncJob: function() { + this.addSyncJob('push'); + }, + + addSyncJob: function(syncDirection) { let me = this; let view = me.getView(); Ext.create('PBS.window.SyncJobEdit', { datastore: view.datastore, - syncDirection: view.syncDirection, + syncDirection, listeners: { destroy: function() { me.reload(); @@ -63,7 +62,7 @@ Ext.define('PBS.config.SyncJobView', { Ext.create('PBS.window.SyncJobEdit', { datastore: view.datastore, id: selection[0].data.id, - syncDirection: view.syncDirection, + syncDirection: selection[0].data.direction, listeners: { destroy: function() { me.reload(); @@ -125,9 +124,7 @@ Ext.define('PBS.config.SyncJobView', { if (view.datastore !== undefined) { params.store = view.datastore; } - if (view.syncDirection !== undefined) { - params["sync-direction"] = view.syncDirection; - } + params['sync-direction'] = 'all'; view.getStore().rstore.getProxy().setExtraParams(params); Proxmox.Utils.monStoreErrors(view, view.getStore().rstore); }, @@ -158,10 +155,21 @@ Ext.define('PBS.config.SyncJobView', { tbar: [ { - xtype: 'proxmoxButton', text: gettext('Add'), - handler: 'addSyncJob', - selModel: false, + menu: [ + { + text: gettext('Add Pull Sync Job'), + iconCls: "fa fa-fw fa-download", + handler: 'addPullSyncJob', + selModel: false, + }, + { + text: gettext('Add Push Sync Job'), + iconCls: "fa fa-fw fa-upload", + handler: 'addPushSyncJob', + selModel: false, + }, + ], }, { xtype: 'proxmoxButton', @@ -205,6 +213,23 @@ Ext.define('PBS.config.SyncJobView', { flex: 1, sortable: true, }, + { + header: gettext('Direction'), + dataIndex: 'direction', + renderer: function(value) { + let iconCls, text; + if (value === 'pull') { + iconCls = 'download'; + text = gettext('Pull'); + } else { + iconCls = 'upload'; + text = gettext('Push'); + } + return ` ${text}`; + }, + width: 100, + sortable: true, + }, { header: gettext('Local Store'), dataIndex: 'store', @@ -245,9 +270,10 @@ Ext.define('PBS.config.SyncJobView', { sortable: true, }, { - cbind: { - header: '{ownerHeader}', - }, + header: `${gettext('Local Owner/User')} + ${gettext("Push: The local user used for access control.")} + ">`, dataIndex: 'owner', renderer: 'render_optional_owner', flex: 2, diff --git a/www/datastore/DataStoreList.js b/www/datastore/DataStoreList.js index 22ef18540..fc68cfc10 100644 --- a/www/datastore/DataStoreList.js +++ b/www/datastore/DataStoreList.js @@ -239,7 +239,7 @@ Ext.define('PBS.datastore.DataStores', { { iconCls: 'fa fa-refresh', itemId: 'syncjobs', - xtype: 'pbsSyncJobPullPushView', + xtype: 'pbsSyncJobView', }, { iconCls: 'fa fa-check-circle', diff --git a/www/datastore/Panel.js b/www/datastore/Panel.js index e1da7cfac..ad9fc10fe 100644 --- a/www/datastore/Panel.js +++ 
b/www/datastore/Panel.js @@ -68,7 +68,7 @@ Ext.define('PBS.DataStorePanel', { { iconCls: 'fa fa-refresh', itemId: 'syncjobs', - xtype: 'pbsSyncJobPullPushView', + xtype: 'pbsSyncJobView', cbind: { datastore: '{datastore}', }, -- 2.39.5 From f.gruenbichler at proxmox.com Mon Nov 25 12:29:21 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 12:29:21 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 0/4] fix #2996: client: allow optional match patterns for restore In-Reply-To: <20241112104316.206282-1-c.ebner@proxmox.com> References: <20241112104316.206282-1-c.ebner@proxmox.com> Message-ID: <1732534011.hf3x11ngo1.astroid@yuna.none> Consider this Reviewed-by: Fabian Gr?nbichler Tested-by: Fabian Gr?nbichler but I'd like to add the following on-top, unless you object: Subject: [PATCH proxmox-backup] api types: replace PathPatterns with Vec PathPatterns is hard to distinguish from PathPattern, so would need to be renamed anyway.. but there isn't really a reason to define a separate API type just for this. Signed-off-by: Fabian Gr?nbichler --- pbs-api-types/src/pathpatterns.rs | 27 +-------------------------- proxmox-backup-client/src/main.rs | 8 ++++++-- pxar-bin/src/main.rs | 9 +++++++-- 3 files changed, 14 insertions(+), 30 deletions(-) diff --git a/pbs-api-types/src/pathpatterns.rs b/pbs-api-types/src/pathpatterns.rs index c40926a44..505ecc8aa 100644 --- a/pbs-api-types/src/pathpatterns.rs +++ b/pbs-api-types/src/pathpatterns.rs @@ -1,4 +1,4 @@ -use proxmox_schema::{const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema}; +use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema}; use serde::{Deserialize, Serialize}; @@ -13,12 +13,6 @@ pub const PATH_PATTERN_SCHEMA: Schema = .format(&PATH_PATTERN_FORMAT) .schema(); -pub const PATH_PATTERN_LIST_SCHEMA: Schema = ArraySchema::new( - "List of paths or match patterns for matching filenames.", - &PATH_PATTERN_SCHEMA, -) -.schema(); - #[derive(Default, Deserialize, Serialize)] /// Path or path pattern for filename matching pub struct PathPattern { @@ -34,22 +28,3 @@ impl AsRef<[u8]> for PathPattern { self.pattern.as_bytes() } } - -#[derive(Default, Deserialize, Serialize)] -/// Array of paths and/or path patterns for filename matching -pub struct PathPatterns { - patterns: Vec, -} - -impl ApiType for PathPatterns { - const API_SCHEMA: Schema = PATH_PATTERN_LIST_SCHEMA; -} - -impl IntoIterator for PathPatterns { - type Item = PathPattern; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.patterns.into_iter() - } -} diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs index cfeed77d7..89f91e2b5 100644 --- a/proxmox-backup-client/src/main.rs +++ b/proxmox-backup-client/src/main.rs @@ -26,7 +26,7 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; use pbs_api_types::{ ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart, - BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PathPatterns, + BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PathPattern, PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, @@ -1407,7 +1407,11 @@ We do not extract '.pxar' archives when writing to standard output. 
flatten: true, }, pattern: { - type: PathPatterns, + type: Array, + items: { + type: PathPattern, + }, + description: "Path or match pattern to limit files that get restored.", optional: true, }, "allow-existing-dirs": { diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs index eb3580d92..7dff1e38c 100644 --- a/pxar-bin/src/main.rs +++ b/pxar-bin/src/main.rs @@ -13,7 +13,8 @@ use serde_json::Value; use tokio::signal::unix::{signal, SignalKind}; use pathpatterns::{MatchEntry, MatchType, PatternFlag}; -use pbs_api_types::PathPatterns; + +use pbs_api_types::PathPattern; use pbs_client::pxar::tools::format_single_line_entry; use pbs_client::pxar::{ Flags, OverwriteFlags, PxarExtractOptions, PxarWriters, ENCODER_MAX_ENTRIES, @@ -55,8 +56,12 @@ fn extract_archive_from_reader( description: "Archive name.", }, pattern: { - type: PathPatterns, + type: Array, + items: { + type: PathPattern, + }, optional: true, + description: "Path or match pattern to limit files that get restored.", }, target: { description: "Target directory", -- 2.39.5 On November 12, 2024 11:43 am, Christian Ebner wrote: > This patches implement the api types to allow input validation for > pathpatterns and reuse them in the pxar-bin, the catalog shell as > well as the newly exposed optional restore patterns to the backup > clients restore command. > > Patterns are parsed and passed along to the preexisting restore > logic via the `PxarExtractOptions`. > > To correctly work also with split pxar archives, this patches depend > on the following patch being applied to the pxar repo first: > https://lore.proxmox.com/pbs-devel/20240918150047.485551-1-c.ebner at proxmox.com/ > > changes since version 5: > - rebased onto current master > > changes since version 4: > - rebased onto current master > - fixed passing patterns via cli for pxar extract > > changes since version 3: > - s/matches/patterns for bail message, thanks for testing and > catching this Gabriel! 
> > changes since version 2: > - added API types as suggested > - reuse same API types for proxmox-backup-client catalog shell and > restore as well as the pxar extract > - use simple reference instead of `as_slice()` when passing vector of > patterns > > Link to bugtracker issue: > https://bugzilla.proxmox.com/show_bug.cgi?id=2996 > > Christian Ebner (4): > api-types: implement dedicated api type for match patterns > pxar: bin: use dedicated api type for restore pattern > client: catalog shell: use dedicated api type for patterns > fix #2996: client: allow optional match patterns for restore > > pbs-api-types/src/lib.rs | 3 ++ > pbs-api-types/src/pathpatterns.rs | 55 +++++++++++++++++++++++++++++++ > pbs-client/src/catalog_shell.rs | 7 ++-- > proxmox-backup-client/src/main.rs | 29 +++++++++++++--- > pxar-bin/Cargo.toml | 1 + > pxar-bin/src/main.rs | 26 +++++++-------- > 6 files changed, 99 insertions(+), 22 deletions(-) > create mode 100644 pbs-api-types/src/pathpatterns.rs > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From c.ebner at proxmox.com Mon Nov 25 12:30:15 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 25 Nov 2024 12:30:15 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/6] sync job ui improvements In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com> References: <20241125111537.1504618-1-d.csapak@proxmox.com> Message-ID: <377618fd-0ea9-46ba-9aec-a47387eca50d@proxmox.com> On 11/25/24 12:15, Dominik Csapak wrote: > this series aims to improve the pull/push sync job ui a bit, by: > > * unifying both types into one list Not sure if you are aware of this, but we decided early on [0] to explicitly separate the list for both directions to reduce possible misconfiguration for the user. That is also why the sync jobs in push direction have their dedicated config type instead of having the direction as property of the sync job. 
[0] https://lore.proxmox.com/pbs-devel/5be4c3d1-593f-4eec-b21b-33cb3afc9216 at proxmox.com/ > * adding a helpful tooltip for local owner/user > * adding a filter for the sync jobs > * adding a 'all' mode for listing all jobs on the /admin/sync api > > Dominik Csapak (6): > api: admin: sync: add direction to sync job status > api: admin: sync: add optional 'all' sync type for listing > cli: manager: sync: add 'sync-direction' parameter to list > ui: sync jobs: revert to single list for pull/push jobs > ui: sync jobs: change default sorting to 'store' -> 'direction' -> > 'id' > ui: sync jobs: add search box > > pbs-api-types/src/jobs.rs | 6 ++ > src/api2/admin/sync.rs | 65 ++++++++---- > src/api2/config/datastore.rs | 9 +- > src/api2/config/notifications/mod.rs | 2 +- > src/bin/proxmox_backup_manager/sync.rs | 6 +- > www/Makefile | 1 - > www/config/SyncPullPushView.js | 61 ----------- > www/config/SyncView.js | 134 ++++++++++++++++++++----- > www/datastore/DataStoreList.js | 2 +- > www/datastore/Panel.js | 2 +- > 10 files changed, 178 insertions(+), 110 deletions(-) > delete mode 100644 www/config/SyncPullPushView.js > From c.ebner at proxmox.com Mon Nov 25 13:09:39 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 25 Nov 2024 13:09:39 +0100 Subject: [pbs-devel] [PATCH v6 proxmox-backup 0/4] fix #2996: client: allow optional match patterns for restore In-Reply-To: <1732534011.hf3x11ngo1.astroid@yuna.none> References: <20241112104316.206282-1-c.ebner@proxmox.com> <1732534011.hf3x11ngo1.astroid@yuna.none> Message-ID: On 11/25/24 12:29, Fabian Gr?nbichler wrote: > Consider this > > Reviewed-by: Fabian Gr?nbichler > Tested-by: Fabian Gr?nbichler > > but I'd like to add the following on-top, unless you object: No objections, looks good to me. Thx! > > Subject: [PATCH proxmox-backup] api types: replace PathPatterns with Vec > > PathPatterns is hard to distinguish from PathPattern, so would need to be > renamed anyway.. but there isn't really a reason to define a separate API type > just for this. 
> > Signed-off-by: Fabian Gr?nbichler > --- > pbs-api-types/src/pathpatterns.rs | 27 +-------------------------- > proxmox-backup-client/src/main.rs | 8 ++++++-- > pxar-bin/src/main.rs | 9 +++++++-- > 3 files changed, 14 insertions(+), 30 deletions(-) > > diff --git a/pbs-api-types/src/pathpatterns.rs b/pbs-api-types/src/pathpatterns.rs > index c40926a44..505ecc8aa 100644 > --- a/pbs-api-types/src/pathpatterns.rs > +++ b/pbs-api-types/src/pathpatterns.rs > @@ -1,4 +1,4 @@ > -use proxmox_schema::{const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema}; > +use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema}; > > use serde::{Deserialize, Serialize}; > > @@ -13,12 +13,6 @@ pub const PATH_PATTERN_SCHEMA: Schema = > .format(&PATH_PATTERN_FORMAT) > .schema(); > > -pub const PATH_PATTERN_LIST_SCHEMA: Schema = ArraySchema::new( > - "List of paths or match patterns for matching filenames.", > - &PATH_PATTERN_SCHEMA, > -) > -.schema(); > - > #[derive(Default, Deserialize, Serialize)] > /// Path or path pattern for filename matching > pub struct PathPattern { > @@ -34,22 +28,3 @@ impl AsRef<[u8]> for PathPattern { > self.pattern.as_bytes() > } > } > - > -#[derive(Default, Deserialize, Serialize)] > -/// Array of paths and/or path patterns for filename matching > -pub struct PathPatterns { > - patterns: Vec, > -} > - > -impl ApiType for PathPatterns { > - const API_SCHEMA: Schema = PATH_PATTERN_LIST_SCHEMA; > -} > - > -impl IntoIterator for PathPatterns { > - type Item = PathPattern; > - type IntoIter = std::vec::IntoIter; > - > - fn into_iter(self) -> Self::IntoIter { > - self.patterns.into_iter() > - } > -} > diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs > index cfeed77d7..89f91e2b5 100644 > --- a/proxmox-backup-client/src/main.rs > +++ b/proxmox-backup-client/src/main.rs > @@ -26,7 +26,7 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; > > use pbs_api_types::{ > ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart, > - BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PathPatterns, > + BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PathPattern, > PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus, > BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, > CATALOG_NAME, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, > @@ -1407,7 +1407,11 @@ We do not extract '.pxar' archives when writing to standard output. 
> flatten: true, > }, > pattern: { > - type: PathPatterns, > + type: Array, > + items: { > + type: PathPattern, > + }, > + description: "Path or match pattern to limit files that get restored.", > optional: true, > }, > "allow-existing-dirs": { > diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs > index eb3580d92..7dff1e38c 100644 > --- a/pxar-bin/src/main.rs > +++ b/pxar-bin/src/main.rs > @@ -13,7 +13,8 @@ use serde_json::Value; > use tokio::signal::unix::{signal, SignalKind}; > > use pathpatterns::{MatchEntry, MatchType, PatternFlag}; > -use pbs_api_types::PathPatterns; > + > +use pbs_api_types::PathPattern; > use pbs_client::pxar::tools::format_single_line_entry; > use pbs_client::pxar::{ > Flags, OverwriteFlags, PxarExtractOptions, PxarWriters, ENCODER_MAX_ENTRIES, > @@ -55,8 +56,12 @@ fn extract_archive_from_reader( > description: "Archive name.", > }, > pattern: { > - type: PathPatterns, > + type: Array, > + items: { > + type: PathPattern, > + }, > optional: true, > + description: "Path or match pattern to limit files that get restored.", > }, > target: { > description: "Target directory", From d.csapak at proxmox.com Mon Nov 25 13:44:58 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 13:44:58 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 14/26] ui: add removable datastore creation support In-Reply-To: <20241122144713.299130-15-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-15-h.laimer@proxmox.com> Message-ID: <75ba25f9-c671-4899-9b9a-6b3fb08fecaa@proxmox.com> comments inline: On 11/22/24 15:47, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > www/window/DataStoreEdit.js | 37 +++++++++++++++++++++++++++++++++++++ > 1 file changed, 37 insertions(+) > > diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js > index b8e866df2..7b6aff1e7 100644 > --- a/www/window/DataStoreEdit.js > +++ b/www/window/DataStoreEdit.js > @@ -63,6 +63,20 @@ Ext.define('PBS.DataStoreEdit', { > emptyText: gettext('An absolute path'), > validator: val => val?.trim() !== '/', > }, > + { > + xtype: 'pmxDisplayEditField', > + fieldLabel: gettext('Device'), > + name: 'backing-device', > + disabled: true, > + cbind: { > + editable: '{isCreate}', > + }, > + editConfig: { > + xtype: 'pbsPartitionSelector', > + allowBlank: true, > + }, > + emptyText: gettext('Device path'), > + }, it's a bit tricky to see from the code, but this editwindow is never actually called in an editable context, so the displayedit field here would actually not be necessary, because if you omit this > ], > column2: [ > { > @@ -88,6 +102,29 @@ Ext.define('PBS.DataStoreEdit', { > }, > ], > columnB: [ > + { > + xtype: 'checkbox', > + boxLabel: gettext('Removable datastore'), > + submitValue: false, > + listeners: { > + change: function(checkbox, isRemovable) { > + let inputPanel = checkbox.up('inputpanel'); > + let pathField = inputPanel.down('[name=path]'); > + let uuidField = inputPanel.down('pbsPartitionSelector[name=backing-device]'); > + let uuidEditField = inputPanel.down('[name=backing-device]'); this 'double' field modifying would also not be necessary. even if we leave the displayedit field, this code here does not make much sense, since the checkbox is always visible/editable, but the device itself is not? 
either this is editable on an existing datastore (then the device should also be editable afterwards), or this is not changeable, then the checkbox must also vanish on edit in any case, the checkbox change can/should only happen when the device is editable so we can omit trying to modifying the display field here? > + > + uuidField.allowBlank = !isRemovable; > + uuidEditField.setDisabled(!isRemovable); > + uuidField.setDisabled(!isRemovable); > + uuidField.setValue(''); > + if (isRemovable) { > + pathField.setFieldLabel(gettext('On device path')); > + } else { > + pathField.setFieldLabel(gettext('Backing Path')); > + } > + }, > + }, > + }, > { > xtype: 'textfield', > name: 'comment', From d.csapak at proxmox.com Mon Nov 25 14:00:04 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 14:00:04 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 15/26] ui: add (un)mount button to summary In-Reply-To: <20241122144713.299130-16-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-16-h.laimer@proxmox.com> Message-ID: <417cbdb8-90c0-4004-ab9e-e3e610485242@proxmox.com> this patch removes the 'connection summary' from the overview, because... On 11/22/24 15:47, Hannes Laimer wrote: > And only try to load datastore information if the datastore is > available. > > Signed-off-by: Hannes Laimer > --- > changes since v13: > * stop statusStore update on first failed request, start again on mount > > www/datastore/Summary.js | 94 +++++++++++++++++++++++++++++++++++++++- > 1 file changed, 92 insertions(+), 2 deletions(-) > > diff --git a/www/datastore/Summary.js b/www/datastore/Summary.js > index a932b4e01..2d79a7951 100644 > --- a/www/datastore/Summary.js > +++ b/www/datastore/Summary.js > @@ -309,7 +309,84 @@ Ext.define('PBS.DataStoreSummary', { > model: 'pve-rrd-datastore', > }); > > - me.callParent(); > + me.statusStore = Ext.create('Proxmox.data.ObjectStore', { > + url: `/api2/json/admin/datastore/${me.datastore}/status`, > + interval: 1000, > + }); > + > + let unmountBtn = Ext.create('Ext.Button', { > + text: gettext('Unmount'), > + hidden: true, > + handler: () => { > + Proxmox.Utils.API2Request({ > + url: `/admin/datastore/${me.datastore}/unmount`, > + method: 'POST', > + failure: function(response) { > + Ext.Msg.alert(gettext('Error'), response.htmlStatus); > + }, > + success: function(response, options) { > + Ext.create('Proxmox.window.TaskViewer', { > + upid: response.result.data, > + }).show(); > + }, > + }); > + }, > + }); > + > + let mountBtn = Ext.create('Ext.Button', { > + text: gettext('Mount'), > + hidden: true, > + handler: () => { > + Proxmox.Utils.API2Request({ > + url: `/admin/datastore/${me.datastore}/mount`, > + method: 'POST', > + failure: function(response) { > + Ext.Msg.alert(gettext('Error'), response.htmlStatus); > + }, > + success: function(response, options) { > + me.statusStore.startUpdate(); > + Ext.create('Proxmox.window.TaskViewer', { > + upid: response.result.data, > + }).show(); > + }, > + }); > + }, > + }); > + > + Ext.apply(me, { > + tbar: [unmountBtn, mountBtn, '->', { xtype: 'proxmoxRRDTypeSelector' }], > + }); you define a few buttons and overwrite the 'tbar' config, here, but forgot to add the 'connection summary' again. please don't do it this way, but rather put the buttons to the remaining tbar config above. 
since you don't actually need any info from the initComponent (AFAICS) this should not be a problem you should be able to add a 'referenceHolder' property on the view to be able to use 'reference' and 'lookup' to get to the components in the listener below If it's really not possible to get to the tbar, please remove the original 'tbar' definition at least, since it'll not be used anyway. > + > + me.mon(me.statusStore, 'load', (s, records, success) => { > + if (!success) { > + me.statusStore.stopUpdate(); > + me.down('pbsDataStoreInfo').fireEvent('deactivate'); > + Proxmox.Utils.API2Request({ > + url: `/config/datastore/${me.datastore}`, > + success: response => { > + let mode = response.result.data['maintenance-mode']; > + let [type, _message] = PBS.Utils.parseMaintenanceMode(mode); > + if (!response.result.data['backing-device']) { > + return; > + } > + if (!type || type === 'read-only') { > + unmountBtn.setDisabled(true); > + mountBtn.setDisabled(false); > + } else if (type === 'unmount') { > + unmountBtn.setDisabled(true); > + mountBtn.setDisabled(true); > + } else { > + unmountBtn.setDisabled(false); > + mountBtn.setDisabled(false); > + } > + }, > + }); > + } else { > + me.down('pbsDataStoreInfo').fireEvent('activate'); > + unmountBtn.setDisabled(false); > + mountBtn.setDisabled(true); > + } > + }); i'm not completely sure about that, but i think one should call 'me.mon' only after the callParents call. I may be mistaken though. (you should be able to move the code below that, since it'll trigger only after the initcomponent anyway) > > let sp = Ext.state.Manager.getProvider(); > me.mon(sp, 'statechange', function(provider, key, value) { > @@ -322,11 +399,17 @@ Ext.define('PBS.DataStoreSummary', { > Proxmox.Utils.updateColumns(me); > }); > > + me.callParent(); > + > Proxmox.Utils.API2Request({ > url: `/config/datastore/${me.datastore}`, > waitMsgTarget: me.down('pbsDataStoreInfo'), > success: function(response) { > - let path = Ext.htmlEncode(response.result.data.path); > + let data = response.result.data; > + let path = Ext.htmlEncode(data.path); > + const removable = Object.prototype.hasOwnProperty.call(data, "backing-device"); i mean it works, but our usual way to code that would be something like: let removable = !!data['backing-device']; is there a special reason for calling the hasOwnProperty method? > + unmountBtn.setHidden(!removable); > + mountBtn.setHidden(!removable); > me.down('pbsDataStoreInfo').setTitle(`${me.datastore} (${path})`); > me.down('pbsDataStoreNotes').setNotes(response.result.data.comment); > }, > @@ -344,6 +427,13 @@ Ext.define('PBS.DataStoreSummary', { > let hasIoTicks = records?.some((rec) => rec?.data?.io_ticks !== undefined); > me.down('#ioDelayChart').setVisible(!success || hasIoTicks); > }, undefined, { single: true }); > + me.on('afterrender', () => { > + me.statusStore.startUpdate(); > + }); > + > + me.on('destroy', () => { > + me.statusStore.stopUpdate(); > + }); any special reason to put this here instead of the activate/deactivate/destroy handlers above, were we also handle the rrdstore? > > me.query('proxmoxRRDChart').forEach((chart) => { > chart.setStore(me.rrdstore); From d.csapak at proxmox.com Mon Nov 25 14:06:14 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 14:06:14 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14] fixup! 
ui: tree: render unmounted datastores correctly In-Reply-To: <20241125084752.28727-1-h.laimer@proxmox.com> References: <20241122144713.299130-17-h.laimer@proxmox.com> <20241125084752.28727-1-h.laimer@proxmox.com> Message-ID: one comment inline On 11/25/24 09:47, Hannes Laimer wrote: > --- > fixes small problem with the original patch where all datastores were rendered with the plug > icon in the UI > > sent in reply to original patch since it is a really minor or change and > can just be squashed in > www/NavigationTree.js | 3 ++- > 1 file changed, 2 insertions(+), 1 deletion(-) > > diff --git a/www/NavigationTree.js b/www/NavigationTree.js > index dd03fbd62..29ecae7fe 100644 > --- a/www/NavigationTree.js > +++ b/www/NavigationTree.js > @@ -266,7 +266,8 @@ Ext.define('PBS.view.main.NavigationTree', { > while (name.localeCompare(getChildTextAt(j)) > 0 && (j+1) < list.childNodes.length) { > j++; > } > - let mainIcon = `fa fa-${records[i].data.mount-status !== 'nonremovable' ? 'plug' : 'database'}`; > + const isRemovable = records[i].data['mount-status'] !== 'nonremovable'; > + let mainIcon = `fa fa-${isRemovable ? 'plug' : 'database'}`; i think it'll not happen, but if an 'old' api returns no 'mount-status' at all, i think we should default to the 'normal' symbol, not the plug one so e.g. let mountStatus = records[i].data['mount-status'] ?? 'nonremovable'; let isRemoveable = mountStatus !== 'nonremovable'; or something like this > let [qtip, iconCls] = ['', mainIcon]; > const maintenance = records[i].data.maintenance; > From f.gruenbichler at proxmox.com Mon Nov 25 14:10:28 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 14:10:28 +0100 Subject: [pbs-devel] applied-series: [PATCH v6 proxmox-backup 0/4] fix #2996: client: allow optional match patterns for restore In-Reply-To: References: <20241112104316.206282-1-c.ebner@proxmox.com> <1732534011.hf3x11ngo1.astroid@yuna.none> Message-ID: <1732540219.vzaa0abqfo.astroid@yuna.none> On November 25, 2024 1:09 pm, Christian Ebner wrote: > On 11/25/24 12:29, Fabian Gr?nbichler wrote: >> Consider this >> >> Reviewed-by: Fabian Gr?nbichler >> Tested-by: Fabian Gr?nbichler >> >> but I'd like to add the following on-top, unless you object: > > No objections, looks good to me. Thx! > >> >> Subject: [PATCH proxmox-backup] api types: replace PathPatterns with Vec >> >> PathPatterns is hard to distinguish from PathPattern, so would need to be >> renamed anyway.. but there isn't really a reason to define a separate API type >> just for this. 
>> >> Signed-off-by: Fabian Gr?nbichler >> --- >> pbs-api-types/src/pathpatterns.rs | 27 +-------------------------- >> proxmox-backup-client/src/main.rs | 8 ++++++-- >> pxar-bin/src/main.rs | 9 +++++++-- >> 3 files changed, 14 insertions(+), 30 deletions(-) >> >> diff --git a/pbs-api-types/src/pathpatterns.rs b/pbs-api-types/src/pathpatterns.rs >> index c40926a44..505ecc8aa 100644 >> --- a/pbs-api-types/src/pathpatterns.rs >> +++ b/pbs-api-types/src/pathpatterns.rs >> @@ -1,4 +1,4 @@ >> -use proxmox_schema::{const_regex, ApiStringFormat, ApiType, ArraySchema, Schema, StringSchema}; >> +use proxmox_schema::{const_regex, ApiStringFormat, ApiType, Schema, StringSchema}; >> >> use serde::{Deserialize, Serialize}; >> >> @@ -13,12 +13,6 @@ pub const PATH_PATTERN_SCHEMA: Schema = >> .format(&PATH_PATTERN_FORMAT) >> .schema(); >> >> -pub const PATH_PATTERN_LIST_SCHEMA: Schema = ArraySchema::new( >> - "List of paths or match patterns for matching filenames.", >> - &PATH_PATTERN_SCHEMA, >> -) >> -.schema(); >> - >> #[derive(Default, Deserialize, Serialize)] >> /// Path or path pattern for filename matching >> pub struct PathPattern { >> @@ -34,22 +28,3 @@ impl AsRef<[u8]> for PathPattern { >> self.pattern.as_bytes() >> } >> } >> - >> -#[derive(Default, Deserialize, Serialize)] >> -/// Array of paths and/or path patterns for filename matching >> -pub struct PathPatterns { >> - patterns: Vec, >> -} >> - >> -impl ApiType for PathPatterns { >> - const API_SCHEMA: Schema = PATH_PATTERN_LIST_SCHEMA; >> -} >> - >> -impl IntoIterator for PathPatterns { >> - type Item = PathPattern; >> - type IntoIter = std::vec::IntoIter; >> - >> - fn into_iter(self) -> Self::IntoIter { >> - self.patterns.into_iter() >> - } >> -} >> diff --git a/proxmox-backup-client/src/main.rs b/proxmox-backup-client/src/main.rs >> index cfeed77d7..89f91e2b5 100644 >> --- a/proxmox-backup-client/src/main.rs >> +++ b/proxmox-backup-client/src/main.rs >> @@ -26,7 +26,7 @@ use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation}; >> >> use pbs_api_types::{ >> ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup, BackupNamespace, BackupPart, >> - BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PathPatterns, >> + BackupType, ClientRateLimitConfig, CryptMode, Fingerprint, GroupListItem, PathPattern, >> PruneJobOptions, PruneListItem, RateLimitConfig, SnapshotListItem, StorageStatus, >> BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, >> CATALOG_NAME, ENCRYPTED_KEY_BLOB_NAME, MANIFEST_BLOB_NAME, >> @@ -1407,7 +1407,11 @@ We do not extract '.pxar' archives when writing to standard output. 
>> flatten: true, >> }, >> pattern: { >> - type: PathPatterns, >> + type: Array, >> + items: { >> + type: PathPattern, >> + }, >> + description: "Path or match pattern to limit files that get restored.", >> optional: true, >> }, >> "allow-existing-dirs": { >> diff --git a/pxar-bin/src/main.rs b/pxar-bin/src/main.rs >> index eb3580d92..7dff1e38c 100644 >> --- a/pxar-bin/src/main.rs >> +++ b/pxar-bin/src/main.rs >> @@ -13,7 +13,8 @@ use serde_json::Value; >> use tokio::signal::unix::{signal, SignalKind}; >> >> use pathpatterns::{MatchEntry, MatchType, PatternFlag}; >> -use pbs_api_types::PathPatterns; >> + >> +use pbs_api_types::PathPattern; >> use pbs_client::pxar::tools::format_single_line_entry; >> use pbs_client::pxar::{ >> Flags, OverwriteFlags, PxarExtractOptions, PxarWriters, ENCODER_MAX_ENTRIES, >> @@ -55,8 +56,12 @@ fn extract_archive_from_reader( >> description: "Archive name.", >> }, >> pattern: { >> - type: PathPatterns, >> + type: Array, >> + items: { >> + type: PathPattern, >> + }, >> optional: true, >> + description: "Path or match pattern to limit files that get restored.", >> }, >> target: { >> description: "Target directory", > > From l.wagner at proxmox.com Mon Nov 25 14:15:42 2024 From: l.wagner at proxmox.com (Lukas Wagner) Date: Mon, 25 Nov 2024 14:15:42 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 00/26] add removable datastores In-Reply-To: <20241122144713.299130-1-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> Message-ID: <064ff027-e4c2-4909-9365-659da6dd1fd3@proxmox.com> On 2024-11-22 15:46, Hannes Laimer wrote: > These patches add support for removable datastores. All removable > datastores have a backing-device(a UUID) associated with them. Removable > datastores work like normal ones, just that they can be unplugged. It is > possible to create a removable datastore, sync backups onto it, unplug > it and use it on a different PBS. > > The datastore path specified is relative to the root of the used device. > Removable datastores are bind mounted to /mnt/datastore/. > Multiple datastores can be created on a single device, but only device with > a single datastore on them will be auto-mounted. > > When a removable datastore is deleted and 'destroy-data' is set, the > device has to be mounted. If 'destroy-data' is not set the datastore > can be deleted even if the device is not present. Removable datastores > are automatically mounted when plugged in. > Tested these patches against the latest state on master. Works fine generally, but there were a couple of rough edges that were at least unexpected (at least to me, not being very familiar with this new feature) In no particular order, they were: - When creating a removable datastore, it struck me as odd that the "On device path" has to be absolute (start with a /), considering that the path you enter is relative to the root directory of the partition. - a removable datastore cannot be ZFS. I guess there are technical reasons for this, but it was a bit unexpected, especially considering that you can create a non-removable datastore on ZFS without any issues. Maybe the docs could give some background on this, or at least mention that removable datastores cannot be created on ZFS formatted disks - ran into an issue with the 'unmounting' maintenance mode. I pressed 'unmount' in the UI. One of my bash session still had its working directory in the mounted datastore, leading to umount failing with a 'device busy' message. 
After this, the datastore was in the 'unmounting' maintenance mode, which I could not clear anymore, neither through the UI nor through the proxmox-backup-manager CLI tool. Eventually I had to remove the `maintenance-mode` line from `datastores.cfg` by hand. - When a datastore is unmounted and the GC job view or content view is accessed, the system logs are spammed by HTTP 400 error messages, e.g. GET /api2/json/admin/datastore/disk2/status: 400 Bad Request: [client [::xxxxxxxxx]:34382] datastore 'disk2' is not mounted Might be annoying and/or confusing for system admins looking through the logs when they debug some other issue. - Similarly, if a removable datastore is added as a storage in PVE, pvestatd logs errors every 10 seconds if the datastore is not mounted. Not sure if this is possible, but maybe we could handle this more gracefully to not spam the logs? - Something that also was a bit confusing was the following: - Attached new disk to my PBS test VM - Formatted the disk as ext4 using the webui (name: disk1, device: sdb1), opting to NOT create a datastore as part of the formatting process - Created a removable datastore on the new partition (name: disk1-store, device: sdb1) -> This led to the partition being mounted twice: /dev/sdb1 on /mnt/datastore/disk1 type ext4 (rw,relatime) /dev/sdb1 on /mnt/datastore/disk1-store type ext4 (rw,relatime) -> if 'unmount' is pressed for the datastore, only the second mount is unmounted. This could be confusing to users who expect to be able to safely remove the disk after unmounting the datastore. - For contrast, if while creating the partition one opts to create the removable datastore immediately, the partition is only mounted once. - Also, when trying to mount a datastore without the disk being available, the error message in the task log might not be super clear to some users: TASK ERROR: mounting to tmp path failed: command "mount" "UUID=a264c664-9e0e-47ad-abf5-960e0aabfe0b" "/run/proxmox-backup/mount/5292c7d2-a54b-42f1-be87-88810b2e90cd" failed - status code: 1 - mount: /run/proxmox-backup/mount/5292c7d2-a54b-42f1-be87-88810b2e90cd: can't find UUID=a264c664-9e0e-47ad-abf5-960e0aabfe0b.) Maybe we could add clearer error messages for the more common error cases, like disks not being available? I don't think any of these are blockers, but nevertheless I thought I'd bring these up.
-- - Lukas From d.csapak at proxmox.com Mon Nov 25 14:23:25 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 14:23:25 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 19/26] ui: maintenance: fix disable msg field if no type is selected In-Reply-To: <20241122144713.299130-20-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-20-h.laimer@proxmox.com> Message-ID: <8a7a5150-9449-4d3b-b3a0-078aaa8fdcd6@proxmox.com> one comment inline On 11/22/24 15:47, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > www/window/MaintenanceOptions.js | 7 ++++++- > 1 file changed, 6 insertions(+), 1 deletion(-) > > diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js > index 1ee92542e..527c36987 100644 > --- a/www/window/MaintenanceOptions.js > +++ b/www/window/MaintenanceOptions.js > @@ -56,12 +56,17 @@ Ext.define('PBS.window.MaintenanceOptions', { > fieldLabel: gettext('Maintenance Type'), > value: '__default__', > deleteEmpty: true, > + listeners: { > + change: (field, newValue) => { > + Ext.getCmp('message-field').setDisabled(newValue === '__default__'); i'd rather you use 'itemId' instead and use our 'up().down()' mechanism like we do everywhere else, or use a 'referenceHolder' and 'reference'+'lookup' instead the reason is that (html) id's have to be globally unique, and having one that is generically named 'message-field' might clash with something else at one point... > + }, > + }, > }, > { > xtype: 'proxmoxtextfield', > + id: 'message-field', > name: 'maintenance-msg', > fieldLabel: gettext('Description'), > - // FIXME: disable if maintenance type is none > }, > ], > }, From d.csapak at proxmox.com Mon Nov 25 14:24:43 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 14:24:43 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 20/26] ui: render 'unmount' maintenance mode correctly In-Reply-To: <20241122144713.299130-21-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-21-h.laimer@proxmox.com> Message-ID: <777e4fa5-73d0-42bc-9940-37e231ef7b81@proxmox.com> comments inline On 11/22/24 15:47, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > www/Utils.js | 4 +++- > www/window/MaintenanceOptions.js | 10 ++++++++++ > 2 files changed, 13 insertions(+), 1 deletion(-) > > diff --git a/www/Utils.js b/www/Utils.js > index 7756e9b5d..6bae9b709 100644 > --- a/www/Utils.js > +++ b/www/Utils.js > @@ -775,7 +775,7 @@ Ext.define('PBS.Utils', { > let extra = ''; > > if (activeTasks !== undefined) { > - const conflictingTasks = activeTasks.write + (type === 'offline' ? activeTasks.read : 0); > + const conflictingTasks = activeTasks.write + (type === 'offline' || type === 'unmount' ? 
activeTasks.read : 0); > > if (conflictingTasks > 0) { > extra += '| '; > @@ -795,6 +795,8 @@ > break; > case 'offline': modeText = gettext("Offline"); > break; > + case 'unmount': modeText = gettext("Unmounting"); > + break; > } > return `${modeText} ${extra}`; > }, > diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js > index 527c36987..d7348cb4f 100644 > --- a/www/window/MaintenanceOptions.js > +++ b/www/window/MaintenanceOptions.js > @@ -52,6 +52,7 @@ Ext.define('PBS.window.MaintenanceOptions', { > items: [ > { > xtype: 'pbsMaintenanceType', > + id: 'type-field', same as previous patch: please don't use 'id' for this (especially using such generic names) > name: 'maintenance-type', > fieldLabel: gettext('Maintenance Type'), > value: '__default__', > @@ -85,6 +86,15 @@ > }; > } > > + let unmounting = options['maintenance-type'] === 'unmount'; > + let defaultType = options['maintenance-type'] === '__default__'; > + if (unmounting) { > + options['maintenance-type'] = ''; > + } > + > me.callParent([options]); > + > + Ext.ComponentManager.get('type-field').setDisabled(unmounting); > + Ext.ComponentManager.get('message-field').setDisabled(unmounting || defaultType); > }, > }); From f.gruenbichler at proxmox.com Mon Nov 25 14:24:13 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 14:24:13 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 05/25] api: admin: add (un)mount endpoint for removable datastores In-Reply-To: <20241122144713.299130-6-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-6-h.laimer@proxmox.com> Message-ID: <1732540770.nh1cgxu1lj.astroid@yuna.none> On November 22, 2024 3:46 pm, Hannes Laimer wrote: > Removable datastores can be mounted unless > - they are already mounted > - their device is not present > For unmounting the maintenance mode is set to `unmount`, > which prohibits the starting of any new tasks involving any > IO. This mode is unset either > - on completion of the unmount > - on abort of the unmount task > If the unmounting itself should fail, the maintenance mode stays in > place and requires manual intervention by unsetting it in the config > file directly. This is intentional, as unmounting should not fail, > and if it should the situation should be looked at.
> > Signed-off-by: Hannes Laimer > --- > changes since v13: > * improve logging > * fix racy unmount > * (manually) changing maintenance during unmount will prevent unmounting and > result in failed unmount task > > src/api2/admin/datastore.rs | 294 ++++++++++++++++++++++++++++++++++-- > 1 file changed, 283 insertions(+), 11 deletions(-) > > diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs > index 3b863c06b..85522345e 100644 > --- a/src/api2/admin/datastore.rs > +++ b/src/api2/admin/datastore.rs > @@ -4,7 +4,7 @@ use std::collections::HashSet; > use std::ffi::OsStr; > use std::ops::Deref; > use std::os::unix::ffi::OsStrExt; > -use std::path::PathBuf; > +use std::path::{Path, PathBuf}; > use std::sync::Arc; > > use anyhow::{bail, format_err, Error}; > @@ -14,7 +14,7 @@ use hyper::{header, Body, Response, StatusCode}; > use serde::Deserialize; > use serde_json::{json, Value}; > use tokio_stream::wrappers::ReceiverStream; > -use tracing::{info, warn}; > +use tracing::{debug, info, warn}; > > use proxmox_async::blocking::WrappedReaderStream; > use proxmox_async::{io::AsyncChannelWriter, stream::AsyncReaderStream}; > @@ -30,6 +30,7 @@ use proxmox_sys::fs::{ > file_read_firstline, file_read_optional_string, replace_file, CreateOptions, > }; > use proxmox_time::CalendarEvent; > +use proxmox_worker_task::WorkerTaskContext; > > use pxar::accessor::aio::Accessor; > use pxar::EntryKind; > @@ -38,13 +39,13 @@ use pbs_api_types::{ > print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName, > BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode, > DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, > - JobScheduleStatus, KeepOptions, Operation, PruneJobOptions, SnapshotListItem, > - SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, > - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, > - IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, > - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, > - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, > - VERIFICATION_OUTDATED_AFTER_SCHEMA, > + JobScheduleStatus, KeepOptions, MaintenanceMode, MaintenanceType, Operation, PruneJobOptions, > + SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, > + BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, > + CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, > + MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, > + PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, > + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, > }; > use pbs_client::pxar::{create_tar, create_zip}; > use pbs_config::CachedUserInfo; > @@ -59,8 +60,8 @@ use pbs_datastore::index::IndexFile; > use pbs_datastore::manifest::BackupManifest; > use pbs_datastore::prune::compute_prune_info; > use pbs_datastore::{ > - check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, > - StoreProgress, > + check_backup_owner, ensure_datastore_is_mounted, task_tracking, BackupDir, BackupGroup, > + DataStore, LocalChunkReader, StoreProgress, > }; > use pbs_tools::json::required_string_param; > use proxmox_rest_server::{formatter, WorkerTask}; > @@ -2394,6 
+2395,275 @@ pub async fn set_backup_owner( > .await? > } > > +/// Here we > +/// > +/// 1. mount the removable device to `/mount/` > +/// 2. bind mount `/mount//` to `/mnt/datastore/` > +/// 3. unmount `/mount/` > +/// > +/// leaving us with the datastore being mounted directly with its name under /mnt/datastore/... > +/// > +/// The reason for the randomized device mounting paths is to avoid two tasks trying to mount to > +/// the same path, this is *very* unlikely since the device is only mounted really shortly, but > +/// technically possible. > +pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> { > + if let Some(uuid) = datastore.backing_device.as_ref() { > + let mount_point = datastore.absolute_path(); > + if pbs_datastore::get_datastore_mount_status(&datastore) == Some(true) { > + bail!("device is already mounted at '{}'", mount_point); > + } > + let tmp_mount_path = format!( > + "{}/{:x}", > + pbs_buildcfg::rundir!("/mount"), > + proxmox_uuid::Uuid::generate() > + ); > + > + let default_options = proxmox_sys::fs::CreateOptions::new(); > + proxmox_sys::fs::create_path( > + &tmp_mount_path, > + Some(default_options.clone()), > + Some(default_options.clone()), > + )?; > + > + info!("temporarily mounting '{uuid}' to '{}'", tmp_mount_path); > + crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path)) > + .map_err(|e| format_err!("mounting to tmp path failed: {e}"))?; after this point, any error should trigger an unmount before being bubbled up.. > + > + let full_store_path = format!( > + "{tmp_mount_path}/{}", > + datastore.path.trim_start_matches('/') > + ); > + let backup_user = pbs_config::backup_user()?; > + let options = CreateOptions::new() > + .owner(backup_user.uid) > + .group(backup_user.gid); > + > + proxmox_sys::fs::create_path( > + &mount_point, > + Some(default_options.clone()), > + Some(options.clone()), > + ) > + .map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?; > + > + // can't be created before it is mounted, so we have to do it here > + proxmox_sys::fs::create_path( > + &full_store_path, > + Some(default_options.clone()), > + Some(options.clone()), > + ) > + .map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?; > + > + info!( > + "bind mount '{}'({}) to '{}'", > + datastore.name, datastore.path, mount_point > + ); > + if let Err(err) = > + crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point)) > + { > + debug!("unmounting '{}'", tmp_mount_path); > + let _ = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)) > + .inspect_err(|e| warn!("unmounting from tmp path '{tmp_mount_path} failed: {e}'")); > + let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)) > + .inspect_err(|e| warn!("removing tmp path '{tmp_mount_path} failed: {e}'")); this doesn't log the error, so adding context doesn't help at all.. 
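for illustration, a minimal standalone sketch of why mapping an error into `let _ =` never logs anything, while an explicit `if let Err(..)` does (`cleanup()` is a made-up stand-in here, not the patch code):

```rust
use anyhow::{format_err, Result};
use tracing::warn;

// made-up stand-in for a cleanup step that can fail, e.g. unmounting the
// tmp path or removing the directory
fn cleanup() -> Result<()> {
    Err(format_err!("device busy"))
}

fn demo() {
    // `map_err` builds the wrapped error, but `let _ =` drops it again
    // without it ever being logged - the added context is never seen
    let _ = cleanup().map_err(|e| format_err!("cleanup failed: {e}"));

    // this variant actually surfaces the failure in the task log
    if let Err(err) = cleanup() {
        warn!("cleanup failed: {err}");
    }
}
```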
> + return Err(format_err!( > + "Datastore '{}' cound not be mounted: {}.", > + datastore.name, > + err > + )); > + } > + > + debug!("unmounting '{}'", tmp_mount_path); > + let _ = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)) > + .map_err(|e| format_err!("unmounting from tmp path '{tmp_mount_path} failed: {e}'")); > + let _ = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)) > + .map_err(|e| format_err!("removing tmp path '{tmp_mount_path} failed: {e}'")); same here > + > + Ok(()) > + } else { > + Err(format_err!( > + "Datastore '{}' cannot be mounted because it is not removable.", > + datastore.name > + )) > + } > +} > + > +#[api( > + protected: true, > + input: { > + properties: { > + store: { > + schema: DATASTORE_SCHEMA, > + }, > + } > + }, > + returns: { > + schema: UPID_SCHEMA, > + }, > + access: { > + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), > + }, > +)] > +/// Mount removable datastore. > +pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { > + let (section_config, _digest) = pbs_config::datastore::config()?; > + let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; > + > + if datastore.backing_device.is_none() { > + bail!("datastore '{store}' is not removable"); > + } > + > + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > + > + let upid = WorkerTask::new_thread( > + "mount-device", > + Some(store), > + auth_id.to_string(), > + to_stdout, > + move |_worker| do_mount_device(datastore), > + )?; > + > + Ok(json!(upid)) > +} > + > +fn expect_maintanance_unmounting( > + store: &str, > +) -> Result<(pbs_config::BackupLockGuard, DataStoreConfig), Error> { > + let lock = pbs_config::datastore::lock_config()?; > + let (section_config, _digest) = pbs_config::datastore::config()?; > + let store_config: DataStoreConfig = section_config.lookup("datastore", store)?; > + > + if store_config > + .get_maintenance_mode() > + .map_or(true, |m| m.ty != MaintenanceType::Unmount) > + { > + bail!("maintenance mode is not 'Unmount'"); > + } > + > + Ok((lock, store_config)) > +} > + > +fn unset_maintenance( > + _lock: pbs_config::BackupLockGuard, > + mut config: DataStoreConfig, > +) -> Result<(), Error> { > + let (mut section_config, _digest) = pbs_config::datastore::config()?; > + config.maintenance_mode = None; > + section_config.set_data(&config.name, "datastore", &config)?; > + pbs_config::datastore::save_config(§ion_config)?; > + Ok(()) > +} > + > +fn do_unmount_device( > + datastore: DataStoreConfig, > + worker: Option<&dyn WorkerTaskContext>, > +) -> Result<(), Error> { > + if datastore.backing_device.is_none() { > + bail!("can't unmount non-removable datastore"); > + } > + let mount_point = datastore.absolute_path(); > + > + let mut active_operations = task_tracking::get_active_operations(&datastore.name)?; > + let mut old_status = String::new(); > + let mut aborted = false; > + while active_operations.read + active_operations.write > 0 { > + if let Some(worker) = worker { > + if worker.abort_requested() { > + aborted = true; > + break; > + } > + let status = format!( > + "cannot unmount yet, still {} read and {} write operations active", > + active_operations.read, active_operations.write > + ); > + if status != old_status { > + info!("{status}"); > + old_status = status; > + } > + } > + std::thread::sleep(std::time::Duration::from_secs(1)); > + active_operations = 
task_tracking::get_active_operations(&datastore.name)?; > + } > + > + if aborted { this still doesn't re-check whether the request was aborted.. the loop above sleeps for a second, it's possible the worker got aborted in that time frame.. > + let _ = expect_maintanance_unmounting(&datastore.name) > + .inspect_err(|e| warn!("maintenance mode was not as expected: {e}")) > + .and_then(|(lock, config)| { > + unset_maintenance(lock, config) > + .inspect_err(|e| warn!("could not reset maintenance mode: {e}")) > + }); > + bail!("aborted, due to user request"); > + } else { > + let (lock, config) = expect_maintanance_unmounting(&datastore.name)?; > + crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?; > + let _ = unset_maintenance(lock, config) > + .inspect_err(|e| warn!("could not reset maintenance mode: {e}")); this should return the error.. > + } > + Ok(()) > +} > + > +#[api( > + protected: true, > + input: { > + properties: { > + store: { schema: DATASTORE_SCHEMA }, > + }, > + }, > + returns: { > + schema: UPID_SCHEMA, > + }, > + access: { > + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true), > + } > +)] > +/// Unmount a removable device that is associated with the datastore > +pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { > + let _lock = pbs_config::datastore::lock_config()?; > + let (mut section_config, _digest) = pbs_config::datastore::config()?; > + let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; > + > + if datastore.backing_device.is_none() { > + bail!("datastore '{store}' is not removable"); > + } > + > + ensure_datastore_is_mounted(&datastore)?; > + > + datastore.set_maintenance_mode(Some(MaintenanceMode { > + ty: MaintenanceType::Unmount, > + message: None, > + }))?; > + section_config.set_data(&store, "datastore", &datastore)?; > + pbs_config::datastore::save_config(§ion_config)?; > + > + drop(_lock); > + > + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > + > + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) > + { > + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); > + let _ = proxmox_daemon::command_socket::send_raw( > + sock, > + &format!( > + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", > + &store > + ), > + ) > + .await; > + } > + > + let upid = WorkerTask::new_thread( > + "unmount-device", > + Some(store), > + auth_id.to_string(), > + to_stdout, > + move |worker| do_unmount_device(datastore, Some(&worker)), > + )?; > + > + Ok(json!(upid)) > +} > + > #[sortable] > const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > ( > @@ -2432,6 +2702,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > .get(&API_METHOD_LIST_GROUPS) > .delete(&API_METHOD_DELETE_GROUP), > ), > + ("mount", &Router::new().post(&API_METHOD_MOUNT)), > ( > "namespace", > // FIXME: move into datastore:: sub-module?! 
> @@ -2466,6 +2737,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ > .delete(&API_METHOD_DELETE_SNAPSHOT), > ), > ("status", &Router::new().get(&API_METHOD_STATUS)), > + ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)), > ( > "upload-backup-log", > &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG), > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From d.csapak at proxmox.com Mon Nov 25 14:28:07 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 14:28:07 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 24/26] ui: support create removable datastore through directory creation In-Reply-To: <20241122144713.299130-25-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-25-h.laimer@proxmox.com> Message-ID: <777a0155-3110-49bd-ac57-6b1e7b058290@proxmox.com> a bit offtopic, but why only directory storages and not also zfs based ones? e.g. i can imagine users wanting a zpool on an external disk too (for checksumming, send/receive, snapshotting, etc) On 11/22/24 15:47, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > src/api2/node/disks/directory.rs | 2 ++ > www/DirectoryList.js | 13 +++++++++++++ > www/window/CreateDirectory.js | 14 ++++++++++++++ > 3 files changed, 29 insertions(+) > > diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs > index ff817b253..2f7cc7a27 100644 > --- a/src/api2/node/disks/directory.rs > +++ b/src/api2/node/disks/directory.rs > @@ -150,6 +150,8 @@ pub fn list_datastore_mounts() -> Result, Error> { > "removable-datastore": { > description: "The added datastore is removable.", > type: bool, > + optional: true, > + default: false, > }, > filesystem: { > type: FileSystemType, > diff --git a/www/DirectoryList.js b/www/DirectoryList.js > index adefa9abf..25921a623 100644 > --- a/www/DirectoryList.js > +++ b/www/DirectoryList.js > @@ -121,6 +121,19 @@ Ext.define('PBS.admin.Directorylist', { > ], > > columns: [ > + { > + text: '', > + flex: 0, > + width: 35, > + dataIndex: 'removable', > + renderer: function(_text, _, row) { > + if (row.data.removable) { > + return ``; > + } else { > + return ''; > + } > + }, > + }, > { > text: gettext('Path'), > dataIndex: 'path', > diff --git a/www/window/CreateDirectory.js b/www/window/CreateDirectory.js > index 6aabe21ab..38d6979d9 100644 > --- a/www/window/CreateDirectory.js > +++ b/www/window/CreateDirectory.js > @@ -43,6 +43,20 @@ Ext.define('PBS.window.CreateDirectory', { > name: 'add-datastore', > fieldLabel: gettext('Add as Datastore'), > value: '1', > + listeners: { > + change(field, newValue, _oldValue) { > + let form = field.up('form'); > + let rmBox = form.down('[name=removable-datastore]'); > + > + rmBox.setDisabled(!newValue); > + rmBox.setValue(false); > + }, > + }, > + }, > + { > + xtype: 'proxmoxcheckbox', > + name: 'removable-datastore', > + fieldLabel: gettext('is removable'), > }, > ], > }); From d.csapak at proxmox.com Mon Nov 25 14:32:49 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 14:32:49 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 00/26] add removable datastores In-Reply-To: <064ff027-e4c2-4909-9365-659da6dd1fd3@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <064ff027-e4c2-4909-9365-659da6dd1fd3@proxmox.com> Message-ID: <85d1ba3d-e5f5-4872-b41a-435ba2a60959@proxmox.com> On 11/25/24 
14:15, Lukas Wagner wrote: > On 2024-11-22 15:46, Hannes Laimer wrote: >> These patches add support for removable datastores. All removable >> datastores have a backing-device(a UUID) associated with them. Removable >> datastores work like normal ones, just that they can be unplugged. It is >> possible to create a removable datastore, sync backups onto it, unplug >> it and use it on a different PBS. >> >> The datastore path specified is relative to the root of the used device. >> Removable datastores are bind mounted to /mnt/datastore/. >> Multiple datastores can be created on a single device, but only device with >> a single datastore on them will be auto-mounted. >> >> When a removable datastore is deleted and 'destroy-data' is set, the >> device has to be mounted. If 'destroy-data' is not set the datastore >> can be deleted even if the device is not present. Removable datastores >> are automatically mounted when plugged in. >> > [snip] > > - Something that also was a bit confusing was the following: > - Attached new disk to my PBS test VM > - Formatted the disk as ext4 using the webui (name: disk1, device: sdb1), opting to NOT create a datastore as part of the formatting process > - Created a removable datastore on the new partition (name: disk1-store, device: sdb1) > -> This led to the partition being mounted twice: > > /dev/sdb1 on /mnt/datastore/disk1 type ext4 (rw,relatime) > /dev/sdb1 on /mnt/datastore/disk1-store type ext4 (rw,relatime) > > -> if 'unmount' is pressed for the datastore, only the second mount is unmounted. This could be confusing > to users who expect to be able to safely remove the disk after unmounting the datastore. > > - For contrast, if while creating the partition one opts to create the removable datastore immediately, > the partition is only mounted once. > > i had slightly different, but also confusing experience did basically the same as Lukas, but my target path was the exact same as the already mounted disk which lead to an 'it's already mounted' error (which yeah was expected, i just created the disk) in that case we could e.g. try to simply use the already mounted path for that? (idk how much work checking if it's the right on, etc. that would involve though...) From f.gruenbichler at proxmox.com Mon Nov 25 14:40:03 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 14:40:03 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 01/25] pbs-api-types: add backing-device to DataStoreConfig In-Reply-To: <20241122144713.299130-2-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-2-h.laimer@proxmox.com> Message-ID: <1732541889.q8m3lvhzkn.astroid@yuna.none> On November 22, 2024 3:46 pm, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > changes since v13: > * drop get_mount_point > * update DATASTORE_DIR_NAME_SCHAME description but you didn't just change the description, this also changed semantics quite a bit? in v13, it was a relative path for removable datastores (which makes a lot of sense - the "path" is the subdir on the mounted device). could we go back to that please? > > pbs-api-types/src/datastore.rs | 25 ++++++++++++++++++++++--- > 1 file changed, 22 insertions(+), 3 deletions(-) > > diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs > index 711051d05..b722c9ab7 100644 > --- a/pbs-api-types/src/datastore.rs > +++ b/pbs-api-types/src/datastore.rs > @@ -45,7 +45,7 @@ const_regex! 
{ > > pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); > > -pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") > +pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or an absolute on-device path for removable datastores.") because an "absolute on-device path" is an oxymoron and confusing.. > .min_length(1) > .max_length(4096) > .schema(); > @@ -163,6 +163,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = > .minimum(1) > .schema(); > > +/// Base directory where datastores are mounted > +pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; > + > #[api] > #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] > #[serde(rename_all = "lowercase")] > @@ -237,7 +240,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore > schema: DATASTORE_SCHEMA, > }, > path: { > - schema: DIR_NAME_SCHEMA, > + schema: DATASTORE_DIR_NAME_SCHEMA, > }, > "notify-user": { > optional: true, > @@ -276,6 +279,12 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore > format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), > type: String, > }, > + "backing-device": { > + description: "The UUID of the filesystem partition for removable datastores.", > + optional: true, > + format: &proxmox_schema::api_types::UUID_FORMAT, > + type: String, > + } > } > )] > #[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] > @@ -323,6 +332,11 @@ pub struct DataStoreConfig { > /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " > #[serde(skip_serializing_if = "Option::is_none")] > pub maintenance_mode: Option, > + > + /// The UUID of the device(for removable datastores) > + #[updater(skip)] > + #[serde(skip_serializing_if = "Option::is_none")] > + pub backing_device: Option, > } > > #[api] > @@ -357,12 +371,17 @@ impl DataStoreConfig { > notification_mode: None, > tuning: None, > maintenance_mode: None, > + backing_device: None, > } > } > > /// Returns the absolute path to the datastore content. > pub fn absolute_path(&self) -> String { > - self.path.clone() > + if self.backing_device.is_some() { > + format!("{DATASTORE_MOUNT_DIR}/{}", self.name) > + } else { > + self.path.clone() > + } > } > > pub fn get_maintenance_mode(&self) -> Option { > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From c.heiss at proxmox.com Mon Nov 25 14:40:48 2024 From: c.heiss at proxmox.com (Christoph Heiss) Date: Mon, 25 Nov 2024 14:40:48 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/3] docs: add installation media & wizard documentation In-Reply-To: <20241001142353.863405-1-c.heiss@proxmox.com> References: <20241001142353.863405-1-c.heiss@proxmox.com> Message-ID: Ping, still applies. On Tue, Oct 01, 2024 at 04:23:29PM +0200, Christoph Heiss wrote: > This series adds documentation on how to create a proper installation > media, as well as how to use our installation wizard. Mostly taken from > the existing PVE/PMG equivalent, adapted as needed of course. > > Patch #3 contains all the new images and can be found in full on my > corresponding staff branch - see also the note attached to that patch. 
> > Christoph Heiss (3): > docs: add installation media preparation guide > docs: add installation wizard guide > docs: images: add installer guide screenshots > > .../screenshots/pbs-installer-grub-menu.png | Bin 0 -> 66840 bytes > .../screenshots/pbs-installer-location.png | Bin 0 -> 146079 bytes > .../screenshots/pbs-installer-network.png | Bin 0 -> 156355 bytes > .../screenshots/pbs-installer-password.png | Bin 0 -> 144742 bytes > .../screenshots/pbs-installer-progress.png | Bin 0 -> 165959 bytes > .../screenshots/pbs-installer-select-disk.png | Bin 0 -> 168424 bytes > .../screenshots/pbs-installer-summary.png | Bin 0 -> 141937 bytes > docs/images/screenshots/pbs-tui-installer.png | Bin 0 -> 4729 bytes > docs/installation-media.rst | 147 ++++++++ > docs/installation.rst | 25 +- > docs/local-zfs.rst | 1 + > docs/system-requirements.rst | 2 + > docs/using-the-installer.rst | 345 ++++++++++++++++++ > 13 files changed, 500 insertions(+), 20 deletions(-) > create mode 100644 docs/images/screenshots/pbs-installer-grub-menu.png > create mode 100644 docs/images/screenshots/pbs-installer-location.png > create mode 100644 docs/images/screenshots/pbs-installer-network.png > create mode 100644 docs/images/screenshots/pbs-installer-password.png > create mode 100644 docs/images/screenshots/pbs-installer-progress.png > create mode 100644 docs/images/screenshots/pbs-installer-select-disk.png > create mode 100644 docs/images/screenshots/pbs-installer-summary.png > create mode 100644 docs/images/screenshots/pbs-tui-installer.png > create mode 100644 docs/installation-media.rst > create mode 100644 docs/using-the-installer.rst > > -- > 2.46.0 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From f.gruenbichler at proxmox.com Mon Nov 25 14:40:51 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 14:40:51 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 06/25] api: removable datastore creation In-Reply-To: <20241122144713.299130-7-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-7-h.laimer@proxmox.com> Message-ID: <1732541569.2njxo69lcr.astroid@yuna.none> On November 22, 2024 3:46 pm, Hannes Laimer wrote: > Devices can contains multiple datastores. > If the specified path already contains a datastore, `reuse datastore` has > to be set so it'll be added without creating a chunckstore. 
> > Signed-off-by: Hannes Laimer > --- > change since v13: > * cleanup > > src/api2/config/datastore.rs | 54 ++++++++++++++++++++++++++---------- > 1 file changed, 40 insertions(+), 14 deletions(-) > > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index 37d1528c7..420f8ddd0 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -1,7 +1,7 @@ > use std::path::PathBuf; > > use ::serde::{Deserialize, Serialize}; > -use anyhow::{bail, Error}; > +use anyhow::{bail, format_err, Error}; > use hex::FromHex; > use serde_json::Value; > use tracing::warn; > @@ -21,7 +21,8 @@ use pbs_config::BackupLockGuard; > use pbs_datastore::chunk_store::ChunkStore; > > use crate::api2::admin::{ > - prune::list_prune_jobs, sync::list_config_sync_jobs, verify::list_verification_jobs, > + datastore::do_mount_device, prune::list_prune_jobs, sync::list_config_sync_jobs, > + verify::list_verification_jobs, > }; > use crate::api2::config::prune::{delete_prune_job, do_create_prune_job}; > use crate::api2::config::sync::delete_sync_job; > @@ -32,6 +33,7 @@ use pbs_config::CachedUserInfo; > use proxmox_rest_server::WorkerTask; > > use crate::server::jobstate; > +use crate::tools::disks::unmount_by_mountpoint; > > #[api( > input: { > @@ -73,37 +75,57 @@ pub(crate) fn do_create_datastore( > datastore: DataStoreConfig, > reuse_datastore: bool, > ) -> Result<(), Error> { > - let path: PathBuf = datastore.path.clone().into(); > + let path: PathBuf = datastore.absolute_path().into(); > > if path.parent().is_none() { > bail!("cannot create datastore in root path"); > } > > + let need_unmount = datastore.backing_device.is_some(); > + if need_unmount { > + do_mount_device(datastore.clone())?; > + }; > + > let tuning: DatastoreTuning = serde_json::from_value( > DatastoreTuning::API_SCHEMA > .parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?, > )?; > > - if reuse_datastore { > - ChunkStore::verify_chunkstore(&path)?; > + let res = if reuse_datastore { > + ChunkStore::verify_chunkstore(&path) > } else { > + let mut is_empty = true; > if let Ok(dir) = std::fs::read_dir(&path) { > for file in dir { > let name = file?.file_name(); > let name = name.to_str(); > if !name.map_or(false, |name| name.starts_with('.') || name == "lost+found") { > - bail!("datastore path is not empty"); > + is_empty = false; > + break; > } > } > } > - let backup_user = pbs_config::backup_user()?; > - let _store = ChunkStore::create( > - &datastore.name, > - path, > - backup_user.uid, > - backup_user.gid, > - tuning.sync_level.unwrap_or_default(), > - )?; > + if is_empty { > + let backup_user = pbs_config::backup_user()?; > + ChunkStore::create( > + &datastore.name, > + path.clone(), > + backup_user.uid, > + backup_user.gid, > + tuning.sync_level.unwrap_or_default(), > + ) > + .map(|_| ()) > + } else { > + Err(format_err!("datastore path not empty")) > + } > + }; > + > + if res.is_err() { > + if need_unmount { > + let _ = unmount_by_mountpoint(&path) > + .inspect_err(|e| warn!("could not unmount device: {e}")); I think I prefer if let Err(err) = .. { warn!(..); } it's more obvious that an error is "caught" and logged when I read that (inspect_err and no bubbling up vs. 
map_err and bubbling up look very similar when quickly browsing code) } > + } > + return res; > } > > config.set_data(&datastore.name, "datastore", &datastore)?; > @@ -147,6 +169,10 @@ pub fn create_datastore( > param_bail!("name", "datastore '{}' already exists.", config.name); > } > > + if !config.path.starts_with("/") { > + param_bail!("path", "expected an abolute path, '{}' is not", config.path); > + } see comment on the first patch, I think you misunderstood my feedback there ;) this check here should only be done for non-removable datastores. removable datastores should continue to have their relative path in the config.. > + > let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; > let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; > > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Mon Nov 25 14:44:40 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 14:44:40 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 07/25] api: add check for nested datastores on creation In-Reply-To: <20241122144713.299130-8-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-8-h.laimer@proxmox.com> Message-ID: <1732542160.of8bqk38he.astroid@yuna.none> On November 22, 2024 3:46 pm, Hannes Laimer wrote: > Signed-off-by: Hannes Laimer > --- > * new in v14, and not removable datastore specific > > src/api2/config/datastore.rs | 14 ++++++++++++++ > 1 file changed, 14 insertions(+) > > diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > index 420f8ddd0..75e1a1a56 100644 > --- a/src/api2/config/datastore.rs > +++ b/src/api2/config/datastore.rs > @@ -81,6 +81,20 @@ pub(crate) fn do_create_datastore( > bail!("cannot create datastore in root path"); > } > > + for store in config.convert_to_typed_array::("datastore")? { > + if store.backing_device != datastore.backing_device { > + continue; > + } this is not needed, if you compare absolute_paths below? > + if store.path.starts_with(&datastore.path) || datastore.path.starts_with(&store.path) { this is broken, as `path` is a String here, and not a Path, so `starts_with` doesn't properly match on path components, but on arbitrary substrings.. i.e., I can't configure two removable datastores, one using "removable" and one using "removable2" as path atm.. 
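the difference between component-wise and substring matching is easy to check in isolation (standalone sketch, paths made up):

```rust
use std::path::Path;

fn main() {
    // `str::starts_with` matches raw prefixes, so sibling stores collide:
    assert!("removable2".starts_with("removable"));

    // `Path::starts_with` compares whole path components instead:
    assert!(!Path::new("/mnt/removable2").starts_with("/mnt/removable"));
    assert!(Path::new("/mnt/removable/store").starts_with("/mnt/removable"));
}
```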
> + param_bail!( > + "path", > + "nested datastores not allowed: '{}' already in '{}'", > + store.name, > + store.path > + ); > + } > + } > + > let need_unmount = datastore.backing_device.is_some(); > if need_unmount { > do_mount_device(datastore.clone())?; > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From f.gruenbichler at proxmox.com Mon Nov 25 14:47:51 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 14:47:51 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 10/25] add auto-mounting for removable datastores In-Reply-To: <20241122144713.299130-11-h.laimer@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-11-h.laimer@proxmox.com> Message-ID: <1732542459.6hj82l5m0b.astroid@yuna.none> On November 22, 2024 3:46 pm, Hannes Laimer wrote: > If a device houses multiple datastore, none of them will be mounted > automatically. If a device only contains a single datastore it will be > mounted automatically. The reason for not mounting multiple datastore > automatically is that we don't know which is actually wanted, and since > mounting all means also all have to be unmounted manually, it made sense > to have the user choose which to mount. > > Signed-off-by: Hannes Laimer > --- > changes since v13: > * skip API alltogether and use mounting wrapper directly > * load datastore config directly > > debian/proxmox-backup-server.install | 1 + > debian/proxmox-backup-server.udev | 3 ++ > etc/Makefile | 1 + > etc/removable-device-attach at .service | 8 ++++ > src/bin/proxmox_backup_manager/datastore.rs | 53 ++++++++++++++++++++- > 5 files changed, 65 insertions(+), 1 deletion(-) > create mode 100644 etc/removable-device-attach at .service > > diff --git a/debian/proxmox-backup-server.install b/debian/proxmox-backup-server.install > index 79757eadb..ff581e3dd 100644 > --- a/debian/proxmox-backup-server.install > +++ b/debian/proxmox-backup-server.install > @@ -4,6 +4,7 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/ > etc/proxmox-backup-daily-update.timer /lib/systemd/system/ > etc/proxmox-backup-proxy.service /lib/systemd/system/ > etc/proxmox-backup.service /lib/systemd/system/ > +etc/removable-device-attach at .service /lib/systemd/system/ > usr/bin/pmt > usr/bin/pmtx > usr/bin/proxmox-tape > diff --git a/debian/proxmox-backup-server.udev b/debian/proxmox-backup-server.udev > index afdfb2bc7..e21b8bc71 100644 > --- a/debian/proxmox-backup-server.udev > +++ b/debian/proxmox-backup-server.udev > @@ -16,3 +16,6 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER > SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg" > > LABEL="persistent_storage_tape_end" > + > +# triggers the mounting of a removable device > +ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}" > \ No newline at end of file > diff --git a/etc/Makefile b/etc/Makefile > index 42f639f62..26e91684e 100644 > --- a/etc/Makefile > +++ b/etc/Makefile > @@ -2,6 +2,7 @@ include ../defines.mk > > UNITS := \ > proxmox-backup-daily-update.timer \ > + removable-device-attach at .service > > DYNAMIC_UNITS := \ > proxmox-backup-banner.service \ > diff --git a/etc/removable-device-attach at .service b/etc/removable-device-attach at .service > new file mode 100644 > index 
000000000..e10d1ea3c > --- /dev/null > +++ b/etc/removable-device-attach at .service > @@ -0,0 +1,8 @@ > +[Unit] > +Description=Try to mount the removable device of a datastore with uuid '%i'. > +After=proxmox-backup-proxy.service > +Requires=proxmox-backup-proxy.service > + > +[Service] > +Type=simple > +ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i > diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs > index 32a55fb9c..bcfdae786 100644 > --- a/src/bin/proxmox_backup_manager/datastore.rs > +++ b/src/bin/proxmox_backup_manager/datastore.rs > @@ -9,7 +9,7 @@ use proxmox_backup::api2; > use proxmox_backup::api2::config::datastore::DeletableProperty; > use proxmox_backup::client_helpers::connect_to_localhost; > > -use anyhow::Error; > +use anyhow::{format_err, Error}; > use serde_json::Value; > > #[api( > @@ -244,6 +244,53 @@ async fn update_datastore(name: String, mut param: Value) -> Result<(), Error> { > Ok(()) > } > > +#[api( > + protected: true, > + input: { > + properties: { > + uuid: { > + type: String, > + description: "The UUID of the device that should be mounted", > + }, > + "output-format": { > + schema: OUTPUT_FORMAT, > + optional: true, > + }, > + }, > + }, > +)] > +/// Try mounting a removable datastore given the UUID. > +async fn uuid_mount(param: Value, _rpcenv: &mut dyn RpcEnvironment) -> Result { > + let uuid = param["uuid"] > + .as_str() > + .ok_or_else(|| format_err!("uuid has to be specified"))?; > + > + let (config, _digest) = pbs_config::datastore::config()?; > + let list: Vec = config.convert_to_typed_array("datastore")?; > + let matching_stores: Vec = list > + .into_iter() > + .filter(|store: &DataStoreConfig| { > + store > + .backing_device > + .clone() > + .map_or(false, |device| device.eq(&uuid)) > + }) > + .collect(); > + > + if matching_stores.len() != 1 { > + return Ok(Value::Null); > + } > + > + if let Some(store) = matching_stores.get(0) { > + api2::admin::datastore::do_mount_device(store.clone())?; > + return Ok(Value::Null); this return is redundant ;) > + } > + > + // we don't want to fail for UUIDs that are not associated with datastores, as that produces > + // quite some noise in the logs, given this is check for every device that is plugged in. 
> + Ok(Value::Null) > +} > + > pub fn datastore_commands() -> CommandLineInterface { > let cmd_def = CliCommandMap::new() > .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) > @@ -289,6 +336,10 @@ pub fn datastore_commands() -> CommandLineInterface { > pbs_config::datastore::complete_calendar_event, > ), > ) > + .insert( > + "uuid-mount", > + CliCommand::new(&API_METHOD_UUID_MOUNT).arg_param(&["uuid"]), > + ) > .insert( > "remove", > CliCommand::new(&API_METHOD_DELETE_DATASTORE) > -- > 2.39.5 > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > From d.csapak at proxmox.com Mon Nov 25 15:23:55 2024 From: d.csapak at proxmox.com (Dominik Csapak) Date: Mon, 25 Nov 2024 15:23:55 +0100 Subject: [pbs-devel] [PATCH v2 proxmox-backup] ui: sync job: fix source group filters based on sync direction In-Reply-To: <20241125110323.169074-1-c.ebner@proxmox.com> References: <20241125110323.169074-1-c.ebner@proxmox.com> Message-ID: <39c2cd0a-981d-4b7d-80ce-b828634eb4b9@proxmox.com> seems good to me now, and works as intended from my tests in general I'd like to think a bit harder about how we can make that better so we don't have to manually write the update in so many different places with various conditions, but for now it seems good enough (e.g. I could imagine using viewmodels here might help? or custom events that the gridfilter could listen to...) anyway, consider this Reviewed-by: Dominik Csapak Tested-by: Dominik Csapak From f.gruenbichler at proxmox.com Mon Nov 25 15:37:01 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Mon, 25 Nov 2024 15:37:01 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2] reuse-datastore: avoid creating another default prune job In-Reply-To: References: <20241125085953.19828-1-g.goller@proxmox.com> Message-ID: <1732545364.j7koj2vyxl.astroid@yuna.none> On November 25, 2024 11:10 am, Christian Ebner wrote: > On 11/25/24 09:59, Gabriel Goller wrote: >> If a datastore with a default prune job is removed, the prune job is >> preserverd as it is stored in /etc/proxmox-backup/prune.cfg. We also >> create a default prune job for every datastore - this means that when >> reusing a datastore that previously existed, you end up with duplicate >> prune jobs. > > Looking at this once more, I am not so sure anymore that this should > only check for the default prune job? Why not check if there is any > prune job configured at all for this datastore, and only if there is > none create the new default prune job? that would also work? - if no prune job exists for this store, create default one - if explicit prune job options were given, create that one - otherwise, don't add a prune job (no options given, and one exists already for this store) From t.lamprecht at proxmox.com Mon Nov 25 15:45:57 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 15:45:57 +0100 Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 0/3] docs: add installation media & wizard documentation In-Reply-To: <20241001142353.863405-1-c.heiss@proxmox.com> References: <20241001142353.863405-1-c.heiss@proxmox.com> Message-ID: <6bcf22c9-99aa-47a8-9587-6e223bea4257@proxmox.com> Am 01.10.24 um 16:23 schrieb Christoph Heiss: > This series adds documentation on how to create a proper installation > media, as well as how to use our installation wizard.
Mostly taken from > the existing PVE/PMG equivalent, adapted as needed of course. > > Patch #3 contains all the new images and can be found in full on my > corresponding staff branch - see also the note attached to that patch. > > Christoph Heiss (3): > docs: add installation media preparation guide > docs: add installation wizard guide > docs: images: add installer guide screenshots > > .../screenshots/pbs-installer-grub-menu.png | Bin 0 -> 66840 bytes > .../screenshots/pbs-installer-location.png | Bin 0 -> 146079 bytes > .../screenshots/pbs-installer-network.png | Bin 0 -> 156355 bytes > .../screenshots/pbs-installer-password.png | Bin 0 -> 144742 bytes > .../screenshots/pbs-installer-progress.png | Bin 0 -> 165959 bytes > .../screenshots/pbs-installer-select-disk.png | Bin 0 -> 168424 bytes > .../screenshots/pbs-installer-summary.png | Bin 0 -> 141937 bytes > docs/images/screenshots/pbs-tui-installer.png | Bin 0 -> 4729 bytes > docs/installation-media.rst | 147 ++++++++ > docs/installation.rst | 25 +- > docs/local-zfs.rst | 1 + > docs/system-requirements.rst | 2 + > docs/using-the-installer.rst | 345 ++++++++++++++++++ > 13 files changed, 500 insertions(+), 20 deletions(-) > create mode 100644 docs/images/screenshots/pbs-installer-grub-menu.png > create mode 100644 docs/images/screenshots/pbs-installer-location.png > create mode 100644 docs/images/screenshots/pbs-installer-network.png > create mode 100644 docs/images/screenshots/pbs-installer-password.png > create mode 100644 docs/images/screenshots/pbs-installer-progress.png > create mode 100644 docs/images/screenshots/pbs-installer-select-disk.png > create mode 100644 docs/images/screenshots/pbs-installer-summary.png > create mode 100644 docs/images/screenshots/pbs-tui-installer.png > create mode 100644 docs/installation-media.rst > create mode 100644 docs/using-the-installer.rst > applied, thanks! From t.lamprecht at proxmox.com Mon Nov 25 15:48:17 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 15:48:17 +0100 Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 1/2] d/control: bump proxmox-widget-toolkit dependency In-Reply-To: <20241111125619.193930-1-l.wagner@proxmox.com> References: <20241111125619.193930-1-l.wagner@proxmox.com> Message-ID: <66ca5712-3947-455f-bf56-ea8a2c161486@proxmox.com> Am 11.11.24 um 13:56 schrieb Lukas Wagner: > We need "notification: matcher: match-field: show known fields/values", > which was released in proxmox-widget-toolkit 4.2.4 > > Signed-off-by: Lukas Wagner > --- > debian/control | 2 +- > 1 file changed, 1 insertion(+), 1 deletion(-) > > applied both patches, thanks! 
From h.laimer at proxmox.com Mon Nov 25 15:48:57 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 15:48:57 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 07/25] api: add check for nested datastores on creation In-Reply-To: <1732542160.of8bqk38he.astroid@yuna.none> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-8-h.laimer@proxmox.com> <1732542160.of8bqk38he.astroid@yuna.none> Message-ID: <19884931-b142-4e80-88ae-db2dfb10259c@proxmox.com> On 11/25/24 14:44, Fabian Grünbichler wrote: > On November 22, 2024 3:46 pm, Hannes Laimer wrote: >> Signed-off-by: Hannes Laimer >> --- >> * new in v14, and not removable datastore specific >> >> src/api2/config/datastore.rs | 14 ++++++++++++++ >> 1 file changed, 14 insertions(+) >> >> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs >> index 420f8ddd0..75e1a1a56 100644 >> --- a/src/api2/config/datastore.rs >> +++ b/src/api2/config/datastore.rs >> @@ -81,6 +81,20 @@ pub(crate) fn do_create_datastore( >> bail!("cannot create datastore in root path"); >> } >> >> + for store in config.convert_to_typed_array::("datastore")? { >> + if store.backing_device != datastore.backing_device { >> + continue; >> + } > > this is not needed, if you compare absolute_paths below? > absolute path is DS_MNT_DIR/{name}, it does not contain the on-device path, we need this >> + if store.path.starts_with(&datastore.path) || datastore.path.starts_with(&store.path) { > > this is broken, as `path` is a String here, and not a Path, so > `starts_with` doesn't properly match on path components, but on > arbitrary substrings.. > > i.e., I can't configure two removable datastores, one using "removable" > and one using "removable2" as path atm.. > >> + param_bail!( >> + "path", >> + "nested datastores not allowed: '{}' already in '{}'", >> + store.name, >> + store.path >> + ); >> + } >> + } >> + >> let need_unmount = datastore.backing_device.is_some(); >> if need_unmount { >> do_mount_device(datastore.clone())?; >> -- >> 2.39.5 >> >> >> >> _______________________________________________ >> pbs-devel mailing list >> pbs-devel at lists.proxmox.com >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel >> >> >> > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > From t.lamprecht at proxmox.com Mon Nov 25 15:50:30 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 15:50:30 +0100 Subject: [pbs-devel] applied: [PATCH v2 proxmox-backup] ui: sync job: fix source group filters based on sync direction In-Reply-To: <20241125110323.169074-1-c.ebner@proxmox.com> References: <20241125110323.169074-1-c.ebner@proxmox.com> Message-ID: <829829bf-0cfd-4518-a994-b3ac0263c367@proxmox.com> Am 25.11.24 um 12:03 schrieb Christian Ebner: > Fix switching the source for group filters based on the sync jobs > sync direction. > > The helper to set the local namespace for the group filers was > introduced in commit 43a92c8c ("ui: group filter: allow to set > namespace for local datastore"), but never used because lost during > subsequent iterations of reworking the patch series. > > The switching is corrected by: > - correctly initializing the local store and namespace for the group > filer of sync jobs in push direction in the controller init, if a > datastore is set.
> - fixing an incorrect check for the sync direction in the remote > datastore selector change listener. > - conditionally switching namespace to be set for the group filter in > the remote and local namespace selector change listeners. > - conditionally switching datastore to be set for the group filter in > the local datastore selector change listener. > > Reported-by: Lukas Wagner > Signed-off-by: Christian Ebner > --- > changes since version 1 (thanks @Dominik for catching the issues): > - only init group filters if there is a datastore given > - also switch group filters datastore when switching local datastore > - removed unneeded setLocalDatastore, as setLocalNamespace sets > datastore and namespace > - fixed eslint issues > - updated commit message > > www/window/SyncJobEdit.js | 31 +++++++++++++++++++++++++++++-- > 1 file changed, 29 insertions(+), 2 deletions(-) > > applied, with Dominik's R-b and T-b, thanks! From t.lamprecht at proxmox.com Mon Nov 25 15:53:21 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 15:53:21 +0100 Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 0/3] implement a minimum length of 8 characters for new passwords In-Reply-To: <20241004134054.263913-1-s.sterz@proxmox.com> References: <20241004134054.263913-1-s.sterz@proxmox.com> Message-ID: <47a1770c-cc86-4aeb-9d8e-6c942baf6d63@proxmox.com> Am 04.10.24 um 15:40 schrieb Shannon Sterz: > this patch series aims to implement a minimum length of 8 characters for > new passwords. this is more in line with NIST's current recommendations > [1]. > > the first patch in this series also ignores the `password` parameter in > the update user endpoint, as it is currently redundant. see the commit > for more info. > > [1]: https://pages.nist.gov/800-63-4/sp800-63b.html#passwordver > > Shannon Sterz (3): > api: ignore password parameter in the update_user endpoint > api: enforce minimum character limit of 8 on new passwords > ui: set min length for new passwords to 8 > > pbs-api-types/src/lib.rs | 2 +- > src/api2/access/mod.rs | 4 ++-- > src/api2/access/user.rs | 32 +++++++++++--------------------- > www/config/UserView.js | 1 + > www/window/UserEdit.js | 2 +- > 5 files changed, 16 insertions(+), 25 deletions(-) > > -- > 2.39.5 > applied, thanks! 
From f.gruenbichler at proxmox.com Mon Nov 25 15:53:57 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?Q?Fabian_Gr=C3=BCnbichler?=) Date: Mon, 25 Nov 2024 15:53:57 +0100 (CET) Subject: [pbs-devel] [PATCH proxmox-backup v14 07/25] api: add check for nested datastores on creation In-Reply-To: <19884931-b142-4e80-88ae-db2dfb10259c@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-8-h.laimer@proxmox.com> <1732542160.of8bqk38he.astroid@yuna.none> <19884931-b142-4e80-88ae-db2dfb10259c@proxmox.com> Message-ID: <2041103716.10452.1732546437086@webmail.proxmox.com> > Hannes Laimer hat am 25.11.2024 15:48 CET geschrieben: > > > On 11/25/24 14:44, Fabian Gr?nbichler wrote: > > On November 22, 2024 3:46 pm, Hannes Laimer wrote: > >> Signed-off-by: Hannes Laimer > >> --- > >> * new in v14, and not removable datastore specific > >> > >> src/api2/config/datastore.rs | 14 ++++++++++++++ > >> 1 file changed, 14 insertions(+) > >> > >> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs > >> index 420f8ddd0..75e1a1a56 100644 > >> --- a/src/api2/config/datastore.rs > >> +++ b/src/api2/config/datastore.rs > >> @@ -81,6 +81,20 @@ pub(crate) fn do_create_datastore( > >> bail!("cannot create datastore in root path"); > >> } > >> > >> + for store in config.convert_to_typed_array::("datastore")? { > >> + if store.backing_device != datastore.backing_device { > >> + continue; > >> + } > > > > this is not needed, if you compare absolute_paths below? > > > > absolute path is DS_MNT_DIR/{name}, it does not contain the on-device > path, we need this fair enough, but the current form is still broken across the board.. we then actually need two checks here: - absolute paths not overlapping for removable and regular datastores - on device paths not overlapping within a device for removable datastores > > >> + if store.path.starts_with(&datastore.path) || datastore.path.starts_with(&store.path) { > > > > this is broken, as `path` is a String here, and not a Path, so > > `starts_with` doesn't properly match on path components, but on > > arbitrary substrings.. > > > > i.e., I can't configure two removable datastores, one using "removable" > > and one using "removable2" as path atm.. and this still needs to be addressed.. 
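A rough sketch of the component-wise overlap test the second check could use (hypothetical helper, not the actual implementation):

use std::path::Path;

/// Hypothetical helper: true if one relative on-device path contains the other.
fn paths_overlap(a: &str, b: &str) -> bool {
    let (a, b) = (Path::new(a), Path::new(b));
    a.starts_with(b) || b.starts_with(a)
}

fn main() {
    assert!(!paths_overlap("removable", "removable2")); // distinct stores are fine
    assert!(paths_overlap("store", "store/inner")); // nesting must be rejected
}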
> > >> + param_bail!( > >> + "path", > >> + "nested datastores not allowed: '{}' already in '{}'", > >> + store.name, > >> + store.path > >> + ); > >> + } > >> + } > >> + > >> let need_unmount = datastore.backing_device.is_some(); > >> if need_unmount { > >> do_mount_device(datastore.clone())?; > >> -- > >> 2.39.5 > >> > >> > >> > >> _______________________________________________ > >> pbs-devel mailing list > >> pbs-devel at lists.proxmox.com > >> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > >> > >> > >> > > > > > > _______________________________________________ > > pbs-devel mailing list > > pbs-devel at lists.proxmox.com > > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel > > > > > > > > _______________________________________________ > pbs-devel mailing list > pbs-devel at lists.proxmox.com > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel From t.lamprecht at proxmox.com Mon Nov 25 16:39:42 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 16:39:42 +0100 Subject: [pbs-devel] applied: [PATCH proxmox v4 4/7] rest-server: add custom handlebars escape fn In-Reply-To: <20240913131033.396324-5-g.goller@proxmox.com> References: <20240913131033.396324-1-g.goller@proxmox.com> <20240913131033.396324-5-g.goller@proxmox.com> Message-ID: <31def050-1627-4ff6-8100-32fae5aa3303@proxmox.com> Am 13.09.24 um 15:10 schrieb Gabriel Goller: > Add a custom handlebars escape function. It's nearly identical to the > default `html_escape` fn [0], but it does not escape the '='. This is > needed to support base64 encoded values. > > [0]: https://docs.rs/handlebars/latest/handlebars/fn.html_escape.html > > Signed-off-by: Gabriel Goller > --- > proxmox-rest-server/src/api_config.rs | 28 ++++++++++++++++++++++++++- > 1 file changed, 27 insertions(+), 1 deletion(-) > > applied, thanks! From h.laimer at proxmox.com Mon Nov 25 17:21:21 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:21 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v14 24/26] ui: support create removable datastore through directory creation In-Reply-To: <777a0155-3110-49bd-ac57-6b1e7b058290@proxmox.com> References: <20241122144713.299130-1-h.laimer@proxmox.com> <20241122144713.299130-25-h.laimer@proxmox.com> <777a0155-3110-49bd-ac57-6b1e7b058290@proxmox.com> Message-ID: <4e1305ea-b839-4fec-aa13-bcd312a67c52@proxmox.com> On 11/25/24 14:28, Dominik Csapak wrote: > a bit offtopic, but why only directory storages and not also zfs based > ones? > no reason, should be pretty easy to add. I'll do a follow-up > e.g. i can imagine users wanting a zpool on an external disk too (for > checksumming, send/receive, snapshotting, etc) > > On 11/22/24 15:47, Hannes Laimer wrote: >> Signed-off-by: Hannes Laimer >> --- >> src/api2/node/disks/directory.rs | 2 ++ >> www/DirectoryList.js | 13 +++++++++++++ >> www/window/CreateDirectory.js | 14 ++++++++++++++ >> 3 files changed, 29 insertions(+) >> >> diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/ >> directory.rs >> index ff817b253..2f7cc7a27 100644 >> --- a/src/api2/node/disks/directory.rs >> +++ b/src/api2/node/disks/directory.rs >> @@ -150,6 +150,8 @@ pub fn list_datastore_mounts() -> >> Result, Error> { >> "removable-datastore": { >> description: "The added datastore is removable.", >> type: bool, >> + optional: true, >> + default: false, >> }, >> filesystem: { >> type: FileSystemType, >> diff --git a/www/DirectoryList.js b/www/DirectoryList.js >> index adefa9abf..25921a623 100644 >> --- a/www/DirectoryList.js >> +++ b/www/DirectoryList.js >> @@ -121,6 +121,19 @@ Ext.define('PBS.admin.Directorylist', { >> ], >> columns: [ >> + { >> + text: '', >> + flex: 0, >> + width: 35, >> + dataIndex: 'removable', >> + renderer: function(_text, _, row) { >> + if (row.data.removable) { >> + return ``; >> + } else { >> + return ''; >> + } >> + }, >> + }, >> { >> text: gettext('Path'), >> dataIndex: 'path', >> diff --git a/www/window/CreateDirectory.js b/www/window/ >> CreateDirectory.js >> index 6aabe21ab..38d6979d9 100644 >> --- a/www/window/CreateDirectory.js >> +++ b/www/window/CreateDirectory.js >> @@ -43,6 +43,20 @@ Ext.define('PBS.window.CreateDirectory', { >> name: 'add-datastore', >> fieldLabel: gettext('Add as Datastore'), >> value: '1', >> + listeners: { >> + change(field, newValue, _oldValue) { >> + let form = field.up('form'); >> + let rmBox = form.down('[name=removable-datastore]'); >> + >> + rmBox.setDisabled(!newValue); >> + rmBox.setValue(false); >> + }, >> + }, >> + }, >> + { >> + xtype: 'proxmoxcheckbox', >> + name: 'removable-datastore', >> + fieldLabel: gettext('is removable'), >> }, >> ], >> }); > > From h.laimer at proxmox.com Mon Nov 25 17:21:54 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:54 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 07/26] api: add check for nested datastores on creation In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-8-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/config/datastore.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index d4cee458d..d6cfdbb0c 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -1,4 +1,4 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use ::serde::{Deserialize, Serialize}; use anyhow::{bail, format_err, Error}; @@ -81,6 +81,25 @@ pub(crate) fn do_create_datastore( bail!("cannot create datastore in root path"); } + let new_store_path = Path::new(&datastore.path); + for store in config.convert_to_typed_array::<DataStoreConfig>("datastore")? { + if store.backing_device != datastore.backing_device { + continue; + }
+ let store_path = Path::new(&store.path); + if store_path.starts_with(&new_store_path) || new_store_path.starts_with(&store_path) { + param_bail!( + "path", + "nested datastores not allowed: '{}' already in '{}'", + store.name, + store.path + ); + } + } + let need_unmount = datastore.backing_device.is_some(); if need_unmount { do_mount_device(datastore.clone())?; -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:53 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:53 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 06/26] api: removable datastore creation In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-7-h.laimer@proxmox.com> Devices can contains multiple datastores. If the specified path already contains a datastore, `reuse datastore` has to be set so it'll be added without creating a chunckstore. Signed-off-by: Hannes Laimer --- src/api2/config/datastore.rs | 62 ++++++++++++++++++++++++++++-------- 1 file changed, 48 insertions(+), 14 deletions(-) diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 37d1528c7..d4cee458d 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use ::serde::{Deserialize, Serialize}; -use anyhow::{bail, Error}; +use anyhow::{bail, format_err, Error}; use hex::FromHex; use serde_json::Value; use tracing::warn; @@ -21,7 +21,8 @@ use pbs_config::BackupLockGuard; use pbs_datastore::chunk_store::ChunkStore; use crate::api2::admin::{ - prune::list_prune_jobs, sync::list_config_sync_jobs, verify::list_verification_jobs, + datastore::do_mount_device, prune::list_prune_jobs, sync::list_config_sync_jobs, + verify::list_verification_jobs, }; use crate::api2::config::prune::{delete_prune_job, do_create_prune_job}; use crate::api2::config::sync::delete_sync_job; @@ -32,6 +33,7 @@ use pbs_config::CachedUserInfo; use proxmox_rest_server::WorkerTask; use crate::server::jobstate; +use crate::tools::disks::unmount_by_mountpoint; #[api( input: { @@ -73,37 +75,58 @@ pub(crate) fn do_create_datastore( datastore: DataStoreConfig, reuse_datastore: bool, ) -> Result<(), Error> { - let path: PathBuf = datastore.path.clone().into(); + let path: PathBuf = datastore.absolute_path().into(); if path.parent().is_none() { bail!("cannot create datastore in root path"); } + let need_unmount = datastore.backing_device.is_some(); + if need_unmount { + do_mount_device(datastore.clone())?; + }; + let tuning: DatastoreTuning = serde_json::from_value( DatastoreTuning::API_SCHEMA .parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?, )?; - if reuse_datastore { - ChunkStore::verify_chunkstore(&path)?; + let res = if reuse_datastore { + ChunkStore::verify_chunkstore(&path) } else { + let mut is_empty = true; if let Ok(dir) = std::fs::read_dir(&path) { for file in dir { let name = file?.file_name(); let name = name.to_str(); if !name.map_or(false, |name| name.starts_with('.') || name == "lost+found") { - bail!("datastore path is not empty"); + is_empty = false; + break; } } } - let backup_user = pbs_config::backup_user()?; - let _store = ChunkStore::create( - &datastore.name, - path, - backup_user.uid, - backup_user.gid, - tuning.sync_level.unwrap_or_default(), - )?; + if is_empty { + let backup_user = pbs_config::backup_user()?; + ChunkStore::create( + &datastore.name, + path.clone(), + backup_user.uid, + backup_user.gid, + 
tuning.sync_level.unwrap_or_default(), + ) + .map(|_| ()) + } else { + Err(format_err!("datastore path not empty")) + } + }; + + if res.is_err() { + if need_unmount { + if let Err(e) = unmount_by_mountpoint(&path) { + warn!("could not unmount device: {e}"); + } + } + return res; } config.set_data(&datastore.name, "datastore", &datastore)?; @@ -147,6 +170,17 @@ pub fn create_datastore( param_bail!("name", "datastore '{}' already exists.", config.name); } + if config.backing_device.is_none() && !config.path.starts_with("/") { + param_bail!("path", "expected an absolute path, '{}' is not", config.path); + } + if config.backing_device.is_some() && config.path.starts_with("/") { + param_bail!( + "path", + "expected a relative on-device path, '{}' is not", + config.path + ); + } + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:56 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:56 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 09/26] bin: manager: add (un)mount command In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-10-h.laimer@proxmox.com> We can't just directly delegate these commands to the API endpoints since both mounting and unmounting are done in a worker, and that one would be killed when the parent ends. In this case that would be the CLI process, which basically ends right after spawning the worker. Signed-off-by: Hannes Laimer --- pbs-config/src/datastore.rs | 14 ++++ src/bin/proxmox_backup_manager/datastore.rs | 74 +++++++++++++++++++++ 2 files changed, 88 insertions(+) diff --git a/pbs-config/src/datastore.rs b/pbs-config/src/datastore.rs index dc5bb3da9..396dcb371 100644 --- a/pbs-config/src/datastore.rs +++ b/pbs-config/src/datastore.rs @@ -62,6 +62,20 @@ pub fn complete_datastore_name(_arg: &str, _param: &HashMap) -> } } +pub fn complete_removable_datastore_name( + _arg: &str, + _param: &HashMap<String, String>, +) -> Vec<String> { + match config() { + Ok((data, _digest)) => data + .sections + .into_iter() + .filter_map(|(name, (_, c))| c.get("backing-device").map(|_| name)) + .collect(), + Err(_) => Vec::new(), + } +} + pub fn complete_acl_path(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> { let mut list = vec![ String::from("/"), diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs index 3a349451f..32a55fb9c 100644 --- a/src/bin/proxmox_backup_manager/datastore.rs +++ b/src/bin/proxmox_backup_manager/datastore.rs @@ -42,6 +42,34 @@ fn list_datastores(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result Result<(), Error> { + param["node"] = "localhost".into(); + + let info = &api2::admin::datastore::API_METHOD_MOUNT; + let result = match info.handler { + ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?, + _ => unreachable!(), + }; + + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + Ok(()) +} + #[api( input: { properties: { @@ -101,6 +129,34 @@ async fn create_datastore(mut param: Value) -> Result { Ok(Value::Null) } +#[api( + protected: true, + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + digest: { + optional: true, + schema: PROXMOX_CONFIG_DIGEST_SCHEMA, + }, + }, + }, +)] +/// Unmount a removable datastore.
+async fn unmount_datastore(mut param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<(), Error> { + param["node"] = "localhost".into(); + + let info = &api2::admin::datastore::API_METHOD_UNMOUNT; + let result = match info.handler { + ApiHandler::Async(handler) => (handler)(param, info, rpcenv).await?, + _ => unreachable!(), + }; + + crate::wait_for_local_worker(result.as_str().unwrap()).await?; + Ok(()) +} + #[api( protected: true, input: { properties: { @@ -191,6 +247,15 @@ async fn update_datastore(name: String, mut param: Value) -> Result<(), Error> { pub fn datastore_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) + .insert( + "mount", + CliCommand::new(&API_METHOD_MOUNT_DATASTORE) + .arg_param(&["store"]) + .completion_cb( + "store", + pbs_config::datastore::complete_removable_datastore_name, + ), + ) .insert( "show", CliCommand::new(&API_METHOD_SHOW_DATASTORE) @@ -201,6 +266,15 @@ pub fn datastore_commands() -> CommandLineInterface { "create", CliCommand::new(&API_METHOD_CREATE_DATASTORE).arg_param(&["name", "path"]), ) + .insert( + "unmount", + CliCommand::new(&API_METHOD_UNMOUNT_DATASTORE) + .arg_param(&["store"]) + .completion_cb( + "store", + pbs_config::datastore::complete_removable_datastore_name, + ), + ) .insert( "update", CliCommand::new(&API_METHOD_UPDATE_DATASTORE) -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:59 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:59 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 12/26] docs: add removable datastores section In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-13-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- docs/storage.rst | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/docs/storage.rst b/docs/storage.rst index f1e15d522..361af4420 100644 --- a/docs/storage.rst +++ b/docs/storage.rst @@ -165,6 +165,44 @@ following command creates a new datastore called ``store1`` on # proxmox-backup-manager datastore create store1 /backup/disk1/store1 +Removable Datastores +^^^^^^^^^^^^^^^^^^^^ +Removable datastores have a ``backing-device`` associated with them; they can be +mounted and unmounted. Other than that, they behave the same way a normal datastore +would. + +They can be created on already correctly formatted partitions, which, as with normal +datastores, should be either ``ext4`` or ``xfs``. It is also possible to create them +on completely unused disks through "Administration" > "Disks / Storage" > "Directory". +Using this method, the disk will be partitioned and formatted automatically for the datastore. + +Devices with only one datastore on them will be mounted automatically. It is possible to create a +removable datastore on one PBS and use it on multiple instances; the device just has to be added +on each instance as a removable datastore by checking "reuse datastore" on creation. +If the device already contains a datastore at the specified path, it'll just be added as +a new datastore to the PBS instance and will be mounted whenever plugged in. Unmounting has +to be done through the UI by clicking "Unmount" on the summary page or using the CLI. + +A single device can house multiple datastores; the only limitation is that they are not +allowed to be nested. + +..
code-block:: console + + # proxmox-backup-manager datastore unmount store1 + +Both methods will wait for any running tasks to finish and then unmount the device. + +All removable datastores are mounted under /mnt/datastore/, and the specified path +refers to the path on the device. + +All datastores present on a device can be listed using ``proxmox-backup-debug``. + +.. code-block:: console + + # proxmox-backup-debug inspect device /dev/... + + + Managing Datastores ^^^^^^^^^^^^^^^^^^^ -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:57 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:57 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 10/26] add auto-mounting for removable datastores In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-11-h.laimer@proxmox.com> If a device houses multiple datastores, none of them will be mounted automatically. If a device only contains a single datastore, it will be mounted automatically. The reason for not mounting multiple datastores automatically is that we don't know which is actually wanted, and since mounting all means also all have to be unmounted manually, it made sense to have the user choose which to mount. Signed-off-by: Hannes Laimer --- debian/proxmox-backup-server.install | 1 + debian/proxmox-backup-server.udev | 3 ++ etc/Makefile | 1 + etc/removable-device-attach at .service | 8 ++++ src/bin/proxmox_backup_manager/datastore.rs | 52 ++++++++++++++++++++- 5 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 etc/removable-device-attach at .service diff --git a/debian/proxmox-backup-server.install b/debian/proxmox-backup-server.install index 79757eadb..ff581e3dd 100644 --- a/debian/proxmox-backup-server.install +++ b/debian/proxmox-backup-server.install @@ -4,6 +4,7 @@ etc/proxmox-backup-daily-update.service /lib/systemd/system/ etc/proxmox-backup-daily-update.timer /lib/systemd/system/ etc/proxmox-backup-proxy.service /lib/systemd/system/ etc/proxmox-backup.service /lib/systemd/system/ +etc/removable-device-attach at .service /lib/systemd/system/ usr/bin/pmt usr/bin/pmtx usr/bin/proxmox-tape diff --git a/debian/proxmox-backup-server.udev b/debian/proxmox-backup-server.udev index afdfb2bc7..e21b8bc71 100644 --- a/debian/proxmox-backup-server.udev +++ b/debian/proxmox-backup-server.udev @@ -16,3 +16,6 @@ SUBSYSTEM=="scsi_generic", SUBSYSTEMS=="scsi", ATTRS{type}=="1", ENV{ID_SCSI_SER SYMLINK+="tape/by-id/scsi-$env{ID_SCSI_SERIAL}-sg" LABEL="persistent_storage_tape_end" + +# triggers the mounting of a removable device +ACTION=="add", SUBSYSTEM=="block", ENV{ID_FS_UUID}!="", TAG+="systemd", ENV{SYSTEMD_WANTS}="removable-device-attach@$env{ID_FS_UUID}" \ No newline at end of file diff --git a/etc/Makefile b/etc/Makefile index 42f639f62..26e91684e 100644 --- a/etc/Makefile +++ b/etc/Makefile @@ -2,6 +2,7 @@ include ../defines.mk UNITS := \ proxmox-backup-daily-update.timer \ + removable-device-attach at .service DYNAMIC_UNITS := \ proxmox-backup-banner.service \ diff --git a/etc/removable-device-attach at .service b/etc/removable-device-attach at .service new file mode 100644 index 000000000..e10d1ea3c --- /dev/null +++ b/etc/removable-device-attach at .service @@ -0,0 +1,8 @@ +[Unit] +Description=Try to mount the removable device of a datastore with uuid '%i'.
+After=proxmox-backup-proxy.service +Requires=proxmox-backup-proxy.service + +[Service] +Type=simple +ExecStart=/usr/sbin/proxmox-backup-manager datastore uuid-mount %i diff --git a/src/bin/proxmox_backup_manager/datastore.rs b/src/bin/proxmox_backup_manager/datastore.rs index 32a55fb9c..8711997de 100644 --- a/src/bin/proxmox_backup_manager/datastore.rs +++ b/src/bin/proxmox_backup_manager/datastore.rs @@ -9,7 +9,7 @@ use proxmox_backup::api2; use proxmox_backup::api2::config::datastore::DeletableProperty; use proxmox_backup::client_helpers::connect_to_localhost; -use anyhow::Error; +use anyhow::{format_err, Error}; use serde_json::Value; #[api( @@ -244,6 +244,52 @@ async fn update_datastore(name: String, mut param: Value) -> Result<(), Error> { Ok(()) } +#[api( + protected: true, + input: { + properties: { + uuid: { + type: String, + description: "The UUID of the device that should be mounted", + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + }, + }, +)] +/// Try mounting a removable datastore given the UUID. +async fn uuid_mount(param: Value, _rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> { + let uuid = param["uuid"] + .as_str() + .ok_or_else(|| format_err!("uuid has to be specified"))?; + + let (config, _digest) = pbs_config::datastore::config()?; + let list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?; + let matching_stores: Vec<DataStoreConfig> = list + .into_iter() + .filter(|store: &DataStoreConfig| { + store + .backing_device + .clone() + .map_or(false, |device| device.eq(&uuid)) + }) + .collect(); + + if matching_stores.len() != 1 { + return Ok(Value::Null); + } + + if let Some(store) = matching_stores.get(0) { + api2::admin::datastore::do_mount_device(store.clone())?; + } + + // we don't want to fail for UUIDs that are not associated with datastores, as that produces + // quite some noise in the logs, given this is checked for every device that is plugged in. + Ok(Value::Null) +} + pub fn datastore_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert("list", CliCommand::new(&API_METHOD_LIST_DATASTORES)) @@ -289,6 +335,10 @@ pub fn datastore_commands() -> CommandLineInterface { pbs_config::datastore::complete_calendar_event, ), ) + .insert( + "uuid-mount", + CliCommand::new(&API_METHOD_UUID_MOUNT).arg_param(&["uuid"]), + ) .insert( "remove", CliCommand::new(&API_METHOD_DELETE_DATASTORE) -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:03 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:03 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 16/26] ui: tree: render unmounted datastores correctly In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-17-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/NavigationTree.js | 18 +++++++++++++--- www/css/ext6-pbs.css | 8 ++++++++ www/datastore/DataStoreListSummary.js | 1 + 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/www/NavigationTree.js b/www/NavigationTree.js index 53c8daff9..bcf1d1984 100644 --- a/www/NavigationTree.js +++ b/www/NavigationTree.js @@ -267,13 +267,25 @@ Ext.define('PBS.view.main.NavigationTree', { j++; } - let [qtip, iconCls] = ['', 'fa fa-database']; + let mountStatus = records[i].data['mount-status'] ?? 'nonremovable'; + let isRemovable = mountStatus !== 'nonremovable'; + let mainIcon = `fa fa-${isRemovable ?
'plug' : 'database'}`; + let [qtip, iconCls] = ['', mainIcon]; const maintenance = records[i].data.maintenance; + + const removable_not_mounted = records[i].data['mount-status'] === 'notmounted'; + if (removable_not_mounted) { + iconCls = `${mainIcon} pmx-tree-icon-custom unplugged`; + qtip = gettext('Removable datastore not mounted'); + } if (maintenance) { const [type, message] = PBS.Utils.parseMaintenanceMode(maintenance); qtip = `${type}${message ? ': ' + message : ''}`; - let maintenanceTypeCls = type === 'delete' ? 'destroying' : 'maintenance'; - iconCls = `fa fa-database pmx-tree-icon-custom ${maintenanceTypeCls}`; + let maintenanceTypeCls = 'maintenance'; + if (type === 'delete') { + maintenanceTypeCls = 'destroying'; + } + iconCls = `${mainIcon} pmx-tree-icon-custom ${maintenanceTypeCls}`; } if (getChildTextAt(j).localeCompare(name) !== 0) { diff --git a/www/css/ext6-pbs.css b/www/css/ext6-pbs.css index c33ce6845..706e681e9 100644 --- a/www/css/ext6-pbs.css +++ b/www/css/ext6-pbs.css @@ -271,6 +271,10 @@ span.snapshot-comment-column { content: "\ "; } +.x-treelist-item-icon.fa-plug, .pmx-tree-icon-custom.fa-plug { + font-size: 12px; +} + /* datastore maintenance */ .pmx-tree-icon-custom.maintenance:after { content: "\f0ad"; @@ -290,6 +294,10 @@ span.snapshot-comment-column { color: #888; } +.pmx-tree-icon-custom.unplugged:before { + color: #888; +} + /*' PBS specific icons */ .pbs-icon-tape { diff --git a/www/datastore/DataStoreListSummary.js b/www/datastore/DataStoreListSummary.js index b908034d8..f7ea83e7b 100644 --- a/www/datastore/DataStoreListSummary.js +++ b/www/datastore/DataStoreListSummary.js @@ -22,6 +22,7 @@ Ext.define('PBS.datastore.DataStoreListSummary', { stillbad: 0, deduplication: 1.0, error: "", + removable: false, maintenance: '', }, }, -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:09 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:09 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 22/26] api: node: include removable datastores in directory list In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-23-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index b6006b47c..11d07af42 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -45,6 +45,8 @@ pub struct DatastoreMountInfo { pub path: String, /// The mounted device. 
pub device: String, + /// This is removable + pub removable: bool, /// File system type pub filesystem: Option<String>, /// Mount options pub options: Option<String>, @@ -61,7 +63,7 @@ } }, returns: { - description: "List of systemd datastore mount units.", + description: "List of removable-datastore devices and systemd datastore mount units.", type: Array, items: { type: DatastoreMountInfo, @@ -100,6 +102,28 @@ pub fn list_datastore_mounts() -> Result<Vec<DatastoreMountInfo>, Error> { path: data.Where, filesystem: data.Type, options: data.Options, + removable: false, + }); + } + + let (config, _digest) = pbs_config::datastore::config()?; + let store_list: Vec<DataStoreConfig> = config.convert_to_typed_array("datastore")?; + + for item in store_list + .into_iter() + .filter(|store| store.backing_device.is_some()) + { + let Some(backing_device) = item.backing_device.as_deref() else { + continue; + }; + list.push(DatastoreMountInfo { + unitfile: "datastore config".to_string(), + name: item.name.clone(), + device: format!("/dev/disk/by-uuid/{backing_device}"), + path: item.absolute_path(), + filesystem: None, + options: None, + removable: true,
'fa pbs-unplugged-mask' : 'fa pbs-maintenance-mask'; + me.view.el.mask(maskMessage, maskIcon); }, }); return; -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:07 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:07 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 20/26] ui: render 'unmount' maintenance mode correctly In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-21-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/Utils.js | 4 +++- www/window/MaintenanceOptions.js | 9 +++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/www/Utils.js b/www/Utils.js index 7756e9b5d..6bae9b709 100644 --- a/www/Utils.js +++ b/www/Utils.js @@ -775,7 +775,7 @@ Ext.define('PBS.Utils', { let extra = ''; if (activeTasks !== undefined) { - const conflictingTasks = activeTasks.write + (type === 'offline' ? activeTasks.read : 0); + const conflictingTasks = activeTasks.write + (type === 'offline' || type === 'unmount' ? activeTasks.read : 0); if (conflictingTasks > 0) { extra += '| '; @@ -795,6 +795,8 @@ Ext.define('PBS.Utils', { break; case 'offline': modeText = gettext("Offline"); break; + case 'unmount': modeText = gettext("Unmounting"); + break; } return `${modeText} ${extra}`; }, diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js index 7e3b42516..896d6a58e 100644 --- a/www/window/MaintenanceOptions.js +++ b/www/window/MaintenanceOptions.js @@ -86,6 +86,15 @@ Ext.define('PBS.window.MaintenanceOptions', { }; } + let unmounting = options['maintenance-type'] === 'unmount'; + let defaultType = options['maintenance-type'] === '__default__'; + if (unmounting) { + options['maintenance-type'] = ''; + } + me.callParent([options]); + + me.lookupReference('type-field').setDisabled(unmounting); + me.lookupReference('message-field').setDisabled(unmounting || defaultType); }, }); -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:49 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:49 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 02/26] maintenance: make is_offline more generic In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-3-h.laimer@proxmox.com> ... and add MaintenanceType::Delete to it. We also want to clear any cach entries if we are deleting the datastore, not just if it is marked as offline. Signed-off-by: Hannes Laimer --- pbs-api-types/src/maintenance.rs | 7 +++---- pbs-datastore/src/datastore.rs | 5 +++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index 1e3413dca..a7b8b078d 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -77,10 +77,9 @@ pub struct MaintenanceMode { } impl MaintenanceMode { - /// Used for deciding whether the datastore is cleared from the internal cache after the last - /// task finishes, so all open files are closed. 
- pub fn is_offline(&self) -> bool { - self.ty == MaintenanceType::Offline + /// Used for deciding whether the datastore is cleared from the internal cache + pub fn clear_from_cache(&self) -> bool { + self.ty == MaintenanceType::Offline || self.ty == MaintenanceType::Delete } pub fn check(&self, operation: Option) -> Result<(), Error> { diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 2755fed8c..2bf2b8437 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -116,7 +116,8 @@ impl Drop for DataStore { && pbs_config::datastore::config() .and_then(|(s, _)| s.lookup::("datastore", self.name())) .map_or(false, |c| { - c.get_maintenance_mode().map_or(false, |m| m.is_offline()) + c.get_maintenance_mode() + .map_or(false, |m| m.clear_from_cache()) }); if remove_from_cache { @@ -216,7 +217,7 @@ impl DataStore { let datastore: DataStoreConfig = config.lookup("datastore", name)?; if datastore .get_maintenance_mode() - .map_or(false, |m| m.is_offline()) + .map_or(false, |m| m.clear_from_cache()) { // the datastore drop handler does the checking if tasks are running and clears the // cache entry, so we just have to trigger it here -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:52 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:52 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 05/26] api: admin: add (un)mount endpoint for removable datastores In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-6-h.laimer@proxmox.com> Removable datastores can be mounted unless - they are already - their device is not present For unmounting the maintenance mode is set to `unmount`, which prohibits the starting of any new tasks envolving any IO, this mode is unset either - on completion of the unmount - on abort of the unmount tasks If the unmounting itself should fail, the maintenance mode stays in place and requires manual intervention by unsetting it in the config file directly. This is intentional, as unmounting should not fail, and if it should the situation should be looked at. 
Signed-off-by: Hannes Laimer --- src/api2/admin/datastore.rs | 300 ++++++++++++++++++++++++++++++++++-- 1 file changed, 290 insertions(+), 10 deletions(-) diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 9fdda8b7a..2f441b550 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -4,7 +4,7 @@ use std::collections::HashSet; use std::ffi::OsStr; use std::ops::Deref; use std::os::unix::ffi::OsStrExt; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::sync::Arc; use anyhow::{bail, format_err, Error}; @@ -30,6 +30,7 @@ use proxmox_sys::fs::{ file_read_firstline, file_read_optional_string, replace_file, CreateOptions, }; use proxmox_time::CalendarEvent; +use proxmox_worker_task::WorkerTaskContext; use pxar::accessor::aio::Accessor; use pxar::EntryKind; @@ -38,13 +39,13 @@ use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, - JobScheduleStatus, KeepOptions, Operation, PruneJobOptions, SnapshotListItem, - SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, - BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, - IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, - PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, - PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, - VERIFICATION_OUTDATED_AFTER_SCHEMA, + JobScheduleStatus, KeepOptions, MaintenanceMode, MaintenanceType, Operation, PruneJobOptions, + SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, + BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, + CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, + MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, + UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; use pbs_config::CachedUserInfo; @@ -59,8 +60,8 @@ use pbs_datastore::index::IndexFile; use pbs_datastore::manifest::BackupManifest; use pbs_datastore::prune::compute_prune_info; use pbs_datastore::{ - check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, - StoreProgress, + check_backup_owner, ensure_datastore_is_mounted, task_tracking, BackupDir, BackupGroup, + DataStore, LocalChunkReader, StoreProgress, }; use pbs_tools::json::required_string_param; use proxmox_rest_server::{formatter, WorkerTask}; @@ -2392,6 +2393,283 @@ pub async fn set_backup_owner( .await? 
} +fn setup_mounted_device(datastore: &DataStoreConfig, tmp_mount_path: &str) -> Result<(), Error> { + let default_options = proxmox_sys::fs::CreateOptions::new(); + let mount_point = datastore.absolute_path(); + let full_store_path = format!( + "{tmp_mount_path}/{}", + datastore.path.trim_start_matches('/') + ); + let backup_user = pbs_config::backup_user()?; + let options = CreateOptions::new() + .owner(backup_user.uid) + .group(backup_user.gid); + + proxmox_sys::fs::create_path( + &mount_point, + Some(default_options.clone()), + Some(options.clone()), + ) + .map_err(|e| format_err!("creating mountpoint '{mount_point}' failed: {e}"))?; + + // can't be created before it is mounted, so we have to do it here + proxmox_sys::fs::create_path( + &full_store_path, + Some(default_options.clone()), + Some(options.clone()), + ) + .map_err(|e| format_err!("creating datastore path '{full_store_path}' failed: {e}"))?; + + info!( + "bind mount '{}'({}) to '{}'", + datastore.name, datastore.path, mount_point + ); + + crate::tools::disks::bind_mount(Path::new(&full_store_path), Path::new(&mount_point)) +} + +/// Here we +/// +/// 1. mount the removable device to `/mount/<uuid>` +/// 2. bind mount `/mount/<uuid>/<path>` to `/mnt/datastore/<name>` +/// 3. unmount `/mount/<uuid>` +/// +/// leaving us with the datastore being mounted directly with its name under /mnt/datastore/... +/// +/// The reason for the randomized device mounting paths is to avoid two tasks trying to mount to +/// the same path, this is *very* unlikely since the device is only mounted really shortly, but +/// technically possible. +pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> { + if let Some(uuid) = datastore.backing_device.as_ref() { + if pbs_datastore::get_datastore_mount_status(&datastore) == Some(true) { + bail!( + "device is already mounted at '{}'", + datastore.absolute_path() + ); + } + let tmp_mount_path = format!( + "{}/{:x}", + pbs_buildcfg::rundir!("/mount"), + proxmox_uuid::Uuid::generate() + ); + + let default_options = proxmox_sys::fs::CreateOptions::new(); + proxmox_sys::fs::create_path( + &tmp_mount_path, + Some(default_options.clone()), + Some(default_options.clone()), + )?; + + info!("temporarily mounting '{uuid}' to '{}'", tmp_mount_path); + crate::tools::disks::mount_by_uuid(uuid, Path::new(&tmp_mount_path)) + .map_err(|e| format_err!("mounting to tmp path failed: {e}"))?; + + let setup_result = setup_mounted_device(&datastore, &tmp_mount_path); + + let mut unmounted = true; + if let Err(e) = crate::tools::disks::unmount_by_mountpoint(Path::new(&tmp_mount_path)) { + unmounted = false; + warn!("unmounting from tmp path '{tmp_mount_path} failed: {e}'"); + } + if unmounted { + if let Err(e) = std::fs::remove_dir(std::path::Path::new(&tmp_mount_path)) { + warn!("removing tmp path '{tmp_mount_path} failed: {e}'"); + } + } + + setup_result.map_err(|e| { + format_err!( + "Datastore '{}' could not be created: {}.", + datastore.name, + e + ) + })?; + } else { + bail!( + "Datastore '{}' cannot be mounted because it is not removable.", + datastore.name + ) + } + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + store: { + schema: DATASTORE_SCHEMA, + }, + } + }, + returns: { + schema: UPID_SCHEMA, + }, + access: { + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false), + }, +)] +/// Mount removable datastore. +pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> { + let (section_config, _digest) = pbs_config::datastore::config()?; + let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; + + if datastore.backing_device.is_none() { + bail!("datastore '{store}' is not removable"); + } + + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + + let upid = WorkerTask::new_thread( + "mount-device", + Some(store), + auth_id.to_string(), + to_stdout, + move |_worker| do_mount_device(datastore), + )?; + + Ok(json!(upid)) +} + +fn expect_maintanance_unmounting( + store: &str, +) -> Result<(pbs_config::BackupLockGuard, DataStoreConfig), Error> { + let lock = pbs_config::datastore::lock_config()?; + let (section_config, _digest) = pbs_config::datastore::config()?; + let store_config: DataStoreConfig = section_config.lookup("datastore", store)?; + + if store_config + .get_maintenance_mode() + .map_or(true, |m| m.ty != MaintenanceType::Unmount) + { + bail!("maintenance mode is not 'Unmount'"); + } + + Ok((lock, store_config)) +} + +fn unset_maintenance( + _lock: pbs_config::BackupLockGuard, + mut config: DataStoreConfig, +) -> Result<(), Error> { + let (mut section_config, _digest) = pbs_config::datastore::config()?; + config.maintenance_mode = None; + section_config.set_data(&config.name, "datastore", &config)?; + pbs_config::datastore::save_config(&section_config)?; + Ok(()) +} + +fn do_unmount_device( + datastore: DataStoreConfig, + worker: Option<&dyn WorkerTaskContext>, +) -> Result<(), Error> { + if datastore.backing_device.is_none() { + bail!("can't unmount non-removable datastore"); + } + let mount_point = datastore.absolute_path(); + + let mut active_operations = task_tracking::get_active_operations(&datastore.name)?; + let mut old_status = String::new(); + let mut aborted = false; + while active_operations.read + active_operations.write > 0 { + if let Some(worker) = worker { + if worker.abort_requested() || expect_maintanance_unmounting(&datastore.name).is_err() { + aborted = true; + break; + } + let status = format!( + "cannot unmount yet, still {} read and {} write operations active", + active_operations.read, active_operations.write + ); + if status != old_status { + info!("{status}"); + old_status = status; + } + } + std::thread::sleep(std::time::Duration::from_secs(1)); + active_operations = task_tracking::get_active_operations(&datastore.name)?; + } + + if aborted || worker.map_or(false, |w| w.abort_requested()) { + let _ = expect_maintanance_unmounting(&datastore.name) + .inspect_err(|e| warn!("maintenance mode was not as expected: {e}")) + .and_then(|(lock, config)| { + unset_maintenance(lock, config) + .inspect_err(|e| warn!("could not reset maintenance mode: {e}")) + }); + bail!("aborted, due to user request"); + } else { + let (lock, config) = expect_maintanance_unmounting(&datastore.name)?; + crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?; + unset_maintenance(lock, config) + .map_err(|e| format_err!("could not reset maintenance mode: {e}"))?; + } + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + store: { schema: DATASTORE_SCHEMA }, + }, + }, + returns: { + schema: UPID_SCHEMA, + }, + access: { + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true), + } +)] +/// Unmount a removable device that is associated with the datastore +pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) ->
+pub fn mount(store: String, rpcenv: &mut dyn RpcEnvironment) -> Result { + let (section_config, _digest) = pbs_config::datastore::config()?; + let datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; + + if datastore.backing_device.is_none() { + bail!("datastore '{store}' is not removable"); + } + + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + + let upid = WorkerTask::new_thread( + "mount-device", + Some(store), + auth_id.to_string(), + to_stdout, + move |_worker| do_mount_device(datastore), + )?; + + Ok(json!(upid)) +} + +fn expect_maintanance_unmounting( + store: &str, +) -> Result<(pbs_config::BackupLockGuard, DataStoreConfig), Error> { + let lock = pbs_config::datastore::lock_config()?; + let (section_config, _digest) = pbs_config::datastore::config()?; + let store_config: DataStoreConfig = section_config.lookup("datastore", store)?; + + if store_config + .get_maintenance_mode() + .map_or(true, |m| m.ty != MaintenanceType::Unmount) + { + bail!("maintenance mode is not 'Unmount'"); + } + + Ok((lock, store_config)) +} + +fn unset_maintenance( + _lock: pbs_config::BackupLockGuard, + mut config: DataStoreConfig, +) -> Result<(), Error> { + let (mut section_config, _digest) = pbs_config::datastore::config()?; + config.maintenance_mode = None; + section_config.set_data(&config.name, "datastore", &config)?; + pbs_config::datastore::save_config(§ion_config)?; + Ok(()) +} + +fn do_unmount_device( + datastore: DataStoreConfig, + worker: Option<&dyn WorkerTaskContext>, +) -> Result<(), Error> { + if datastore.backing_device.is_none() { + bail!("can't unmount non-removable datastore"); + } + let mount_point = datastore.absolute_path(); + + let mut active_operations = task_tracking::get_active_operations(&datastore.name)?; + let mut old_status = String::new(); + let mut aborted = false; + while active_operations.read + active_operations.write > 0 { + if let Some(worker) = worker { + if worker.abort_requested() || expect_maintanance_unmounting(&datastore.name).is_err() { + aborted = true; + break; + } + let status = format!( + "cannot unmount yet, still {} read and {} write operations active", + active_operations.read, active_operations.write + ); + if status != old_status { + info!("{status}"); + old_status = status; + } + } + std::thread::sleep(std::time::Duration::from_secs(1)); + active_operations = task_tracking::get_active_operations(&datastore.name)?; + } + + if aborted || worker.map_or(false, |w| w.abort_requested()) { + let _ = expect_maintanance_unmounting(&datastore.name) + .inspect_err(|e| warn!("maintenance mode was not as expected: {e}")) + .and_then(|(lock, config)| { + unset_maintenance(lock, config) + .inspect_err(|e| warn!("could not reset maintenance mode: {e}")) + }); + bail!("aborted, due to user request"); + } else { + let (lock, config) = expect_maintanance_unmounting(&datastore.name)?; + crate::tools::disks::unmount_by_mountpoint(Path::new(&mount_point))?; + unset_maintenance(lock, config) + .map_err(|e| format_err!("could not reset maintenance mode: {e}"))?; + } + Ok(()) +} + +#[api( + protected: true, + input: { + properties: { + store: { schema: DATASTORE_SCHEMA }, + }, + }, + returns: { + schema: UPID_SCHEMA, + }, + access: { + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true), + } +)] +/// Unmount a removable device that is associated with the datastore +pub async fn unmount(store: String, rpcenv: &mut dyn RpcEnvironment) -> 
Result { + let _lock = pbs_config::datastore::lock_config()?; + let (mut section_config, _digest) = pbs_config::datastore::config()?; + let mut datastore: DataStoreConfig = section_config.lookup("datastore", &store)?; + + if datastore.backing_device.is_none() { + bail!("datastore '{store}' is not removable"); + } + + ensure_datastore_is_mounted(&datastore)?; + + datastore.set_maintenance_mode(Some(MaintenanceMode { + ty: MaintenanceType::Unmount, + message: None, + }))?; + section_config.set_data(&store, "datastore", &datastore)?; + pbs_config::datastore::save_config(§ion_config)?; + + drop(_lock); + + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; + let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) + { + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); + let _ = proxmox_daemon::command_socket::send_raw( + sock, + &format!( + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", + &store + ), + ) + .await; + } + + let upid = WorkerTask::new_thread( + "unmount-device", + Some(store), + auth_id.to_string(), + to_stdout, + move |worker| do_unmount_device(datastore, Some(&worker)), + )?; + + Ok(json!(upid)) +} + #[sortable] const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ ( @@ -2430,6 +2708,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ .get(&API_METHOD_LIST_GROUPS) .delete(&API_METHOD_DELETE_GROUP), ), + ("mount", &Router::new().post(&API_METHOD_MOUNT)), ( "namespace", // FIXME: move into datastore:: sub-module?! @@ -2464,6 +2743,7 @@ const DATASTORE_INFO_SUBDIRS: SubdirMap = &[ .delete(&API_METHOD_DELETE_SNAPSHOT), ), ("status", &Router::new().get(&API_METHOD_STATUS)), + ("unmount", &Router::new().post(&API_METHOD_UNMOUNT)), ( "upload-backup-log", &Router::new().upload(&API_METHOD_UPLOAD_BACKUP_LOG), -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:50 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:50 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 03/26] maintenance: add 'Unmount' maintenance type In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-4-h.laimer@proxmox.com> From: Dietmar Maurer Signed-off-by: Dietmar Maurer Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 3 +++ pbs-api-types/src/maintenance.rs | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 8827604c3..9bcec7191 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -401,6 +401,9 @@ impl DataStoreConfig { match current_type { Some(MaintenanceType::ReadOnly) => { /* always OK */ } Some(MaintenanceType::Offline) => { /* always OK */ } + Some(MaintenanceType::Unmount) => { + bail!("datastore is being unmounted"); + } Some(MaintenanceType::Delete) => { match new_type { Some(MaintenanceType::Delete) => { /* allow to delete a deleted storage */ } diff --git a/pbs-api-types/src/maintenance.rs b/pbs-api-types/src/maintenance.rs index a7b8b078d..3c9aa8190 100644 --- a/pbs-api-types/src/maintenance.rs +++ b/pbs-api-types/src/maintenance.rs @@ -38,7 +38,6 @@ pub enum Operation { /// Maintenance type. 
pub enum MaintenanceType { // TODO: - // - Add "unmounting" once we got pluggable datastores // - Add "GarbageCollection" or "DeleteOnly" as type and track GC (or all deletes) as separate // operation, so that one can enable a mode where nothing new can be added but stuff can be // cleaned @@ -48,6 +47,8 @@ pub enum MaintenanceType { Offline, /// The datastore is being deleted. Delete, + /// The (removable) datastore is being unmounted. + Unmount, } serde_plain::derive_display_from_serialize!(MaintenanceType); serde_plain::derive_fromstr_from_deserialize!(MaintenanceType); @@ -79,7 +80,9 @@ pub struct MaintenanceMode { impl MaintenanceMode { /// Used for deciding whether the datastore is cleared from the internal cache pub fn clear_from_cache(&self) -> bool { - self.ty == MaintenanceType::Offline || self.ty == MaintenanceType::Delete + self.ty == MaintenanceType::Offline + || self.ty == MaintenanceType::Delete + || self.ty == MaintenanceType::Unmount } pub fn check(&self, operation: Option) -> Result<(), Error> { @@ -93,6 +96,8 @@ impl MaintenanceMode { if let Some(Operation::Lookup) = operation { return Ok(()); + } else if self.ty == MaintenanceType::Unmount { + bail!("datastore is being unmounted"); } else if self.ty == MaintenanceType::Offline { bail!("offline maintenance mode: {}", message); } else if self.ty == MaintenanceType::ReadOnly { -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:48 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:48 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 01/26] pbs-api-types: add backing-device to DataStoreConfig In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-2-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 711051d05..8827604c3 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -45,7 +45,7 @@ const_regex! 
{ pub const CHUNK_DIGEST_FORMAT: ApiStringFormat = ApiStringFormat::Pattern(&SHA256_HEX_REGEX); -pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name") +pub const DATASTORE_DIR_NAME_SCHEMA: Schema = StringSchema::new("Either the absolute path to the datastore directory, or a relative on-device path for removable datastores.") .min_length(1) .max_length(4096) .schema(); @@ -163,6 +163,9 @@ pub const PRUNE_SCHEMA_KEEP_YEARLY: Schema = .minimum(1) .schema(); +/// Base directory where datastores are mounted +pub const DATASTORE_MOUNT_DIR: &str = "/mnt/datastore"; + #[api] #[derive(Debug, Default, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -237,7 +240,7 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore schema: DATASTORE_SCHEMA, }, path: { - schema: DIR_NAME_SCHEMA, + schema: DATASTORE_DIR_NAME_SCHEMA, }, "notify-user": { optional: true, @@ -276,6 +279,12 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA), type: String, }, + "backing-device": { + description: "The UUID of the filesystem partition for removable datastores.", + optional: true, + format: &proxmox_schema::api_types::UUID_FORMAT, + type: String, + } } )] #[derive(Serialize, Deserialize, Updater, Clone, PartialEq)] @@ -323,6 +332,11 @@ pub struct DataStoreConfig { /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in " #[serde(skip_serializing_if = "Option::is_none")] pub maintenance_mode: Option, + + /// The UUID of the device(for removable datastores) + #[updater(skip)] + #[serde(skip_serializing_if = "Option::is_none")] + pub backing_device: Option, } #[api] @@ -357,12 +371,17 @@ impl DataStoreConfig { notification_mode: None, tuning: None, maintenance_mode: None, + backing_device: None, } } /// Returns the absolute path to the datastore content. pub fn absolute_path(&self) -> String { - self.path.clone() + if self.backing_device.is_some() { + format!("{DATASTORE_MOUNT_DIR}/{}", self.name) + } else { + self.path.clone() + } } pub fn get_maintenance_mode(&self) -> Option { -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:51 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:51 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 04/26] datastore: add helper for checking if a datastore is mounted In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-5-h.laimer@proxmox.com> ... at a specific location. Also adds two additional functions to get the mount status, and ensuring a removable datastore is mounted. 
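To illustrate the check this patch implements, here is a minimal standalone sketch using only the standard library; the patch itself goes through proxmox_sys::linux::procfs::MountInfo and nix, so the names and details below are illustrative only (the sketch also skips mountinfo's octal escaping of paths containing spaces):

```rust
use std::os::unix::fs::{FileTypeExt, MetadataExt};
use std::path::Path;

/// Sketch: is the filesystem with `device_uuid` mounted at `mount_point`?
/// Any failure to check is treated as "not mounted", as described above.
fn is_mounted_at(mount_point: &str, device_uuid: &str) -> bool {
    // Resolve /dev/disk/by-uuid/<uuid> (a symlink to the device node) and
    // remember the device number of the block device behind it.
    let by_uuid = format!("/dev/disk/by-uuid/{device_uuid}");
    let dev_rdev = match std::fs::metadata(&by_uuid) {
        Ok(m) if m.file_type().is_block_device() => m.rdev(),
        _ => return false,
    };

    let Ok(mountinfo) = std::fs::read_to_string("/proc/self/mountinfo") else {
        return false;
    };

    for line in mountinfo.lines() {
        // Field 5 of each mountinfo line is the mount point; the mount
        // source is the second field after the "-" separator.
        let Some(mnt) = line.split_whitespace().nth(4) else { continue };
        if Path::new(mnt) != Path::new(mount_point) {
            continue;
        }
        let mut rest = line.split_whitespace().skip_while(|f| *f != "-").skip(2);
        let Some(source) = rest.next() else { continue };
        if !source.starts_with('/') {
            continue; // not backed by a device path (e.g. tmpfs)
        }
        if let Ok(m) = std::fs::metadata(source) {
            if m.file_type().is_block_device() && m.rdev() == dev_rdev {
                return true; // same st_rdev: the expected device is mounted here
            }
        }
    }
    false
}
```

Comparing `st_rdev` values instead of path strings means the check still holds when the device node is reached through different symlinks.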
Co-authored-by: Wolfgang Bumiller Signed-off-by: Hannes Laimer --- pbs-datastore/src/datastore.rs | 74 +++++++++++++++++++++++++++++ pbs-datastore/src/lib.rs | 4 +- src/server/metric_collection/mod.rs | 4 ++ 3 files changed, 81 insertions(+), 1 deletion(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 2bf2b8437..6a9fc2dc0 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -1,5 +1,6 @@ use std::collections::{HashMap, HashSet}; use std::io::{self, Write}; +use std::os::unix::ffi::OsStrExt; use std::os::unix::io::AsRawFd; use std::path::{Path, PathBuf}; use std::sync::{Arc, LazyLock, Mutex}; @@ -14,6 +15,7 @@ use proxmox_schema::ApiType; use proxmox_sys::error::SysError; use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions}; use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard}; +use proxmox_sys::linux::procfs::MountInfo; use proxmox_sys::process_locker::ProcessLockSharedGuard; use proxmox_worker_task::WorkerTaskContext; @@ -46,6 +48,70 @@ pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> Ok(()) } +/// Check if a device with a given UUID is currently mounted at store_mount_point by +/// comparing the `st_rdev` values of `/dev/disk/by-uuid/` and the source device in +/// /proc/self/mountinfo. +/// +/// If we can't check if it is mounted, we treat that as not mounted, +/// returning false. +/// +/// Reasons it could fail other than not being mounted where expected: +/// - could not read /proc/self/mountinfo +/// - could not stat /dev/disk/by-uuid/ +/// - /dev/disk/by-uuid/ is not a block device +/// +/// Since these are very much out of our control, there is no real value in distinguishing +/// between them, so for this function they all are treated as 'device not mounted' +fn is_datastore_mounted_at(store_mount_point: String, device_uuid: &str) -> bool { + use nix::sys::stat::SFlag; + + let store_mount_point = Path::new(&store_mount_point); + + let dev_node = match nix::sys::stat::stat(format!("/dev/disk/by-uuid/{device_uuid}").as_str()) { + Ok(stat) if SFlag::from_bits_truncate(stat.st_mode) == SFlag::S_IFBLK => stat.st_rdev, + _ => return false, + }; + + let Ok(mount_info) = MountInfo::read() else { + return false; + }; + + for (_, entry) in mount_info { + let Some(source) = entry.mount_source else { + continue; + }; + + if entry.mount_point != store_mount_point || !source.as_bytes().starts_with(b"/") { + continue; + } + + if let Ok(stat) = nix::sys::stat::stat(source.as_os_str()) { + let sflag = SFlag::from_bits_truncate(stat.st_mode); + + if sflag == SFlag::S_IFBLK && stat.st_rdev == dev_node { + return true; + } + } + } + + false +} + +pub fn get_datastore_mount_status(config: &DataStoreConfig) -> Option { + let Some(ref device_uuid) = config.backing_device else { + return None; + }; + Some(is_datastore_mounted_at(config.absolute_path(), device_uuid)) +} + +pub fn ensure_datastore_is_mounted(config: &DataStoreConfig) -> Result<(), Error> { + match get_datastore_mount_status(config) { + Some(true) => Ok(()), + Some(false) => Err(format_err!("Datastore '{}' is not mounted", config.name)), + None => Ok(()), + } +} + /// Datastore Management /// /// A Datastore can store severals backups, and provides the @@ -156,6 +222,12 @@ impl DataStore { } } + if get_datastore_mount_status(&config) == Some(false) { + let mut datastore_cache = DATASTORE_MAP.lock().unwrap(); + datastore_cache.remove(&config.name); + bail!("datastore '{}' is not mounted", config.name); + } + let mut 
datastore_cache = DATASTORE_MAP.lock().unwrap(); let entry = datastore_cache.get(name); @@ -259,6 +331,8 @@ impl DataStore { ) -> Result, Error> { let name = config.name.clone(); + ensure_datastore_is_mounted(&config)?; + let tuning: DatastoreTuning = serde_json::from_value( DatastoreTuning::API_SCHEMA .parse_property_string(config.tuning.as_deref().unwrap_or(""))?, diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs index 8050cf4d0..5014b6c09 100644 --- a/pbs-datastore/src/lib.rs +++ b/pbs-datastore/src/lib.rs @@ -201,7 +201,9 @@ pub use manifest::BackupManifest; pub use store_progress::StoreProgress; mod datastore; -pub use datastore::{check_backup_owner, DataStore}; +pub use datastore::{ + check_backup_owner, ensure_datastore_is_mounted, get_datastore_mount_status, DataStore, +}; mod hierarchy; pub use hierarchy::{ diff --git a/src/server/metric_collection/mod.rs b/src/server/metric_collection/mod.rs index b95dba203..2ede8408f 100644 --- a/src/server/metric_collection/mod.rs +++ b/src/server/metric_collection/mod.rs @@ -176,6 +176,10 @@ fn collect_disk_stats_sync() -> (DiskStat, Vec) { continue; } + if pbs_datastore::get_datastore_mount_status(&config) == Some(false) { + continue; + } + datastores.push(gather_disk_stats( disk_manager.clone(), Path::new(&config.absolute_path()), -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:47 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:47 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 00/26] add removable datastores Message-ID: <20241125162213.157011-1-h.laimer@proxmox.com> These patches add support for removable datastores. All removable datastores have a backing-device (a UUID) associated with them. Removable datastores work like normal ones, just that they can be unplugged. It is possible to create a removable datastore, sync backups onto it, unplug it and use it on a different PBS. The datastore path specified is relative to the root of the used device. Removable datastores are bind mounted to /mnt/datastore/. Multiple datastores can be created on a single device, but only devices with a single datastore on them will be auto-mounted. When a removable datastore is deleted and 'destroy-data' is set, the device has to be mounted. If 'destroy-data' is not set, the datastore can be deleted even if the device is not present. Removable datastores are automatically mounted when plugged in. v15: thanks @Fabian and @Dominik * ui: drop pmxDisplayEditField for device/partition selector * ui: use reference instead of id * ui: fix problem with overriding tbar * ui: some general cleanup mentioned by @Dominik * recheck if aborted before unmounting * fix nested check * removable datastores now have a relative path, not an absolute one, and add a check for it * restructure mounting, make sure it is unmounted whenever something goes wrong during creation * use if let Err(..) instead of .inspect_err(..) 
v14: thanks @Fabian * add two functions to get the mount status or ensure a removable datastore is mounted, avoiding repeating things * use enum for `mount_status` instead of Option * fix problem with unmounting, now check for unmounting maintenance mode before actually unmounting, manually changing the maintenance mode during unmounting will not prevent unmounting * improve logging for mounting: add context and adjust logging levels * improve uuid_mount function: load config file directly and call do_mount function directly without going through the API * add logging for cleanup on ds deletion * move check for nesting into do_create_datastore, and check for all datastores (not just removable) * remove redundant check when deleting through directory * use single worker when creating removable datastore through dir endpoint * drop get_mount_point function * ui: stop loading status after first failed attempt, prevents log spamming v13: thanks @Fabian * allow multiple datastores on devices * replace `is_datastore_available` with a more specific function; it is now removable-datastore specific and won't be called for normal ones * replace removable/is_available in status structs with mount_state, which is `None` for normal datastores, as it makes it less ambiguous what is meant * remove notion of 'available' from normal datastores and replace it with mounted/mount_status for removable ones, as it never really made sense in the first place * abort of an unmount task will now reset the maintenance mode * add check for race when setting maintenance at end of unmounting task * improve documentation and commit messages * remove unneeded tokio::spawn * only auto mount devices with a single datastore on them * drop patch that added flag for excluding used partitions * make auto mount service not dynamic * add debug command to scan devices for datastores they may contain * rebase onto master v12: thanks @Wolfgang * use bind mounts, so now /path/to/ds is mounted to /mnt/datastore/; this is a bit cleaner and allows for multiple datastores on a single device to be mounted individually, if we want to allow that in the future * small code improvements v11: * rebase onto master v10: thanks @Gabriel and @Wolfgang * make is_datastore_available more robust * fix a lot of wording * drop format on uuid_mount command for UUID * only gather_disk_stats if datastore is available * overall code improvements * ui: include model in partition selector * rebased onto master v9: * change mount point to `/mnt/datastore/` * update "Directory" list UI * add `absolute_path()` from Dietmar's RFC * update docs v8: * still depends on [1] * paths for removable datastores are now relative to `/mnt/removable_datastore/` * add support for creation of removable datastore through the "create directory" endpoint (last 3 patches) * update datastore creation UI * update docs v7: * depends on [1] * improve logging when waiting for tasks * drop `update-datastore-cache` refactoring * fix some commit messages [1] https://lists.proxmox.com/pipermail/pbs-devel/2024-April/008739.html v6: * remove 'drop' flag in datastore cache * use maintenance-mode 'unmount' for the unmounting process, only for the unmounting, not for being unmounted * rename/simplify update-datastore-cache command * ui: integrate new unmounting maintenance mode * basically a mix of v3 and v4 v5: thanks @Dietmar and @Christian * drop --force for unmount since it'll always fail if tasks are still running, and if there are none, normal unmount will work * improve several commit messages * improve error message 
wording * add removable datastore section to docs * add documentation for is_datastore_available v4: thanks a lot @Dietmar and @Christian * make check if mounted wayyy faster * don't keep track of mounting state * drop Unplugged maintenance mode * use UUID_FORMAT for uuid field * a lot of small things, like use of bail!, inline format!, ... * include improvement to cache handling v3: * remove lazy unmounting (since 9cba51ac782d04085c0af55128f32178e5132358 is applied) * fix CLI (un)mount command, thanks @Gabriel * add removable datastore CLI autocomplete helper * rebase onto master * move ui patches to the end, thanks @Lukas and @Thomas for the feedback v2: * fix datastore 'add' button in the UI * some format!("{}", a) -> format!("{a}") * replace `const` with `let` in js code * change icon `fa-usb` -> `fa-plug` * add some docs * add JSDoc for parseMaintenanceMode * proxmox-schema dep bump Dietmar Maurer (1): maintenance: add 'Unmount' maintenance type Hannes Laimer (25): pbs-api-types: add backing-device to DataStoreConfig maintenance: make is_offline more generic datastore: add helper for checking if a datastore is mounted api: admin: add (un)mount endpoint for removable datastores api: removable datastore creation api: add check for nested datastores on creation pbs-api-types: add mount_status field to DataStoreListItem bin: manager: add (un)mount command add auto-mounting for removable datastores datastore: handle deletion of removable datastore properly docs: add removable datastores section ui: add partition selector form ui: add removable datastore creation support ui: add (un)mount button to summary ui: tree: render unmounted datastores correctly ui: utils: make parseMaintenanceMode more robust ui: add datastore status mask for unmounted removable datastores ui: maintenance: fix disable msg field if no type is selected ui: render 'unmount' maintenance mode correctly api: node: allow creation of removable datastore through directory endpoint api: node: include removable datastores in directory list node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR ui: support create removable datastore through directory creation bin: debug: add inspect device command api: disks: only return UUID of partitions if it actually is one debian/proxmox-backup-server.install | 1 + debian/proxmox-backup-server.udev | 3 + docs/storage.rst | 38 +++ etc/Makefile | 1 + etc/removable-device-attach@.service | 8 + pbs-api-types/src/datastore.rs | 47 ++++- pbs-api-types/src/maintenance.rs | 12 +- pbs-config/src/datastore.rs | 14 + pbs-datastore/src/datastore.rs | 83 ++++- pbs-datastore/src/lib.rs | 4 +- src/api2/admin/datastore.rs | 316 +++++++++++++++++++- src/api2/config/datastore.rs | 122 +++++++- src/api2/node/disks/directory.rs | 74 +++-- src/api2/status/mod.rs | 30 +- src/bin/proxmox_backup_debug/inspect.rs | 149 +++++++++ src/bin/proxmox_backup_manager/datastore.rs | 126 +++++++- src/server/metric_collection/mod.rs | 4 ++ src/tools/disks/mod.rs | 5 +- www/DirectoryList.js | 13 + www/Makefile | 1 + www/NavigationTree.js | 18 +- www/Utils.js | 33 +- www/css/ext6-pbs.css | 20 ++ www/datastore/DataStoreListSummary.js | 1 + www/datastore/Summary.js | 122 +++++++- www/form/PartitionSelector.js | 81 +++++ www/window/CreateDirectory.js | 14 + www/window/DataStoreEdit.js | 32 ++ www/window/MaintenanceOptions.js | 17 +- 29 files changed, 1298 insertions(+), 91 deletions(-) create mode 100644 etc/removable-device-attach@.service create mode 100644 www/form/PartitionSelector.js -- 2.39.5 From h.laimer at 
proxmox.com Mon Nov 25 17:22:01 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:01 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 14/26] ui: add removable datastore creation support In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-15-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/window/DataStoreEdit.js | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js index b8e866df2..239700835 100644 --- a/www/window/DataStoreEdit.js +++ b/www/window/DataStoreEdit.js @@ -63,6 +63,17 @@ Ext.define('PBS.DataStoreEdit', { emptyText: gettext('An absolute path'), validator: val => val?.trim() !== '/', }, + { + xtype: 'pbsPartitionSelector', + fieldLabel: gettext('Device'), + name: 'backing-device', + disabled: true, + allowBlank: true, + cbind: { + editable: '{isCreate}', + }, + emptyText: gettext('Device path'), + }, ], column2: [ { @@ -88,6 +99,27 @@ Ext.define('PBS.DataStoreEdit', { }, ], columnB: [ + { + xtype: 'checkbox', + boxLabel: gettext('Removable datastore'), + submitValue: false, + listeners: { + change: function(checkbox, isRemovable) { + let inputPanel = checkbox.up('inputpanel'); + let pathField = inputPanel.down('[name=path]'); + let uuidEditField = inputPanel.down('[name=backing-device]'); + + uuidEditField.setDisabled(!isRemovable); + uuidEditField.allowBlank = !isRemovable; + uuidEditField.setValue(''); + if (isRemovable) { + pathField.setFieldLabel(gettext('On device path')); + } else { + pathField.setFieldLabel(gettext('Backing Path')); + } + }, + }, + }, { xtype: 'textfield', name: 'comment', -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:00 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:00 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 13/26] ui: add partition selector form In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-14-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/Makefile | 1 + www/form/PartitionSelector.js | 81 +++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 www/form/PartitionSelector.js diff --git a/www/Makefile b/www/Makefile index ff19ae7a5..f62b44288 100644 --- a/www/Makefile +++ b/www/Makefile @@ -49,6 +49,7 @@ JSSRC= \ form/NamespaceMaxDepth.js \ form/CalendarEvent.js \ form/PermissionPathSelector.js \ + form/PartitionSelector.js \ form/GroupSelector.js \ form/GroupFilter.js \ form/VerifyOutdatedAfter.js \ diff --git a/www/form/PartitionSelector.js b/www/form/PartitionSelector.js new file mode 100644 index 000000000..162dbe418 --- /dev/null +++ b/www/form/PartitionSelector.js @@ -0,0 +1,81 @@ +Ext.define('pbs-partition-list', { + extend: 'Ext.data.Model', + fields: ['name', 'uuid', 'filesystem', 'devpath', 'size', 'model'], + proxy: { + type: 'proxmox', + url: "/api2/json/nodes/localhost/disks/list?skipsmart=1&include-partitions=1", + reader: { + transform: (rawData) => rawData.data + .flatMap(disk => (disk.partitions + .map(part => ({ ...part, model: disk.model })) ?? 
[]) + .filter(partition => partition.used === 'filesystem')), + }, + }, + idProperty: 'devpath', + +}); + +Ext.define('PBS.form.PartitionSelector', { + extend: 'Proxmox.form.ComboGrid', + alias: 'widget.pbsPartitionSelector', + + allowBlank: false, + autoSelect: false, + submitEmpty: false, + valueField: 'uuid', + displayField: 'devpath', + + store: { + model: 'pbs-partition-list', + autoLoad: true, + sorters: 'devpath', + }, + getSubmitData: function() { + let me = this; + let data = null; + if (!me.disabled && me.submitValue && !me.isFileUpload()) { + let val = me.getSubmitValue(); + if (val !== undefined && val !== null && val !== '') { + data = {}; + data[me.getName()] = val; + } else if (me.getDeleteEmpty()) { + data = {}; + data.delete = me.getName(); + } + } + return data; + }, + listConfig: { + columns: [ + { + header: gettext('Path'), + sortable: true, + dataIndex: 'devpath', + renderer: (v, metaData, rec) => Ext.String.htmlEncode(v), + flex: 1, + }, + { + header: gettext('Filesystem'), + sortable: true, + dataIndex: 'filesystem', + flex: 1, + }, + { + header: gettext('Size'), + sortable: true, + dataIndex: 'size', + renderer: Proxmox.Utils.format_size, + flex: 1, + }, + { + header: gettext('Model'), + sortable: true, + dataIndex: 'model', + flex: 1, + }, + ], + viewConfig: { + emptyText: 'No usable partitions present', + }, + }, +}); -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:04 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:04 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 17/26] ui: utils: make parseMaintenanceMode more robust In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-18-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/Utils.js | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/www/Utils.js b/www/Utils.js index 4853be36c..7756e9b5d 100644 --- a/www/Utils.js +++ b/www/Utils.js @@ -740,14 +740,29 @@ Ext.define('PBS.Utils', { return `${icon} ${value}`; }, - // FIXME: this "parser" is brittle and relies on the order the arguments will appear in + /** + * Parses maintenance mode property string. + * Examples: + * "offline,message=foo" -> ["offline", "foo"] + * "offline" -> ["offline", null] + * "message=foo,offline" -> ["offline", "foo"] + * null/undefined -> [null, null] + * + * @param {string|null} mode - Maintenance mode string to parse. + * @return {Array} - Parsed maintenance mode values. + */ parseMaintenanceMode: function(mode) { - let [type, message] = mode.split(/,(.+)/); - type = type.split("=").pop(); - message = message ? message.split("=")[1] - .replace(/^"(.*)"$/, '$1') - .replaceAll('\\"', '"') : null; - return [type, message]; + if (!mode) { + return [null, null]; + } + return mode.split(',').reduce(([m, msg], pair) => { + const [key, value] = pair.split('='); + if (key === 'message') { + return [m, value.replace(/^"(.*)"$/, '$1').replace(/\\"/g, '"')]; + } else { + return [value ?? 
key, msg]; + } + }, [null, null]); }, renderMaintenance: function(mode, activeTasks) { -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:06 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:06 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 19/26] ui: maintenance: fix disable msg field if no type is selected In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-20-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- www/window/MaintenanceOptions.js | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js index 1ee92542e..7e3b42516 100644 --- a/www/window/MaintenanceOptions.js +++ b/www/window/MaintenanceOptions.js @@ -52,16 +52,22 @@ Ext.define('PBS.window.MaintenanceOptions', { items: [ { xtype: 'pbsMaintenanceType', + reference: 'type-field', name: 'maintenance-type', fieldLabel: gettext('Maintenance Type'), value: '__default__', deleteEmpty: true, + listeners: { + change: (field, newValue) => { + field.up('form').down('[name=maintenance-msg]').setDisabled(newValue === '__default__'); + }, + }, }, { xtype: 'proxmoxtextfield', + reference: 'message-field', name: 'maintenance-msg', fieldLabel: gettext('Description'), - // FIXME: disable if maintenance type is none }, ], }, -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:58 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:58 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 11/26] datastore: handle deletion of removable datastore properly In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-12-h.laimer@proxmox.com> Data deletion is only possible if the datastore is mounted; we won't attempt to mount it for the purpose of deleting data. 
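The resulting guard boils down to the following sketch; `MountStatus` and the check function stand in for the series' DataStoreMountStatus and get_datastore_mount_status(), they are not the actual API:

```rust
// Illustrative stand-ins for the series' types, not the real API.
#[derive(Clone, Copy, PartialEq)]
enum MountStatus {
    Mounted,
    NotMounted,
    NonRemovable,
}

/// Destroying data requires the backing device to be mounted; removing
/// only the configuration is fine even when the device is unplugged.
fn check_delete_allowed(destroy_data: bool, status: MountStatus) -> Result<(), String> {
    if destroy_data && status == MountStatus::NotMounted {
        return Err("cannot destroy data unless the datastore is mounted".to_string());
    }
    Ok(())
}

fn main() {
    assert!(check_delete_allowed(true, MountStatus::NotMounted).is_err());
    assert!(check_delete_allowed(false, MountStatus::NotMounted).is_ok());
    assert!(check_delete_allowed(true, MountStatus::Mounted).is_ok());
    assert!(check_delete_allowed(true, MountStatus::NonRemovable).is_ok());
}
```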
Signed-off-by: Hannes Laimer --- pbs-datastore/src/datastore.rs | 4 +++- src/api2/config/datastore.rs | 39 ++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs index 6a9fc2dc0..adf29f183 100644 --- a/pbs-datastore/src/datastore.rs +++ b/pbs-datastore/src/datastore.rs @@ -1535,7 +1535,9 @@ impl DataStore { // weird, but ok } Err(err) if err.is_errno(nix::errno::Errno::EBUSY) => { - warn!("Cannot delete datastore directory (is it a mount point?).") + if datastore_config.backing_device.is_none() { + warn!("Cannot delete datastore directory (is it a mount point?).") + } } Err(err) if err.is_errno(nix::errno::Errno::ENOTEMPTY) => { warn!("Datastore directory not empty, not deleting.") diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index d6cfdbb0c..9f2dac4b2 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -30,6 +30,7 @@ use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_bac use crate::api2::config::verify::delete_verification_job; use pbs_config::CachedUserInfo; +use pbs_datastore::get_datastore_mount_status; use proxmox_rest_server::WorkerTask; use crate::server::jobstate; @@ -574,6 +575,15 @@ pub async fn delete_datastore( http_bail!(NOT_FOUND, "datastore '{}' does not exist.", name); } + let store_config: DataStoreConfig = config.lookup("datastore", &name)?; + + if destroy_data && get_datastore_mount_status(&store_config) == Some(false) { + http_bail!( + BAD_REQUEST, + "cannot destroy data on '{name}' unless the datastore is mounted" + ); + } + if !keep_job_configs { for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { delete_verification_job(job.config.id, None, rpcenv)? @@ -604,6 +614,18 @@ pub async fn delete_datastore( let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; + if let Ok(proxy_pid) = proxmox_rest_server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN) + { + let sock = proxmox_daemon::command_socket::path_from_pid(proxy_pid); + let _ = proxmox_daemon::command_socket::send_raw( + sock, + &format!( + "{{\"command\":\"update-datastore-cache\",\"args\":\"{}\"}}\n", + name.clone() + ), + ) + .await; + }; let upid = WorkerTask::new_thread( "delete-datastore", @@ -623,6 +645,23 @@ pub async fn delete_datastore( warn!("failed to notify after datastore removal: {err}"); } + // cleanup for removable datastores + // - unmount + // - remove mount dir, if destroy_data + if store_config.backing_device.is_some() { + let mount_point = store_config.absolute_path(); + if get_datastore_mount_status(&store_config) == Some(true) { + if let Err(e) = unmount_by_mountpoint(Path::new(&mount_point)) { + warn!("could not unmount device after deletion: {e}"); + } + } + if destroy_data { + if let Err(e) = std::fs::remove_dir(&mount_point) { + warn!("could not remove directory after deletion: {e}"); + } + } + } + Ok(()) }, )?; -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:02 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:02 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 15/26] ui: add (un)mount button to summary In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-16-h.laimer@proxmox.com> And only try to load datastore information if the datastore is available. 
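Condensed from the ExtJS handler in the diff below, the button enablement follows this small state machine (a Rust restatement for readability, not code from the patch):

```rust
/// (mount_enabled, unmount_enabled) for a removable datastore, given whether
/// the status query succeeded and the configured maintenance type, if any.
fn button_state(status_ok: bool, maintenance: Option<&str>) -> (bool, bool) {
    if status_ok {
        // mounted and usable: only unmounting makes sense
        return (false, true);
    }
    match maintenance {
        // plain "not mounted" or read-only: offer mounting
        None | Some("read-only") => (true, false),
        // an unmount task is still running: offer neither
        Some("unmount") => (false, false),
        // any other maintenance mode: leave both buttons available
        _ => (true, true),
    }
}

fn main() {
    assert_eq!(button_state(true, None), (false, true));
    assert_eq!(button_state(false, None), (true, false));
    assert_eq!(button_state(false, Some("unmount")), (false, false));
}
```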
Signed-off-by: Hannes Laimer --- www/datastore/Summary.js | 101 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 3 deletions(-) diff --git a/www/datastore/Summary.js b/www/datastore/Summary.js index a932b4e01..1be26ff3d 100644 --- a/www/datastore/Summary.js +++ b/www/datastore/Summary.js @@ -219,6 +219,53 @@ Ext.define('PBS.DataStoreSummary', { }, tbar: [ + { + xtype: 'button', + text: gettext('Unmount'), + hidden: true, + itemId: 'unmountButton', + reference: 'unmountButton', + handler: function() { + let me = this; + let datastore = me.up('panel').datastore; + Proxmox.Utils.API2Request({ + url: `/admin/datastore/${datastore}/unmount`, + method: 'POST', + failure: function(response) { + Ext.Msg.alert(gettext('Error'), response.htmlStatus); + }, + success: function(response, options) { + Ext.create('Proxmox.window.TaskViewer', { + upid: response.result.data, + }).show(); + }, + }); + }, + }, + { + xtype: 'button', + text: gettext('Mount'), + hidden: true, + itemId: 'mountButton', + reference: 'mountButton', + handler: function() { + let me = this; + let datastore = me.up('panel').datastore; + Proxmox.Utils.API2Request({ + url: `/admin/datastore/${datastore}/mount`, + method: 'POST', + failure: function(response) { + Ext.Msg.alert(gettext('Error'), response.htmlStatus); + }, + success: function(response, options) { + me.up('panel').statusStore.startUpdate(); + Ext.create('Proxmox.window.TaskViewer', { + upid: response.result.data, + }).show(); + }, + }); + }, + }, { xtype: 'button', text: gettext('Show Connection Information'), @@ -294,8 +341,12 @@ Ext.define('PBS.DataStoreSummary', { listeners: { activate: function() { this.rrdstore.startUpdate(); }, + afterrender: function() { this.statusStore.startUpdate(); }, deactivate: function() { this.rrdstore.stopUpdate(); }, - destroy: function() { this.rrdstore.stopUpdate(); }, + destroy: function() { + this.rrdstore.stopUpdate(); + this.statusStore.stopUpdate(); + }, resize: function(panel) { Proxmox.Utils.updateColumns(panel); }, @@ -309,7 +360,43 @@ Ext.define('PBS.DataStoreSummary', { model: 'pve-rrd-datastore', }); - me.callParent(); + me.statusStore = Ext.create('Proxmox.data.ObjectStore', { + url: `/api2/json/admin/datastore/${me.datastore}/status`, + interval: 1000, + }); + + me.mon(me.statusStore, 'load', (s, records, success) => { + let mountBtn = me.lookupReferenceHolder().lookupReference('mountButton'); + let unmountBtn = me.lookupReferenceHolder().lookupReference('unmountButton'); + if (!success) { + me.statusStore.stopUpdate(); + me.down('pbsDataStoreInfo').fireEvent('deactivate'); + Proxmox.Utils.API2Request({ + url: `/config/datastore/${me.datastore}`, + success: response => { + let mode = response.result.data['maintenance-mode']; + let [type, _message] = PBS.Utils.parseMaintenanceMode(mode); + if (!response.result.data['backing-device']) { + return; + } + if (!type || type === 'read-only') { + unmountBtn.setDisabled(true); + mountBtn.setDisabled(false); + } else if (type === 'unmount') { + unmountBtn.setDisabled(true); + mountBtn.setDisabled(true); + } else { + unmountBtn.setDisabled(false); + mountBtn.setDisabled(false); + } + }, + }); + } else { + me.down('pbsDataStoreInfo').fireEvent('activate'); + unmountBtn.setDisabled(false); + mountBtn.setDisabled(true); + } + }); let sp = Ext.state.Manager.getProvider(); me.mon(sp, 'statechange', function(provider, key, value) { @@ -322,11 +409,19 @@ Ext.define('PBS.DataStoreSummary', { Proxmox.Utils.updateColumns(me); }); + me.callParent(); + 
Proxmox.Utils.API2Request({ url: `/config/datastore/${me.datastore}`, waitMsgTarget: me.down('pbsDataStoreInfo'), success: function(response) { - let path = Ext.htmlEncode(response.result.data.path); + let mountBtn = me.lookupReferenceHolder().lookupReference('mountButton'); + let unmountBtn = me.lookupReferenceHolder().lookupReference('unmountButton'); + let data = response.result.data; + let path = Ext.htmlEncode(data.path); + const removable = !!data['backing-device']; + unmountBtn.setHidden(!removable); + mountBtn.setHidden(!removable); me.down('pbsDataStoreInfo').setTitle(`${me.datastore} (${path})`); me.down('pbsDataStoreNotes').setNotes(response.result.data.comment); }, -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:13 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:13 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 26/26] api: disks: only return UUID of partitions if it actually is one In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-27-h.laimer@proxmox.com> Some filesystems like FAT don't include a concept of UUIDs. Instead, tools like blkid derive these identifiers based on certain filesystem metadata, such as volume serial numbers or other unique information. This does, however, not follow the format specified in RFC 9562[1]. [1] https://datatracker.ietf.org/doc/html/rfc9562 Signed-off-by: Hannes Laimer --- src/tools/disks/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/tools/disks/mod.rs b/src/tools/disks/mod.rs index 6345fde7c..61aceccd6 100644 --- a/src/tools/disks/mod.rs +++ b/src/tools/disks/mod.rs @@ -898,7 +898,10 @@ fn get_partitions_info( let mut uuid = None; if let Some(devpath) = devpath.as_ref() { for info in lsblk_infos.iter().filter(|i| i.path.eq(devpath)) { - uuid = info.uuid.clone(); + uuid = info + .uuid + .clone() + .filter(|uuid| pbs_api_types::UUID_REGEX.is_match(uuid)); used = match info.partition_type.as_deref() { Some("21686148-6449-6e6f-744e-656564454649") => PartitionUsageType::BIOS, Some("c12a7328-f81f-11d2-ba4b-00a0c93ec93b") => PartitionUsageType::EFI, -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:10 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:10 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 23/26] node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-24-h.laimer@proxmox.com> ... since they do have the same value. 
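As an aside on the UUID filtering in patch 26/26 above: an RFC 9562 UUID has the 8-4-4-4-12 hex-digit shape that FAT's blkid-derived identifiers (e.g. "A1B2-C3D4") lack. The patch reuses the already existing pbs_api_types::UUID_REGEX; a regex-free sketch of the same shape check, for illustration only:

```rust
/// Sketch of the RFC 9562 shape check; illustrative only, the patch itself
/// matches against the existing pbs_api_types::UUID_REGEX.
fn looks_like_rfc9562_uuid(s: &str) -> bool {
    let group_lengths = [8usize, 4, 4, 4, 12];
    let mut parts = s.split('-');
    for len in group_lengths {
        match parts.next() {
            Some(p) if p.len() == len && p.bytes().all(|b| b.is_ascii_hexdigit()) => {}
            _ => return false,
        }
    }
    parts.next().is_none() // reject trailing groups
}

fn main() {
    assert!(looks_like_rfc9562_uuid("c1f4f7e2-8f2a-4e4e-9d5b-1234567890ab"));
    assert!(!looks_like_rfc9562_uuid("A1B2-C3D4")); // FAT volume id, filtered out
}
```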
Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 11d07af42..ff817b253 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -11,8 +11,8 @@ use proxmox_schema::api; use proxmox_section_config::SectionConfigData; use pbs_api_types::{ - DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_SCHEMA, NODE_SCHEMA, PRIV_SYS_AUDIT, - PRIV_SYS_MODIFY, UPID_SCHEMA, + DataStoreConfig, BLOCKDEVICE_NAME_SCHEMA, DATASTORE_MOUNT_DIR, DATASTORE_SCHEMA, NODE_SCHEMA, + PRIV_SYS_AUDIT, PRIV_SYS_MODIFY, UPID_SCHEMA, }; use crate::tools::disks::{ @@ -23,8 +23,6 @@ use crate::tools::systemd::{self, types::*}; use proxmox_rest_server::WorkerTask; -const BASE_MOUNT_DIR: &str = "/mnt/datastore/"; - #[api( properties: { "filesystem": { @@ -91,7 +89,7 @@ pub fn list_datastore_mounts() -> Result, Error> { let name = data .Where - .strip_prefix(BASE_MOUNT_DIR) + .strip_prefix(DATASTORE_MOUNT_DIR) .unwrap_or(&data.Where) .to_string(); @@ -185,7 +183,7 @@ pub fn create_datastore_disk( bail!("disk '{}' is already in use.", disk); } - let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); + let mount_point = format!("{}/{}", DATASTORE_MOUNT_DIR, &name); // check if the default path exists already. // bail if it is not empty or another filesystem mounted on top let default_path = std::path::PathBuf::from(&mount_point); @@ -193,7 +191,7 @@ pub fn create_datastore_disk( match std::fs::metadata(&default_path) { Err(_) => {} // path does not exist Ok(stat) => { - let basedir_dev = std::fs::metadata(BASE_MOUNT_DIR)?.st_dev(); + let basedir_dev = std::fs::metadata(DATASTORE_MOUNT_DIR)?.st_dev(); if stat.st_dev() != basedir_dev { bail!("path {default_path:?} already exists and is mountpoint"); } @@ -278,7 +276,7 @@ pub fn create_datastore_disk( )] /// Remove a Filesystem mounted under `/mnt/datastore/`. pub fn delete_datastore_disk(name: String) -> Result<(), Error> { - let path = format!("{}{}", BASE_MOUNT_DIR, name); + let path = format!("{}/{}", DATASTORE_MOUNT_DIR, name); // path of datastore cannot be changed let (config, _) = pbs_config::datastore::config()?; let datastores: Vec = config.convert_to_typed_array("datastore")?; -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:21:55 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:21:55 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 08/26] pbs-api-types: add mount_status field to DataStoreListItem In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-9-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- pbs-api-types/src/datastore.rs | 19 ++++++++++++++++- src/api2/admin/datastore.rs | 38 ++++++++++++++++++++-------------- src/api2/status/mod.rs | 30 +++++++++++++++++++++++---- 3 files changed, 66 insertions(+), 21 deletions(-) diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs index 9bcec7191..4927f3724 100644 --- a/pbs-api-types/src/datastore.rs +++ b/pbs-api-types/src/datastore.rs @@ -178,6 +178,20 @@ pub enum ChunkOrder { Inode, } +#[api] +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] +/// Current mounting status of a datastore, useful for removable datastores. 
+pub enum DataStoreMountStatus { + /// Removable datastore is currently mounted correctly. + Mounted, + /// Removable datastore is currently not mounted. + NotMounted, + /// Datastore is not removable, so there is no mount status. + #[default] + NonRemovable, +} + #[api] #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] @@ -451,6 +465,7 @@ impl DataStoreConfig { pub struct DataStoreListItem { pub store: String, pub comment: Option, + pub mount_status: DataStoreMountStatus, /// If the datastore is in maintenance mode, information about it #[serde(skip_serializing_if = "Option::is_none")] pub maintenance: Option, @@ -1456,6 +1471,7 @@ pub struct DataStoreStatusListItem { /// The available bytes of the underlying storage. (-1 on error) #[serde(skip_serializing_if = "Option::is_none")] pub avail: Option, + pub mount_status: DataStoreMountStatus, /// A list of usages of the past (last Month). #[serde(skip_serializing_if = "Option::is_none")] pub history: Option>>, @@ -1480,12 +1496,13 @@ pub struct DataStoreStatusListItem { } impl DataStoreStatusListItem { - pub fn empty(store: &str, err: Option) -> Self { + pub fn empty(store: &str, err: Option, mount_status: DataStoreMountStatus) -> Self { DataStoreStatusListItem { store: store.to_owned(), total: None, used: None, avail: None, + mount_status, history: None, history_start: None, history_delta: None, diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs index 2f441b550..1c939bc20 100644 --- a/src/api2/admin/datastore.rs +++ b/src/api2/admin/datastore.rs @@ -38,14 +38,15 @@ use pxar::EntryKind; use pbs_api_types::{ print_ns_and_snapshot, print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupContent, BackupGroupDeleteStats, BackupNamespace, BackupType, Counts, CryptMode, - DataStoreConfig, DataStoreListItem, DataStoreStatus, GarbageCollectionJobStatus, GroupListItem, - JobScheduleStatus, KeepOptions, MaintenanceMode, MaintenanceType, Operation, PruneJobOptions, - SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, - BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CATALOG_NAME, - CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, - MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, - PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, - UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, + DataStoreConfig, DataStoreListItem, DataStoreMountStatus, DataStoreStatus, + GarbageCollectionJobStatus, GroupListItem, JobScheduleStatus, KeepOptions, MaintenanceMode, + MaintenanceType, Operation, PruneJobOptions, SnapshotListItem, SnapshotVerifyState, + BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, + BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA, + IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, + PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, + PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA, + VERIFICATION_OUTDATED_AFTER_SCHEMA, }; use pbs_client::pxar::{create_tar, create_zip}; use pbs_config::CachedUserInfo; @@ -1323,8 +1324,8 @@ pub fn get_datastore_list( let mut list = Vec::new(); - for (store, (_, data)) in &config.sections { - let acl_path = &["datastore", store]; + for (store, (_, data)) in config.sections { + let acl_path = 
&["datastore", &store]; let user_privs = user_info.lookup_privs(&auth_id, acl_path); let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; @@ -1335,15 +1336,20 @@ pub fn get_datastore_list( } } + let store_config: DataStoreConfig = serde_json::from_value(data)?; + + let mount_status = match pbs_datastore::get_datastore_mount_status(&store_config) { + Some(true) => DataStoreMountStatus::Mounted, + Some(false) => DataStoreMountStatus::NotMounted, + None => DataStoreMountStatus::NonRemovable, + }; + if allowed || allow_id { list.push(DataStoreListItem { store: store.clone(), - comment: if !allowed { - None - } else { - data["comment"].as_str().map(String::from) - }, - maintenance: data["maintenance-mode"].as_str().map(String::from), + comment: store_config.comment.filter(|_| allowed), + mount_status, + maintenance: store_config.maintenance_mode, }); } } diff --git a/src/api2/status/mod.rs b/src/api2/status/mod.rs index 113aa9852..5efde9c3d 100644 --- a/src/api2/status/mod.rs +++ b/src/api2/status/mod.rs @@ -10,11 +10,12 @@ use proxmox_schema::api; use proxmox_sortable_macro::sortable; use pbs_api_types::{ - Authid, DataStoreStatusListItem, Operation, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, + Authid, DataStoreConfig, DataStoreMountStatus, DataStoreStatusListItem, Operation, + PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, }; use pbs_config::CachedUserInfo; -use pbs_datastore::DataStore; +use pbs_datastore::{get_datastore_mount_status, DataStore}; use crate::server::metric_collection::rrd::extract_rrd_data; use crate::tools::statistics::linear_regression; @@ -51,10 +52,26 @@ pub async fn datastore_status( for (store, (_, _)) in &config.sections { let user_privs = user_info.lookup_privs(&auth_id, &["datastore", store]); let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP)) != 0; + + let store_config = config.lookup::("datastore", store)?; + + let mount_status = match get_datastore_mount_status(&store_config) { + Some(true) => DataStoreMountStatus::Mounted, + Some(false) => { + list.push(DataStoreStatusListItem::empty( + store, + None, + DataStoreMountStatus::NotMounted, + )); + continue; + } + None => DataStoreMountStatus::NonRemovable, + }; + if !allowed { if let Ok(datastore) = DataStore::lookup_datastore(store, Some(Operation::Lookup)) { if can_access_any_namespace(datastore, &auth_id, &user_info) { - list.push(DataStoreStatusListItem::empty(store, None)); + list.push(DataStoreStatusListItem::empty(store, None, mount_status)); } } continue; @@ -63,7 +80,11 @@ pub async fn datastore_status( let datastore = match DataStore::lookup_datastore(store, Some(Operation::Read)) { Ok(datastore) => datastore, Err(err) => { - list.push(DataStoreStatusListItem::empty(store, Some(err.to_string()))); + list.push(DataStoreStatusListItem::empty( + store, + Some(err.to_string()), + mount_status, + )); continue; } }; @@ -74,6 +95,7 @@ pub async fn datastore_status( total: Some(status.total), used: Some(status.used), avail: Some(status.available), + mount_status, history: None, history_start: None, history_delta: None, -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:12 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:12 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 25/26] bin: debug: add inspect device command In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-26-h.laimer@proxmox.com> ... 
to get information about (removable) datastores a device contains Signed-off-by: Hannes Laimer --- src/bin/proxmox_backup_debug/inspect.rs | 149 ++++++++++++++++++++++++ 1 file changed, 149 insertions(+) diff --git a/src/bin/proxmox_backup_debug/inspect.rs b/src/bin/proxmox_backup_debug/inspect.rs index 28a472b0f..17df09be2 100644 --- a/src/bin/proxmox_backup_debug/inspect.rs +++ b/src/bin/proxmox_backup_debug/inspect.rs @@ -331,6 +331,151 @@ fn inspect_file( Ok(()) } +/// Return the count of VM, CT and host backup groups and the count of namespaces +/// as this tuple (vm, ct, host, ns) +fn get_basic_ds_info(path: String) -> Result<(i64, i64, i64, i64), Error> { + let mut vms = 0; + let mut cts = 0; + let mut hosts = 0; + let mut ns = 0; + let mut walker = WalkDir::new(path).into_iter(); + + while let Some(entry_result) = walker.next() { + let entry = entry_result?; + if !entry.file_type().is_dir() { + continue; + } + + let Some(name) = entry.path().file_name().and_then(|a| a.to_str()) else { + continue; + }; + + if name == ".chunks" { + walker.skip_current_dir(); + continue; + } + + let dir_count = std::fs::read_dir(entry.path())? + .filter_map(Result::ok) + .filter(|entry| entry.path().is_dir()) + .count() as i64; + + match name { + "ns" => ns += dir_count, + "vm" => { + vms += dir_count; + walker.skip_current_dir(); + } + "ct" => { + cts += dir_count; + walker.skip_current_dir(); + } + "host" => { + hosts += dir_count; + walker.skip_current_dir(); + } + _ => { + // root or ns dir + } + } + } + + Ok((vms, cts, hosts, ns)) +} + +#[api( + input: { + properties: { + device: { + description: "Device path, usually /dev/...", + type: String, + }, + "output-format": { + schema: OUTPUT_FORMAT, + optional: true, + }, + } + } +)] +/// Inspect a device for possible datastores on it +fn inspect_device(device: String, param: Value) -> Result<(), Error> { + let output_format = get_output_format(¶m); + let tmp_mount_path = format!( + "{}/{:x}", + pbs_buildcfg::rundir!("/mount"), + proxmox_uuid::Uuid::generate() + ); + + let default_options = proxmox_sys::fs::CreateOptions::new(); + proxmox_sys::fs::create_path( + &tmp_mount_path, + Some(default_options.clone()), + Some(default_options.clone()), + )?; + let mut mount_cmd = std::process::Command::new("mount"); + mount_cmd.arg(device.clone()); + mount_cmd.arg(tmp_mount_path.clone()); + proxmox_sys::command::run_command(mount_cmd, None)?; + + let mut walker = WalkDir::new(tmp_mount_path.clone()).into_iter(); + + let mut stores = Vec::new(); + + let mut ds_count = 0; + while let Some(entry_result) = walker.next() { + let entry = entry_result?; + + if entry.file_type().is_dir() + && entry + .file_name() + .to_str() + .map_or(false, |name| name == ".chunks") + { + let store_path = entry + .path() + .to_str() + .and_then(|n| n.strip_suffix("/.chunks")); + + if let Some(store_path) = store_path { + ds_count += 1; + let (vm, ct, host, ns) = get_basic_ds_info(store_path.to_string())?; + stores.push(json!({ + "path": store_path.strip_prefix(&tmp_mount_path).unwrap_or("???"), + "vm-count": vm, + "ct-count": ct, + "host-count": host, + "ns-count": ns, + })); + }; + + walker.skip_current_dir(); + } + } + + let mut umount_cmd = std::process::Command::new("umount"); + umount_cmd.arg(tmp_mount_path.clone()); + proxmox_sys::command::run_command(umount_cmd, None)?; + std::fs::remove_dir(std::path::Path::new(&tmp_mount_path))?; + + if output_format == "text" { + println!("Device containes {} stores", ds_count); + println!("---------------"); + for s in stores { + println!( 
+ "Datastore at {} | VM: {}, CT: {}, HOST: {}, NS: {}", + s["path"], s["vm-count"], s["ct-count"], s["host-count"], s["ns-count"] + ); + } + } else { + format_and_print_result( + &json!({"store_count": stores.len(), "stores": stores}), + &output_format, + ); + } + + Ok(()) +} + pub fn inspect_commands() -> CommandLineInterface { let cmd_def = CliCommandMap::new() .insert( @@ -340,6 +485,10 @@ pub fn inspect_commands() -> CommandLineInterface { .insert( "file", CliCommand::new(&API_METHOD_INSPECT_FILE).arg_param(&["file"]), + ) + .insert( + "device", + CliCommand::new(&API_METHOD_INSPECT_DEVICE).arg_param(&["device"]), ); cmd_def.into() -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:11 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:11 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 24/26] ui: support create removable datastore through directory creation In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-25-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 2 ++ www/DirectoryList.js | 13 +++++++++++++ www/window/CreateDirectory.js | 14 ++++++++++++++ 3 files changed, 29 insertions(+) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index ff817b253..2f7cc7a27 100644 --- a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -150,6 +150,8 @@ pub fn list_datastore_mounts() -> Result, Error> { "removable-datastore": { description: "The added datastore is removable.", type: bool, + optional: true, + default: false, }, filesystem: { type: FileSystemType, diff --git a/www/DirectoryList.js b/www/DirectoryList.js index adefa9abf..25921a623 100644 --- a/www/DirectoryList.js +++ b/www/DirectoryList.js @@ -121,6 +121,19 @@ Ext.define('PBS.admin.Directorylist', { ], columns: [ + { + text: '', + flex: 0, + width: 35, + dataIndex: 'removable', + renderer: function(_text, _, row) { + if (row.data.removable) { + return ``; + } else { + return ''; + } + }, + }, { text: gettext('Path'), dataIndex: 'path', diff --git a/www/window/CreateDirectory.js b/www/window/CreateDirectory.js index 6aabe21ab..38d6979d9 100644 --- a/www/window/CreateDirectory.js +++ b/www/window/CreateDirectory.js @@ -43,6 +43,20 @@ Ext.define('PBS.window.CreateDirectory', { name: 'add-datastore', fieldLabel: gettext('Add as Datastore'), value: '1', + listeners: { + change(field, newValue, _oldValue) { + let form = field.up('form'); + let rmBox = form.down('[name=removable-datastore]'); + + rmBox.setDisabled(!newValue); + rmBox.setValue(false); + }, + }, + }, + { + xtype: 'proxmoxcheckbox', + name: 'removable-datastore', + fieldLabel: gettext('is removable'), }, ], }); -- 2.39.5 From h.laimer at proxmox.com Mon Nov 25 17:22:08 2024 From: h.laimer at proxmox.com (Hannes Laimer) Date: Mon, 25 Nov 2024 17:22:08 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v15 21/26] api: node: allow creation of removable datastore through directory endpoint In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: <20241125162213.157011-22-h.laimer@proxmox.com> Signed-off-by: Hannes Laimer --- src/api2/node/disks/directory.rs | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs index 7f5402207..b6006b47c 100644 --- 
a/src/api2/node/disks/directory.rs +++ b/src/api2/node/disks/directory.rs @@ -123,6 +123,11 @@ pub fn list_datastore_mounts() -> Result, Error> { description: "Configure a datastore using the directory.", type: bool, optional: true, + default: false, + }, + "removable-datastore": { + description: "The added datastore is removable.", + type: bool, }, filesystem: { type: FileSystemType, @@ -141,7 +146,8 @@ pub fn list_datastore_mounts() -> Result, Error> { pub fn create_datastore_disk( name: String, disk: String, - add_datastore: Option, + add_datastore: bool, + removable_datastore: bool, filesystem: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result { @@ -156,7 +162,6 @@ pub fn create_datastore_disk( } let mount_point = format!("{}{}", BASE_MOUNT_DIR, &name); - // check if the default path exists already. // bail if it is not empty or another filesystem mounted on top let default_path = std::path::PathBuf::from(&mount_point); @@ -183,7 +188,6 @@ pub fn create_datastore_disk( move |_worker| { info!("create datastore '{name}' on disk {disk}"); - let add_datastore = add_datastore.unwrap_or(false); let filesystem = filesystem.unwrap_or(FileSystemType::Ext4); let manager = DiskManage::new(); @@ -196,18 +200,24 @@ pub fn create_datastore_disk( let uuid = get_fs_uuid(&partition)?; let uuid_path = format!("/dev/disk/by-uuid/{}", uuid); - let mount_unit_name = - create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?; + if !removable_datastore { + let mount_unit_name = + create_datastore_mount_unit(&name, &mount_point, filesystem, &uuid_path)?; - crate::tools::systemd::reload_daemon()?; - crate::tools::systemd::enable_unit(&mount_unit_name)?; - crate::tools::systemd::start_unit(&mount_unit_name)?; + crate::tools::systemd::reload_daemon()?; + crate::tools::systemd::enable_unit(&mount_unit_name)?; + crate::tools::systemd::start_unit(&mount_unit_name)?; + } if add_datastore { let lock = pbs_config::datastore::lock_config()?; - let datastore: DataStoreConfig = - serde_json::from_value(json!({ "name": name, "path": mount_point }))?; - + let datastore: DataStoreConfig = if removable_datastore { + serde_json::from_value( + json!({ "name": name, "path": format!("/{name}"), "backing-device": uuid }), + )? + } else { + serde_json::from_value(json!({ "name": name, "path": mount_point }))? + }; let (config, _digest) = pbs_config::datastore::config()?; if config.sections.contains_key(&datastore.name) { -- 2.39.5 From g.goller at proxmox.com Mon Nov 25 17:57:46 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Mon, 25 Nov 2024 17:57:46 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2] reuse-datastore: avoid creating another default prune job In-Reply-To: <1732545364.j7koj2vyxl.astroid@yuna.none> References: <20241125085953.19828-1-g.goller@proxmox.com> <1732545364.j7koj2vyxl.astroid@yuna.none> Message-ID: On 25.11.2024 15:37, Fabian Grünbichler wrote: >On November 25, 2024 11:10 am, Christian Ebner wrote: >> On 11/25/24 09:59, Gabriel Goller wrote: >>> If a datastore with a default prune job is removed, the prune job is >>> preserved as it is stored in /etc/proxmox-backup/prune.cfg. We also >>> create a default prune job for every datastore - this means that when >>> reusing a datastore that previously existed, you end up with duplicate >>> prune jobs. >> >> Looking at this once more, I am not so sure anymore that this should >> only check for the default prune job?
Why not check if there is any >> prune job configured at all for this datastore, and only if there is >> none create the new default prune job? > >that would also work? > >- if no prune job exists for this store, create default one >- if explicit prune job options were given, create that one >- otherwise, don't add a prune job (no options given, and one exists > already for this store) Yep, posted a v3! From g.goller at proxmox.com Mon Nov 25 18:10:48 2024 From: g.goller at proxmox.com (Gabriel Goller) Date: Mon, 25 Nov 2024 18:10:48 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2] reuse-datastore: avoid creating another default prune job In-Reply-To: <1732545364.j7koj2vyxl.astroid@yuna.none> References: <20241125085953.19828-1-g.goller@proxmox.com> <1732545364.j7koj2vyxl.astroid@yuna.none> Message-ID: <2pzigu3qbfxvkyi2q2jlhxq27xxlpen4cw5gbqvb6qamlqz4w5@ua4ebln6ipu3> Was a bit too hasty on the previous reply. On 25.11.2024 15:37, Fabian Grünbichler wrote: >On November 25, 2024 11:10 am, Christian Ebner wrote: >> On 11/25/24 09:59, Gabriel Goller wrote: >>> If a datastore with a default prune job is removed, the prune job is
> Now we can add a textarea row, which will open a textarea popup, and > encode the multi-line text into an base64 string (with utf8 support). > > Signed-off-by: Gabriel Goller > --- > src/Makefile | 1 + > src/form/TextAreaField.js | 60 +++++++++++++++++++++++++++++++++++++++ > src/grid/ObjectGrid.js | 29 +++++++++++++++++++ > 3 files changed, 90 insertions(+) > create mode 100644 src/form/TextAreaField.js > > applied, with a few style/naming fixes squashed in, thanks! From c.ebner at proxmox.com Mon Nov 25 18:40:08 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 25 Nov 2024 18:40:08 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] use same config section type for all sync jobs Message-ID: <20241125174012.678523-1-c.ebner@proxmox.com> This patch series drops the `sync-push` config section type in favor of using the same `sync` for both, sync jobs in push and pull direction. Instead, encode the sync direction as optional parameter in the sync job config, defaulting to sync in pull direction. This reduces complexity by allowing to drop the optional parameter for most function calls. For api methods, the default remains to only show sync directions in pull direction, if no ListSyncDirection::All is passed, or the direction explicitly selected. This allows to default to show both directions in future Proxmox Backup Server version. This patch series depends on Dominik's patch series found here: https://lore.proxmox.com/pbs-devel/377618fd-0ea9-46ba-9aec-a47387eca50d at proxmox.com/T Christian Ebner (4): config: sync: use same config section type `sync` for push and pull api: admin/config: introduce sync direction as job config parameter bin: show direction in sync job list output api types: drop unused config type helpers for sync direction pbs-api-types/src/jobs.rs | 25 ++-- pbs-config/src/sync.rs | 17 +-- src/api2/admin/sync.rs | 18 +-- src/api2/config/datastore.rs | 16 +-- src/api2/config/notifications/mod.rs | 19 ++-- src/api2/config/sync.rs | 151 ++++++++----------------- src/bin/proxmox-backup-proxy.rs | 22 +--- src/bin/proxmox_backup_manager/sync.rs | 6 +- src/server/sync.rs | 2 +- 9 files changed, 88 insertions(+), 188 deletions(-) -- 2.39.5 From c.ebner at proxmox.com Mon Nov 25 18:40:09 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 25 Nov 2024 18:40:09 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/4] config: sync: use same config section type `sync` for push and pull In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com> References: <20241125174012.678523-1-c.ebner@proxmox.com> Message-ID: <20241125174012.678523-2-c.ebner@proxmox.com> Use `sync` as config section type string for both, sync jobs in push and pull direction, renaming the now combined config plugin to sync plugin. Commit bcd80bf9 ("api types/config: add `sync-push` config type for push sync jobs") introduced the additional config type with the intend to reduce possible misconfiguration. Partially revert this to use the same config type string again, since the misconfiguration can happen nevertheless (by editing the config type) and currently sync job configs are only listed partially when fetched via the config api endpoint. The filtering based on the additional api parameter is however retained. 
Signed-off-by: Christian Ebner --- pbs-config/src/sync.rs | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/pbs-config/src/sync.rs b/pbs-config/src/sync.rs index 7fc977e77..10f528b5e 100644 --- a/pbs-config/src/sync.rs +++ b/pbs-config/src/sync.rs @@ -6,7 +6,7 @@ use anyhow::Error; use proxmox_schema::{ApiType, Schema}; use proxmox_section_config::{SectionConfig, SectionConfigData, SectionConfigPlugin}; -use pbs_api_types::{SyncDirection, SyncJobConfig, JOB_ID_SCHEMA}; +use pbs_api_types::{SyncJobConfig, JOB_ID_SCHEMA}; use crate::{open_backup_lockfile, replace_backup_config, BackupLockGuard}; @@ -18,19 +18,10 @@ fn init() -> SectionConfig { _ => unreachable!(), }; - let pull_plugin = SectionConfigPlugin::new( - SyncDirection::Pull.as_config_type_str().to_string(), - Some(String::from("id")), - obj_schema, - ); - let push_plugin = SectionConfigPlugin::new( - SyncDirection::Push.as_config_type_str().to_string(), - Some(String::from("id")), - obj_schema, - ); + let sync_plugin = + SectionConfigPlugin::new("sync".to_string(), Some(String::from("id")), obj_schema); let mut config = SectionConfig::new(&JOB_ID_SCHEMA); - config.register_plugin(pull_plugin); - config.register_plugin(push_plugin); + config.register_plugin(sync_plugin); config } -- 2.39.5 From c.ebner at proxmox.com Mon Nov 25 18:40:11 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 25 Nov 2024 18:40:11 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 3/4] bin: show direction in sync job list output In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com> References: <20241125174012.678523-1-c.ebner@proxmox.com> Message-ID: <20241125174012.678523-4-c.ebner@proxmox.com> As the WebUI also lists the sync direction, display the direction in the cli output as well. Examplary output: ``` ??????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????? ? id ? sync-direction ? store ? remote ? remote-store ? schedule ? group-filter ? rate-in ? comment ? ??????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????? ? s-6c16fab2-9e85 ? ? datastore ? ? datastore ? hourly ? all ? ? ? ??????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????? ? s-8764c440-3a6c ? push ? datastore ? local ? push-target-store ? hourly ? all ? ? ? ??????????????????????????????????????????????????????????????????????????????????????????????????????????????????????????? ``` Signed-off-by: Christian Ebner --- src/bin/proxmox_backup_manager/sync.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/bin/proxmox_backup_manager/sync.rs b/src/bin/proxmox_backup_manager/sync.rs index 3ccaae943..42df08d30 100644 --- a/src/bin/proxmox_backup_manager/sync.rs +++ b/src/bin/proxmox_backup_manager/sync.rs @@ -44,6 +44,7 @@ fn list_sync_jobs(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result References: <20241125174012.678523-1-c.ebner@proxmox.com> Message-ID: <20241125174012.678523-5-c.ebner@proxmox.com> Jobs for both sync directions are now stored using the same `sync` config section type, so drop the outdated helpers. 
Signed-off-by: Christian Ebner --- pbs-api-types/src/jobs.rs | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index 4a85378ce..16b16dd84 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -519,23 +519,6 @@ impl std::fmt::Display for SyncDirection { } } -impl SyncDirection { - pub fn as_config_type_str(&self) -> &'static str { - match self { - SyncDirection::Pull => "sync", - SyncDirection::Push => "sync-push", - } - } - - pub fn from_config_type_str(config_type: &str) -> Result { - match config_type { - "sync" => Ok(SyncDirection::Pull), - "sync-push" => Ok(SyncDirection::Push), - _ => bail!("invalid config type for sync job"), - } - } -} - pub const RESYNC_CORRUPT_SCHEMA: Schema = BooleanSchema::new("If the verification failed for a local snapshot, try to pull it again.") .schema(); -- 2.39.5 From c.ebner at proxmox.com Mon Nov 25 18:40:10 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Mon, 25 Nov 2024 18:40:10 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 2/4] api: admin/config: introduce sync direction as job config parameter In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com> References: <20241125174012.678523-1-c.ebner@proxmox.com> Message-ID: <20241125174012.678523-3-c.ebner@proxmox.com> Add the sync direction for the sync job as optional config parameter and refrain from using the config section type for conditional direction check, as they are now the same (see previous commit). Use the configured sync job parameter instead of passing it to the various methods as function parameter and only filter based on sync direction if an optional api parameter to distingush/filter based on direction is given. Signed-off-by: Christian Ebner --- pbs-api-types/src/jobs.rs | 8 +- src/api2/admin/sync.rs | 18 +-- src/api2/config/datastore.rs | 16 +-- src/api2/config/notifications/mod.rs | 19 ++-- src/api2/config/sync.rs | 151 ++++++++----------------- src/bin/proxmox-backup-proxy.rs | 22 +--- src/bin/proxmox_backup_manager/sync.rs | 5 +- src/server/sync.rs | 2 +- 8 files changed, 83 insertions(+), 158 deletions(-) diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs index e18197fb1..4a85378ce 100644 --- a/pbs-api-types/src/jobs.rs +++ b/pbs-api-types/src/jobs.rs @@ -597,7 +597,11 @@ pub const RESYNC_CORRUPT_SCHEMA: Schema = "resync-corrupt": { schema: RESYNC_CORRUPT_SCHEMA, optional: true, - } + }, + "sync-direction": { + type: SyncDirection, + optional: true, + }, } )] #[derive(Serialize, Deserialize, Clone, Updater, PartialEq)] @@ -633,6 +637,8 @@ pub struct SyncJobConfig { pub transfer_last: Option, #[serde(skip_serializing_if = "Option::is_none")] pub resync_corrupt: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub sync_direction: Option, } impl SyncJobConfig { diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs index 2b8fce484..965be8d06 100644 --- a/src/api2/admin/sync.rs +++ b/src/api2/admin/sync.rs @@ -85,9 +85,9 @@ pub fn list_config_sync_jobs( let sync_direction = sync_direction.unwrap_or_default(); let mut list = Vec::with_capacity(config.sections.len()); - for (_, (sync_type, job)) in config.sections.into_iter() { + for (_, (_, job)) in config.sections.into_iter() { let job: SyncJobConfig = serde_json::from_value(job)?; - let direction = SyncDirection::from_config_type_str(&sync_type)?; + let direction = job.sync_direction.unwrap_or_default(); match &store { Some(store) if &job.store != store => continue, @@ -100,7 +100,7 @@ pub fn 
list_config_sync_jobs( _ => {} } - if !check_sync_job_read_access(&user_info, &auth_id, &job, direction) { + if !check_sync_job_read_access(&user_info, &auth_id, &job) { continue; } @@ -144,15 +144,9 @@ pub fn run_sync_job( let user_info = CachedUserInfo::new()?; let (config, _digest) = sync::config()?; - let (config_type, config_section) = config - .sections - .get(&id) - .ok_or_else(|| format_err!("No sync job with id '{id}' found in config"))?; + let sync_job: SyncJobConfig = config.lookup("sync", &id)?; - let sync_direction = SyncDirection::from_config_type_str(config_type)?; - let sync_job = SyncJobConfig::deserialize(config_section)?; - - if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job, sync_direction) { + if !check_sync_job_modify_access(&user_info, &auth_id, &sync_job) { bail!("permission check failed, '{auth_id}' is missing access"); } @@ -160,7 +154,7 @@ pub fn run_sync_job( let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI; - let upid_str = do_sync_job(job, sync_job, &auth_id, None, sync_direction, to_stdout)?; + let upid_str = do_sync_job(job, sync_job, &auth_id, None, to_stdout)?; Ok(upid_str) } diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs index 8c307a233..1b0728a22 100644 --- a/src/api2/config/datastore.rs +++ b/src/api2/config/datastore.rs @@ -13,9 +13,8 @@ use proxmox_uuid::Uuid; use pbs_api_types::{ Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions, - MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA, - PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, - PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, + MaintenanceMode, PruneJobConfig, PruneJobOptions, DATASTORE_SCHEMA, PRIV_DATASTORE_ALLOCATE, + PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA, }; use pbs_config::BackupLockGuard; use pbs_datastore::chunk_store::ChunkStore; @@ -525,15 +524,8 @@ pub async fn delete_datastore( for job in list_verification_jobs(Some(name.clone()), Value::Null, rpcenv)? { delete_verification_job(job.config.id, None, rpcenv)? } - for direction in [SyncDirection::Pull, SyncDirection::Push] { - for job in list_config_sync_jobs( - Some(name.clone()), - Some(direction.into()), - Value::Null, - rpcenv, - )? { - delete_sync_job(job.config.id, None, rpcenv)? - } + for job in list_config_sync_jobs(Some(name.clone()), None, Value::Null, rpcenv)? { + delete_sync_job(job.config.id, None, rpcenv)? } for job in list_prune_jobs(Some(name.clone()), Value::Null, rpcenv)? { delete_prune_job(job.config.id, None, rpcenv)? 
diff --git a/src/api2/config/notifications/mod.rs b/src/api2/config/notifications/mod.rs index 2081b7b75..89b2ba8a4 100644 --- a/src/api2/config/notifications/mod.rs +++ b/src/api2/config/notifications/mod.rs @@ -9,7 +9,7 @@ use proxmox_schema::api; use proxmox_sortable_macro::sortable; use crate::api2::admin::datastore::get_datastore_list; -use pbs_api_types::{SyncDirection, PRIV_SYS_AUDIT}; +use pbs_api_types::PRIV_SYS_AUDIT; use crate::api2::admin::prune::list_prune_jobs; use crate::api2::admin::sync::list_config_sync_jobs; @@ -154,15 +154,13 @@ pub fn get_values( }); } - for direction in [SyncDirection::Pull, SyncDirection::Push] { - let sync_jobs = list_config_sync_jobs(None, Some(direction.into()), param.clone(), rpcenv)?; - for job in sync_jobs { - values.push(MatchableValue { - field: "job-id".into(), - value: job.config.id, - comment: job.config.comment, - }); - } + let sync_jobs = list_config_sync_jobs(None, None, param.clone(), rpcenv)?; + for job in sync_jobs { + values.push(MatchableValue { + field: "job-id".into(), + value: job.config.id, + comment: job.config.comment, + }); } let verify_jobs = list_verification_jobs(None, param.clone(), rpcenv)?; @@ -186,7 +184,6 @@ pub fn get_values( "package-updates", "prune", "sync", - "sync-push", "system-mail", "tape-backup", "tape-load", diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index afaa0d5e4..e8a1ad076 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -17,12 +17,12 @@ use pbs_config::sync; use pbs_config::CachedUserInfo; use pbs_datastore::check_backup_owner; +use crate::api2::admin::sync::ListSyncDirection; pub fn check_sync_job_read_access( user_info: &CachedUserInfo, auth_id: &Authid, job: &SyncJobConfig, - sync_direction: SyncDirection, ) -> bool { // check for audit access on datastore/namespace, applies for pull and push direction let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); @@ -30,6 +30,7 @@ pub fn check_sync_job_read_access( return false; } + let sync_direction = job.sync_direction.unwrap_or_default(); match sync_direction { SyncDirection::Pull => { if let Some(remote) = &job.remote { @@ -71,8 +72,8 @@ pub fn check_sync_job_modify_access( user_info: &CachedUserInfo, auth_id: &Authid, job: &SyncJobConfig, - sync_direction: SyncDirection, ) -> bool { + let sync_direction = job.sync_direction.unwrap_or_default(); match sync_direction { SyncDirection::Pull => { let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path()); @@ -150,7 +151,7 @@ pub fn check_sync_job_modify_access( input: { properties: { "sync-direction": { - type: SyncDirection, + type: ListSyncDirection, optional: true, }, }, @@ -168,23 +169,29 @@ pub fn check_sync_job_modify_access( /// List all sync jobs pub fn list_sync_jobs( _param: Value, - sync_direction: Option, + sync_direction: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result, Error> { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let user_info = CachedUserInfo::new()?; + let sync_direction = sync_direction.unwrap_or_default(); let (config, digest) = sync::config()?; - let sync_direction = sync_direction.unwrap_or_default(); - let list = config.convert_to_typed_array(sync_direction.as_config_type_str())?; + let list: Vec = config.convert_to_typed_array("sync")?; rpcenv["digest"] = hex::encode(digest).into(); let list = list .into_iter() .filter(|sync_job| { - check_sync_job_read_access(&user_info, &auth_id, sync_job, sync_direction) + let direction = sync_job.sync_direction.unwrap_or_default(); + match 
&sync_direction { + ListSyncDirection::Pull if direction != SyncDirection::Pull => return false, + ListSyncDirection::Push if direction != SyncDirection::Push => return false, + _ => {} + } + check_sync_job_read_access(&user_info, &auth_id, sync_job) }) .collect(); Ok(list) @@ -198,10 +205,6 @@ pub fn list_sync_jobs( type: SyncJobConfig, flatten: true, }, - "sync-direction": { - type: SyncDirection, - optional: true, - }, }, }, access: { @@ -212,16 +215,14 @@ pub fn list_sync_jobs( /// Create a new sync job. pub fn create_sync_job( config: SyncJobConfig, - sync_direction: Option, rpcenv: &mut dyn RpcEnvironment, ) -> Result<(), Error> { let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let user_info = CachedUserInfo::new()?; - let sync_direction = sync_direction.unwrap_or_default(); let _lock = sync::lock_config()?; - if !check_sync_job_modify_access(&user_info, &auth_id, &config, sync_direction) { + if !check_sync_job_modify_access(&user_info, &auth_id, &config) { bail!("permission check failed"); } @@ -229,6 +230,7 @@ pub fn create_sync_job( bail!("source and target datastore can't be the same"); } + let sync_direction = config.sync_direction.unwrap_or_default(); if sync_direction == SyncDirection::Push && config.resync_corrupt.is_some() { bail!("push jobs do not support resync-corrupt option"); } @@ -248,7 +250,7 @@ pub fn create_sync_job( param_bail!("id", "job '{}' already exists.", config.id); } - section_config.set_data(&config.id, sync_direction.as_config_type_str(), &config)?; + section_config.set_data(&config.id, "sync", &config)?; sync::save_config(§ion_config)?; @@ -278,17 +280,9 @@ pub fn read_sync_job(id: String, rpcenv: &mut dyn RpcEnvironment) -> Result { data.transfer_last = None; } + DeletableProperty::SyncDirection => { + data.sync_direction = None; + } } } } @@ -482,6 +475,9 @@ pub fn update_sync_job( if let Some(resync_corrupt) = update.resync_corrupt { data.resync_corrupt = Some(resync_corrupt); } + if let Some(sync_direction) = update.sync_direction { + data.sync_direction = Some(sync_direction); + } if update.limit.rate_in.is_some() { data.limit.rate_in = update.limit.rate_in; @@ -519,11 +515,11 @@ pub fn update_sync_job( } } - if !check_sync_job_modify_access(&user_info, &auth_id, &data, sync_direction) { + if !check_sync_job_modify_access(&user_info, &auth_id, &data) { bail!("permission check failed"); } - config.set_data(&id, sync_direction.as_config_type_str(), &data)?; + config.set_data(&id, "sync", &data)?; sync::save_config(&config)?; @@ -570,15 +566,16 @@ pub fn delete_sync_job( crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?; } - if let Some((config_type, config_section)) = config.sections.get(&id) { - let sync_direction = SyncDirection::from_config_type_str(config_type)?; - let job = SyncJobConfig::deserialize(config_section)?; - if !check_sync_job_modify_access(&user_info, &auth_id, &job, sync_direction) { - bail!("permission check failed"); + match config.lookup("sync", &id) { + Ok(job) => { + if !check_sync_job_modify_access(&user_info, &auth_id, &job) { + bail!("permission check failed"); + } + config.sections.remove(&id); + } + Err(_) => { + http_bail!(NOT_FOUND, "job '{}' does not exist.", id) } - config.sections.remove(&id); - } else { - http_bail!(NOT_FOUND, "job '{}' does not exist.", id) } sync::save_config(&config)?; @@ -647,62 +644,36 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator schedule: None, limit: pbs_api_types::RateLimitConfig::default(), // no limit transfer_last: None, + 
sync_direction: None, // use default }; // should work without ACLs - assert!(check_sync_job_read_access( - &user_info, - root_auth_id, - &job, - SyncDirection::Pull, - )); - assert!(check_sync_job_modify_access( - &user_info, - root_auth_id, - &job, - SyncDirection::Pull, - )); + assert!(check_sync_job_read_access(&user_info, root_auth_id, &job)); + assert!(check_sync_job_modify_access(&user_info, root_auth_id, &job,)); // user without permissions must fail assert!(!check_sync_job_read_access( &user_info, &no_perm_auth_id, &job, - SyncDirection::Pull, )); assert!(!check_sync_job_modify_access( &user_info, &no_perm_auth_id, &job, - SyncDirection::Pull, )); // reading without proper read permissions on either remote or local must fail - assert!(!check_sync_job_read_access( - &user_info, - &read_auth_id, - &job, - SyncDirection::Pull, - )); + assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job,)); // reading without proper read permissions on local end must fail job.remote = Some("remote1".to_string()); - assert!(!check_sync_job_read_access( - &user_info, - &read_auth_id, - &job, - SyncDirection::Pull, - )); + assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job,)); // reading without proper read permissions on remote end must fail job.remote = Some("remote0".to_string()); job.store = "localstore1".to_string(); - assert!(!check_sync_job_read_access( - &user_info, - &read_auth_id, - &job, - SyncDirection::Pull, - )); + assert!(!check_sync_job_read_access(&user_info, &read_auth_id, &job,)); // writing without proper write permissions on either end must fail job.store = "localstore0".to_string(); @@ -710,7 +681,6 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); // writing without proper write permissions on local end must fail @@ -723,53 +693,38 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); // reset remote to one where users have access job.remote = Some("remote1".to_string()); // user with read permission can only read, but not modify/run - assert!(check_sync_job_read_access( - &user_info, - &read_auth_id, - &job, - SyncDirection::Pull, - )); + assert!(check_sync_job_read_access(&user_info, &read_auth_id, &job,)); job.owner = Some(read_auth_id.clone()); assert!(!check_sync_job_modify_access( &user_info, &read_auth_id, &job, - SyncDirection::Pull, )); job.owner = None; assert!(!check_sync_job_modify_access( &user_info, &read_auth_id, &job, - SyncDirection::Pull, )); job.owner = Some(write_auth_id.clone()); assert!(!check_sync_job_modify_access( &user_info, &read_auth_id, &job, - SyncDirection::Pull, )); // user with simple write permission can modify/run - assert!(check_sync_job_read_access( - &user_info, - &write_auth_id, - &job, - SyncDirection::Pull, - )); + assert!(check_sync_job_read_access(&user_info, &write_auth_id, &job,)); assert!(check_sync_job_modify_access( &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); // but can't modify/run with deletion @@ -778,7 +733,6 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); // unless they have Datastore.Prune as well @@ -787,7 +741,6 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); // changing owner is not possible @@ -796,7 +749,6 @@ acl:1:/remote/remote1/remotestore1:write at 
pbs:RemoteSyncOperator &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); // also not to the default 'root at pam' @@ -805,7 +757,6 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); // unless they have Datastore.Modify as well @@ -815,14 +766,12 @@ acl:1:/remote/remote1/remotestore1:write at pbs:RemoteSyncOperator &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); job.owner = None; assert!(check_sync_job_modify_access( &user_info, &write_auth_id, &job, - SyncDirection::Pull, )); Ok(()) diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs index 70283510d..ec3df856a 100644 --- a/src/bin/proxmox-backup-proxy.rs +++ b/src/bin/proxmox-backup-proxy.rs @@ -40,8 +40,8 @@ use pbs_buildcfg::configdir; use proxmox_time::CalendarEvent; use pbs_api_types::{ - Authid, DataStoreConfig, Operation, PruneJobConfig, SyncDirection, SyncJobConfig, - TapeBackupJobConfig, VerificationJobConfig, + Authid, DataStoreConfig, Operation, PruneJobConfig, SyncJobConfig, TapeBackupJobConfig, + VerificationJobConfig, }; use proxmox_backup::auth_helpers::*; @@ -589,14 +589,7 @@ async fn schedule_datastore_sync_jobs() { Ok((config, _digest)) => config, }; - for (job_id, (job_type, job_config)) in config.sections { - let sync_direction = match SyncDirection::from_config_type_str(&job_type) { - Ok(direction) => direction, - Err(err) => { - eprintln!("unexpected config type in sync job config - {err}"); - continue; - } - }; + for (job_id, (_, job_config)) in config.sections { let job_config: SyncJobConfig = match serde_json::from_value(job_config) { Ok(c) => c, Err(err) => { @@ -618,14 +611,7 @@ async fn schedule_datastore_sync_jobs() { }; let auth_id = Authid::root_auth_id().clone(); - if let Err(err) = do_sync_job( - job, - job_config, - &auth_id, - Some(event_str), - sync_direction, - false, - ) { + if let Err(err) = do_sync_job(job, job_config, &auth_id, Some(event_str), false) { eprintln!("unable to start datastore sync job {job_id} - {err}"); } }; diff --git a/src/bin/proxmox_backup_manager/sync.rs b/src/bin/proxmox_backup_manager/sync.rs index b08bfb58b..3ccaae943 100644 --- a/src/bin/proxmox_backup_manager/sync.rs +++ b/src/bin/proxmox_backup_manager/sync.rs @@ -4,9 +4,10 @@ use serde_json::Value; use proxmox_router::{cli::*, ApiHandler, RpcEnvironment}; use proxmox_schema::api; -use pbs_api_types::{SyncDirection, JOB_ID_SCHEMA}; +use pbs_api_types::JOB_ID_SCHEMA; use proxmox_backup::api2; +use crate::api2::admin::sync::ListSyncDirection; fn render_group_filter(value: &Value, _record: &Value) -> Result { if let Some(group_filters) = value.as_array() { @@ -21,7 +22,7 @@ fn render_group_filter(value: &Value, _record: &Value) -> Result input: { properties: { "sync-direction": { - type: SyncDirection, + type: ListSyncDirection, optional: true, }, "output-format": { diff --git a/src/server/sync.rs b/src/server/sync.rs index 4c6b43d24..0bd7a7a85 100644 --- a/src/server/sync.rs +++ b/src/server/sync.rs @@ -597,7 +597,6 @@ pub fn do_sync_job( sync_job: SyncJobConfig, auth_id: &Authid, schedule: Option, - sync_direction: SyncDirection, to_stdout: bool, ) -> Result { let job_id = format!( @@ -609,6 +608,7 @@ pub fn do_sync_job( job.jobname(), ); let worker_type = job.jobtype().to_string(); + let sync_direction = sync_job.sync_direction.unwrap_or_default(); if sync_job.remote.is_none() && sync_job.store == sync_job.remote_store { bail!("can't sync to same datastore"); -- 2.39.5 From t.lamprecht at 
proxmox.com Mon Nov 25 19:11:42 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 19:11:42 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v4 5/7] api: add consent api handler and config option In-Reply-To: <20240913131033.396324-6-g.goller@proxmox.com> References: <20240913131033.396324-1-g.goller@proxmox.com> <20240913131033.396324-6-g.goller@proxmox.com> Message-ID: Am 13.09.24 um 15:10 schrieb Gabriel Goller: > Add consent_text option to the node.cfg config. Embed the value into > index.html file using handlebars. > > Signed-off-by: Gabriel Goller > --- > src/api2/node/config.rs | 8 ++++++++ > src/bin/proxmox-backup-proxy.rs | 11 ++++++++--- > src/config/node.rs | 4 ++++ > 3 files changed, 20 insertions(+), 3 deletions(-) > > applied, thanks! From t.lamprecht at proxmox.com Mon Nov 25 19:13:03 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 19:13:03 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v4 7/7] docs: add section about consent banner In-Reply-To: <20240913131033.396324-8-g.goller@proxmox.com> References: <20240913131033.396324-1-g.goller@proxmox.com> <20240913131033.396324-8-g.goller@proxmox.com> Message-ID: <7b323ed2-12f8-4fd8-afc1-fa258d8da072@proxmox.com> Am 13.09.24 um 15:10 schrieb Gabriel Goller: > Add a short section on how to enable the consent banner. > > Signed-off-by: Gabriel Goller > --- > docs/gui.rst | 8 ++++++++ > 1 file changed, 8 insertions(+) > > applied, thanks! From t.lamprecht at proxmox.com Mon Nov 25 19:12:53 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 19:12:53 +0100 Subject: [pbs-devel] applied: [PATCH proxmox-backup v4 6/7] ui: show consent banner before login In-Reply-To: <20240913131033.396324-7-g.goller@proxmox.com> References: <20240913131033.396324-1-g.goller@proxmox.com> <20240913131033.396324-7-g.goller@proxmox.com> Message-ID: <051476db-154f-49be-ae4b-fd764c22de5e@proxmox.com> Am 13.09.24 um 15:10 schrieb Gabriel Goller: > Before showing the LoginView, check if we got a non-empty consent text > from the template. If there is a non-empty text, display it in a modal. > > Signed-off-by: Gabriel Goller > --- > www/LoginView.js | 12 ++++++++++++ > www/config/NodeOptionView.js | 6 ++++++ > www/index.hbs | 1 + > 3 files changed, 19 insertions(+) > > applied, with the indentation mess in the init function a bit improved, thanks! btw. an onlineHelp link would be great, IMO it's a bit of a confusing setting as is. From t.lamprecht at proxmox.com Mon Nov 25 21:48:05 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 21:48:05 +0100 Subject: [pbs-devel] applied-series: [PATCH proxmox-backup v15 00/26] add removable datastores In-Reply-To: <20241125162213.157011-1-h.laimer@proxmox.com> References: <20241125162213.157011-1-h.laimer@proxmox.com> Message-ID: Am 25.11.24 um 17:21 schrieb Hannes Laimer: > These patches add support for removable > datastores. All removable > datastores have a backing-device (a UUID) associated with them. Removable > datastores work like normal ones, just that they can be unplugged. It is > possible to create a removable datastore, sync backups onto it, unplug > it and use it on a different PBS. > > The datastore path specified is relative to the root of the used device. > Removable datastores are bind mounted to /mnt/datastore/. > Multiple datastores can be created on a single device, but only devices with > a single datastore on them will be auto-mounted.
> > When a removable datastore is deleted and 'destroy-data' is set, the > device has to be mounted. If 'destroy-data' is not set the datastore > can be deleted even if the device is not present. Removable datastores > are automatically mounted when plugged in. > Dietmar Maurer (1): > maintenance: add 'Unmount' maintenance type > > Hannes Laimer (25): > pbs-api-types: add backing-device to DataStoreConfig > maintenance: make is_offline more generic > datastore: add helper for checking if a datastore is mounted > api: admin: add (un)mount endpoint for removable datastores > api: removable datastore creation > api: add check for nested datastores on creation > pbs-api-types: add mount_status field to DataStoreListItem > bin: manager: add (un)mount command > add auto-mounting for removable datastores > datastore: handle deletion of removable datastore properly > docs: add removable datastores section > ui: add partition selector form > ui: add removable datastore creation support > ui: add (un)mount button to summary > ui: tree: render unmounted datastores correctly > ui: utils: make parseMaintenanceMode more robust > ui: add datastore status mask for unmounted removable datastores > ui: maintenance: fix disable msg field if no type is selected > ui: render 'unmount' maintenance mode correctly > api: node: allow creation of removable datastore through directory > endpoint > api: node: include removable datastores in directory list > node: disks: replace BASE_MOUNT_DIR with DATASTORE_MOUNT_DIR > ui: support create removable datastore through directory creation > bin: debug: add inspect device command > api: disks: only return UUID of partitions if it actually is one > > debian/proxmox-backup-server.install | 1 + > debian/proxmox-backup-server.udev | 3 + > docs/storage.rst | 38 +++ > etc/Makefile | 1 + > etc/removable-device-attach at .service | 8 + > pbs-api-types/src/datastore.rs | 47 ++- > pbs-api-types/src/maintenance.rs | 12 +- > pbs-config/src/datastore.rs | 14 + > pbs-datastore/src/datastore.rs | 83 ++++- > pbs-datastore/src/lib.rs | 4 +- > src/api2/admin/datastore.rs | 316 +++++++++++++++++++- > src/api2/config/datastore.rs | 122 +++++++- > src/api2/node/disks/directory.rs | 74 +++-- > src/api2/status/mod.rs | 30 +- > src/bin/proxmox_backup_debug/inspect.rs | 149 +++++++++ > src/bin/proxmox_backup_manager/datastore.rs | 126 +++++++- > src/server/metric_collection/mod.rs | 4 + > src/tools/disks/mod.rs | 5 +- > www/DirectoryList.js | 13 + > www/Makefile | 1 + > www/NavigationTree.js | 18 +- > www/Utils.js | 33 +- > www/css/ext6-pbs.css | 20 ++ > www/datastore/DataStoreListSummary.js | 1 + > www/datastore/Summary.js | 122 +++++++- > www/form/PartitionSelector.js | 81 +++++ > www/window/CreateDirectory.js | 14 + > www/window/DataStoreEdit.js | 32 ++ > www/window/MaintenanceOptions.js | 17 +- > 29 files changed, 1298 insertions(+), 91 deletions(-) > create mode 100644 etc/removable-device-attach at .service > create mode 100644 www/form/PartitionSelector.js > applied series, thanks! I did a handful of follow-up commits for the UI behavior, especially w.r.t. how mount and unmount are processed. Just check the commits themselves, in general I tried to be descriptive. Please take a look at them to cross-check for any unforeseen regressions. What would be still good is adding to the docs how jobs like sync, GC, ... are handled when the datastore is not plugged and if they are caught up on or not when it's plugged again. That should have a bit higher priority. 
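For readers following along, the (un)mount workflow added by the series presumably boils down to something like the following (subcommand names inferred from the patch titles; the exact syntax is an assumption and `store1` is a placeholder):

```
# cleanly unmount the removable datastore before unplugging the device
proxmox-backup-manager datastore unmount store1

# mount it again by name, e.g. on a device holding multiple datastores
# (devices with a single datastore on them are auto-mounted on plug-in)
proxmox-backup-manager datastore mount store1
```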
Additionally looking into a "remove" event udev hook to clean up a mount point, e.g. if a user just pulls out the USB pen drive; while that naturally can never be fully hedged against, especially in terms of data safety, it would still be nice if re-plugging it works again. For the use case where one just uses an external drive as a cheap physical offsite copy, i.e. as a cheap tape replacement, nothing should be permanently broken, as one can just reinsert the drive and trigger a new sync and then do a clean unmount. In my tests with an XFS-formatted USB pen drive the mount entry is kept when I suddenly remove the drive (while the datastore is inactive though) and thus I get an error about the filesystem UUID already existing when the udev hook tries to mount it again when I re-plug the USB pen drive. We do not have to bend backwards for this, but atm. I can basically only resolve this by unmounting the old mountpoint manually, e.g. through a root shell (or reboot I guess), both not _that_ nice UX wise. Anyway: thanks for going the distance here, this is definitely one of those features where the initially envisioned amount of work was rather far off due to quite some bike shedding potential but also all the ugliness of dealing with transient mount points and resources that are not persistent. Let's fix rough edges and potential issues as follow-ups. From t.lamprecht at proxmox.com Mon Nov 25 22:09:25 2024 From: t.lamprecht at proxmox.com (Thomas Lamprecht) Date: Mon, 25 Nov 2024 22:09:25 +0100 Subject: [pbs-devel] [PATCH docs 1/3] docs: explain the working principle of the change detection modes In-Reply-To: <20241118092435.81880-2-c.ebner@proxmox.com> References: <20241118092435.81880-1-c.ebner@proxmox.com> <20241118092435.81880-2-c.ebner@proxmox.com> Message-ID: <5e5a54b1-3d33-402f-8033-089e63590b43@proxmox.com> Am 18.11.24 um 10:24 schrieb Christian Ebner: > Describe in more detail how the different change detection modes > operate and give insights into the inner workings, especially for the > more complex `metadata` mode, which involves lookahead caching and > padding calculation for reused payload chunks. > > Suggested-by: Dietmar Maurer > Signed-off-by: Christian Ebner
The list of reusable known chunks is send to the > client by the server, deduced from the indexed chunks of the previous > backup snapshot of the group. > > If however such a known chunk disappeared (the previous backup > snapshot having been verified before that or not verified just yet), > the backup will finish just fine, leading to a seemingly successful > backup. Only a subsequent verification job will detect the backup > snapshot as being corrupt. > > In order to reduce the impact, stat the list of previously known > chunks when finishing the backup. If a missing chunk is detected, the > backup run itself will fail and the previous backup snapshots verify > state is set to failed. > This prevents the same snapshot from being reused by another, > subsequent backup job. > > Note: > The current backup run might have been just fine, if the now missing > known chunk is not indexed. But since there is no straight forward > way to detect which known chunks have not been reused in the fast > incremental mode for fixed index backups, the backup run is > considered failed. > > link to issue in bugtracker: > https://bugzilla.proxmox.com/show_bug.cgi?id=5710 > > Signed-off-by: Christian Ebner > Tested-by: Gabriel Goller > Reviewed-by: Gabriel Goller > --- > Changes since version 3, thanks to Gabriel for additional comments: > - Use anyhow error context also for manifest update error > - Use `with_context` over mapping the error, which is more concise > > Changes since version 2, thanks to Gabriel for testing and review: > - Use and display anyhow error context > - s/backp/backup/ > > Changes since version 1, thanks to Dietmar and Gabriel for feedback: > - Only stat on backup finish > - Distinguish newly uploaded from previously known chunks, to be able > to only stat the latter. > > New test on my side show a performance degradation of ~2% for the VM > backup and about ~10% for the LXC backup as compared to an unpatched > server. > In contrast to version 1 of the patches the PBS datastore this time > was located on an NFS share backed by an NVME SSD. > > I did perform vzdump backups of a VM with a 32G disk attached and a > LXC container with a Debian install and rootfs of ca. 400M (both off, > no changes in data in-between backup runs). > Again performed 5 runs each after an initial run to assure full chunk > presence on server and valid previous snapshot. > > Here the updated figures: > > ----------------------------------------------------------- > patched | unpatched > ----------------------------------------------------------- > VM | LXC | VM | LXC > ----------------------------------------------------------- > 14.0s ? 0.8s | 2.2s ? 0.1s | 13.7s ? 0.5s | 2.0s ? 0.03s > ----------------------------------------------------------- please include this stuff in the actual commit message, it's nice to see as point-in-time sample when reading the git log. A comparison with bigger disks, say 1 TB, would be additionally great to see how this scales with big disk size. 
From c.ebner at proxmox.com Tue Nov 26 08:29:06 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 26 Nov 2024 08:29:06 +0100 Subject: [pbs-devel] [PATCH docs 1/3] docs: explain the working principle of the change detection modes In-Reply-To: <5e5a54b1-3d33-402f-8033-089e63590b43@proxmox.com> References: <20241118092435.81880-1-c.ebner@proxmox.com> <20241118092435.81880-2-c.ebner@proxmox.com> <5e5a54b1-3d33-402f-8033-089e63590b43@proxmox.com> Message-ID: <2f6448ed-e19d-4031-9636-2424c1e1bb75@proxmox.com> On 11/25/24 22:09, Thomas Lamprecht wrote: > Am 18.11.24 um 10:24 schrieb Christian Ebner: >> Describe in more details how the different change detection modes >> operate and give insights into the inner workings, especially for the >> more complex `metadata` mode, which involves lookahead caching and >> padding calculation for reused payload chunks. >> >> Suggested-by: Dietmar Maurer >> Signed-off-by: Christian Ebner > > it would be additionally good to describe why mtime, not ctime and when > metadata detection can fail (if user/tools do bad things). Agreed, will add more details on this as well. > > And for the overview it would be great to note why a data mode exists, as > there can be some confusions from users interpreting this as three different > ways to create the archive(s), not two ways to do that with two change detection > modes. > Could be also interesting to note that it works with older PBS in general beside > some archive specific features like file-browsing. > Okay, will also add a short note that since the legacy mode and the two other modes use different archive formats, they do not reuse chunks as efficiently when using mixed modes on the same datastore. > This mail is inspired a bit from the post I replied here: > > https://forum.proxmox.com/threads/proxmox-ve-8-3-released.157793/page-2#post-723212 From c.ebner at proxmox.com Tue Nov 26 08:36:02 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 26 Nov 2024 08:36:02 +0100 Subject: [pbs-devel] [PATCH v4 proxmox-backup] fix #5710: api: backup: stat known chunks on backup finish In-Reply-To: References: <20241008094617.96273-1-c.ebner@proxmox.com> Message-ID: On 11/25/24 22:42, Thomas Lamprecht wrote: > please include this stuff in the actual commit message, it's nice to see as > point-in-time sample when reading the git log. > A comparison with bigger disks, say 1 TB, would be additionally great to see > how this scales with big disk size. Thanks for feedback! I decided to not include these in the patch directly, as the tests performed were limited in extend and setup, so I was unsure how representative they actually are. I will however keep this in mind for next time, as this has already been applied as is. From f.gruenbichler at proxmox.com Tue Nov 26 09:09:27 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Tue, 26 Nov 2024 09:09:27 +0100 Subject: [pbs-devel] [PATCH proxmox-backup v2] reuse-datastore: avoid creating another default prune job In-Reply-To: <2pzigu3qbfxvkyi2q2jlhxq27xxlpen4cw5gbqvb6qamlqz4w5@ua4ebln6ipu3> References: <20241125085953.19828-1-g.goller@proxmox.com> <1732545364.j7koj2vyxl.astroid@yuna.none> <2pzigu3qbfxvkyi2q2jlhxq27xxlpen4cw5gbqvb6qamlqz4w5@ua4ebln6ipu3> Message-ID: <1732608399.v9wb05vwgw.astroid@yuna.none> On November 25, 2024 6:10 pm, Gabriel Goller wrote: > Was a bit too hasty on the previous reply. 
> > On 25.11.2024 15:37, Fabian Gr?nbichler wrote: >>On November 25, 2024 11:10 am, Christian Ebner wrote: >>> On 11/25/24 09:59, Gabriel Goller wrote: >>>> If a datastore with a default prune job is removed, the prune job is >>>> preserverd as it is stored in /etc/proxmox-backup/prune.cfg. We also >>>> create a default prune job for every datastore ? this means that when >>>> reusing a datastore that previously existed, you end up with duplicate >>>> prune jobs. >>> >>> Looking at this once more, I am not so sure anymore that this should >>> only check for the default prune job? Why not check if there is any >>> prune job configured at all for this datastore, and only if there is >>> none create the new default prune job? >> >>that would also work? >> >>- if no prune job exists for this store, create default one >>- if explicit prune job options where given, create that one >>- otherwise, don't add a prune job (no options given, and one exists >> already for this store) > > This is the behavior that we have now? > > What I intended with this patch was to ignore the default prune job > created by 'prune schedule' so that we don't create duplicated prune > jobs. no, if a non-default prune job exists already, the default one is still added even if just the schedule is set in the dialogue/parameters.. From c.ebner at proxmox.com Tue Nov 26 09:55:02 2024 From: c.ebner at proxmox.com (Christian Ebner) Date: Tue, 26 Nov 2024 09:55:02 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 1/1] docs: explain some further caveats of the change detection modes Message-ID: <20241126085502.77438-1-c.ebner@proxmox.com> Explain that the change detection mode data makes sure that no files are considered reusable, even if their metadata might match and that the use of ctime and inode number is not possible for detection of unchanged files if the filesystem was synced to a temporary location, therefore the mtime and size are used for detection. Also note the reduced deduplication when storing snaphshots with mixed archive formats on the same datastore. Further, mention the backwards compatibility to older version of the Proxmox Backup Server. Suggested-by: Thomas Lamprecht Signed-off-by: Christian Ebner --- docs/technical-overview.rst | 36 +++++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/docs/technical-overview.rst b/docs/technical-overview.rst index 21793c5c5..ac42da3a2 100644 --- a/docs/technical-overview.rst +++ b/docs/technical-overview.rst @@ -141,6 +141,25 @@ The change detection mode controls how to detect and act for files which did not change in-between subsequent backup runs as well as the archive file format used to encode the directory entries. +There are 3 modes available, the current default ``legacy`` mode, as well as the +``data`` and ``metadata`` mode. While the ``legacy`` mode encodes all contents +in a single ``pxar`` archive, the latter two modes split data and metadata into +``ppxar`` and ``mpxar`` archives. This is done to allow for fast comparison of +metadata with the previous snapshot, used by the ``metadata`` mode to detect +reusable files. The ``data`` mode refrains from reusing unchanged files by +rechunking the file uncoditionally. This mode therefore assures that no file +changes are missed even if the metadata are unchanged. + +.. NOTE:: ``pxar`` and ``mpxar``/``ppxar`` file formats are different and cannot + be deduplicated as efficiently if a datastore stores archive snapshots of + both types. 
+ +As the change detection modes are client side changes, they are backwards +compatible with older versions of Proxmox Backup Server. Exploring the backup +contents for the new archive format via the web interface requires however a +Proxmox Backup Server with version 3.2.5 or higher. Upgrading to the latest +version is recommended for full feature compatibility. + .. _change-detection-mode-legacy: Legacy Mode @@ -182,6 +201,11 @@ chunks. This is used for example for entry lookups to list the archive contents or to navigate the mounted filesystem via the FUSE implementation. No dedicated catalog is therefore created for archives encoded using this mode. +By not comparing metadata to the previous backup snapshot, no files will be +considered reusable by this mode, in contrast to the ``metadata`` mode. +Latter can reuse files which have changed, but file size and mtime did not +change because restored after changing the files contents. + .. _change-detection-mode-metadata: Metadata Mode @@ -191,9 +215,15 @@ The ``metadata`` mode detects files whose file metadata did not change in-between subsequent backup runs. The metadata comparison includes file size, file type, ownership and permission information, as well as acls and attributes and most importantly the file's mtime, for details see the -:ref:`pxar metadata archive format `. This mode will avoid -reading and rechunking the file contents whenever possible by reusing the file -content chunks of unchanged files from the previous backup snapshot. +:ref:`pxar metadata archive format `. Files ctime and inode +number are not stored and used for comparison, since some tools (e.g. +``vzdump``) might sync the contents of the filesystem to a temporary location +before actually performing the backup via the Proxmox backup client. For these +cases, ctime and inode number will always change. + +This mode will avoid reading and rechunking the file contents whenever possible +by reusing the file content chunks of unchanged files from the previous backup +snapshot. To compare the metadata, the previous snapshots ``mpxar`` metadata archive is downloaded at the start of the backup run and used as a reference. Further, the -- 2.39.5 From f.gruenbichler at proxmox.com Tue Nov 26 10:20:29 2024 From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=) Date: Tue, 26 Nov 2024 10:20:29 +0100 Subject: [pbs-devel] [PATCH proxmox-backup] (List)SyncDirection: extract match check into impl fn In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com> References: <20241125174012.678523-1-c.ebner@proxmox.com> Message-ID: <20241126092029.207319-1-f.gruenbichler@proxmox.com> in case we add another direction or another call site, doing it without a wildcard match arm seems cleaner. 
Signed-off-by: Fabian Gr?nbichler --- small cleanup as potential follow-up src/api2/admin/sync.rs | 16 ++++++++++++---- src/api2/config/sync.rs | 11 +++-------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs index 965be8d06..089e6f50d 100644 --- a/src/api2/admin/sync.rs +++ b/src/api2/admin/sync.rs @@ -47,6 +47,16 @@ impl From for ListSyncDirection { } } +impl ListSyncDirection { + /// Checks whether a `ListSyncDirection` matches a given `SyncDirection` + pub fn matches(&self, other: SyncDirection) -> bool { + if *self == ListSyncDirection::All { + return true; + } + *self == other.into() + } +} + #[api( input: { properties: { @@ -94,10 +104,8 @@ pub fn list_config_sync_jobs( _ => {} } - match &sync_direction { - ListSyncDirection::Pull if direction != SyncDirection::Pull => continue, - ListSyncDirection::Push if direction != SyncDirection::Push => continue, - _ => {} + if !sync_direction.matches(direction) { + continue; } if !check_sync_job_read_access(&user_info, &auth_id, &job) { diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs index e8a1ad076..bc012744a 100644 --- a/src/api2/config/sync.rs +++ b/src/api2/config/sync.rs @@ -15,9 +15,9 @@ use pbs_api_types::{ }; use pbs_config::sync; +use crate::api2::admin::sync::ListSyncDirection; use pbs_config::CachedUserInfo; use pbs_datastore::check_backup_owner; -use crate::api2::admin::sync::ListSyncDirection; pub fn check_sync_job_read_access( user_info: &CachedUserInfo, @@ -185,13 +185,8 @@ pub fn list_sync_jobs( let list = list .into_iter() .filter(|sync_job| { - let direction = sync_job.sync_direction.unwrap_or_default(); - match &sync_direction { - ListSyncDirection::Pull if direction != SyncDirection::Pull => return false, - ListSyncDirection::Push if direction != SyncDirection::Push => return false, - _ => {} - } - check_sync_job_read_access(&user_info, &auth_id, sync_job) + sync_direction.matches(sync_job.sync_direction.unwrap_or_default()) + && check_sync_job_read_access(&user_info, &auth_id, sync_job) }) .collect(); Ok(list) -- 2.39.5 From f.gruenbichler at proxmox.com Tue Nov 26 10:21:34 2024 From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=) Date: Tue, 26 Nov 2024 10:21:34 +0100 Subject: [pbs-devel] [PATCH proxmox-backup 0/4] use same config section type for all sync jobs In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com> References: <20241125174012.678523-1-c.ebner@proxmox.com> Message-ID: <1732612836.peebn527cp.astroid@yuna.none> FWIW, did a quick do-over and testing and found no issues with these two series combined. On November 25, 2024 6:40 pm, Christian Ebner wrote: > This patch series drops the `sync-push` config section type in favor of > using the same `sync` for both, sync jobs in push and pull direction. > Instead, encode the sync direction as optional parameter in the sync job > config, defaulting to sync in pull direction. This reduces complexity by > allowing to drop the optional parameter for most function calls. > For api methods, the default remains to only show sync directions in > pull direction, if no ListSyncDirection::All is passed, or the direction > explicitly selected. This allows to default to show both directions in > future Proxmox Backup Server version. 
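For readers following along, the semantics of the new `matches()` helper can
be demonstrated with a small, self-contained sketch; the enums below are
trimmed stand-ins for the pbs-api-types/API definitions, not the actual code:

```
// Simplified stand-ins for SyncDirection and ListSyncDirection, just to show
// the matches() contract introduced by the patch above.
#[derive(Clone, Copy, PartialEq, Debug)]
enum SyncDirection {
    Pull,
    Push,
}

#[derive(Clone, Copy, PartialEq, Debug)]
enum ListSyncDirection {
    Pull,
    Push,
    All,
}

impl From<SyncDirection> for ListSyncDirection {
    fn from(direction: SyncDirection) -> Self {
        match direction {
            SyncDirection::Pull => ListSyncDirection::Pull,
            SyncDirection::Push => ListSyncDirection::Push,
        }
    }
}

impl ListSyncDirection {
    // `All` matches any direction, otherwise the variants must correspond.
    fn matches(&self, other: SyncDirection) -> bool {
        if *self == ListSyncDirection::All {
            return true;
        }
        *self == other.into()
    }
}

fn main() {
    assert!(ListSyncDirection::All.matches(SyncDirection::Push));
    assert!(ListSyncDirection::Pull.matches(SyncDirection::Pull));
    assert!(!ListSyncDirection::Pull.matches(SyncDirection::Push));
    println!("matches() behaves as the filter loops above expect");
}
```

Both call sites in the patch reduce to exactly this check, which is why the
wildcard match arms could be dropped.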
From f.gruenbichler at proxmox.com  Tue Nov 26 10:21:34 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 10:21:34 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 0/4] use same config section type
 for all sync jobs
In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com>
References: <20241125174012.678523-1-c.ebner@proxmox.com>
Message-ID: <1732612836.peebn527cp.astroid@yuna.none>

FWIW, did a quick do-over and testing and found no issues with these two
series combined.

On November 25, 2024 6:40 pm, Christian Ebner wrote:
> This patch series drops the `sync-push` config section type in favor of
> using the same `sync` for both, sync jobs in push and pull direction.
> Instead, encode the sync direction as optional parameter in the sync job
> config, defaulting to sync in pull direction. This reduces complexity by
> allowing to drop the optional parameter for most function calls.
> For api methods, the default remains to only show sync directions in
> pull direction, if no ListSyncDirection::All is passed, or the direction
> explicitly selected. This allows to default to show both directions in
> a future Proxmox Backup Server version.
>
> This patch series depends on Dominik's patch series found here:
> https://lore.proxmox.com/pbs-devel/377618fd-0ea9-46ba-9aec-a47387eca50d at proxmox.com/T
>
> Christian Ebner (4):
>   config: sync: use same config section type `sync` for push and pull
>   api: admin/config: introduce sync direction as job config parameter
>   bin: show direction in sync job list output
>   api types: drop unused config type helpers for sync direction
>
>  pbs-api-types/src/jobs.rs              |  25 ++--
>  pbs-config/src/sync.rs                 |  17 +--
>  src/api2/admin/sync.rs                 |  18 +--
>  src/api2/config/datastore.rs           |  16 +--
>  src/api2/config/notifications/mod.rs   |  19 ++--
>  src/api2/config/sync.rs                | 151 ++++++++-----------------
>  src/bin/proxmox-backup-proxy.rs        |  22 +---
>  src/bin/proxmox_backup_manager/sync.rs |   6 +-
>  src/server/sync.rs                     |   2 +-
>  9 files changed, 88 insertions(+), 188 deletions(-)
>
> --
> 2.39.5
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>


From t.lamprecht at proxmox.com  Tue Nov 26 10:22:13 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 10:22:13 +0100
Subject: [pbs-devel] [PATCH docs 1/3] docs: explain the working principle
 of the change detection modes
In-Reply-To: <2f6448ed-e19d-4031-9636-2424c1e1bb75@proxmox.com>
References: <20241118092435.81880-1-c.ebner@proxmox.com>
 <20241118092435.81880-2-c.ebner@proxmox.com>
 <5e5a54b1-3d33-402f-8033-089e63590b43@proxmox.com>
 <2f6448ed-e19d-4031-9636-2424c1e1bb75@proxmox.com>
Message-ID: <5990610f-700c-45ed-b47c-06e214abc918@proxmox.com>

Am 26.11.24 um 08:29 schrieb Christian Ebner:
> On 11/25/24 22:09, Thomas Lamprecht wrote:
> Okay, will also add a short note that since the legacy mode and the two
> other modes use different archive formats, they do not reuse chunks as
> efficiently when using mixed modes on the same datastore.

Fine I guess, but this wasn't really what I had in mind here, and I would
not "bad-mouth" your nice work here too much hehe.
In the forum there was just some confusion about three change-modes maybe
meaning three formats. I.e., more that the data one replaces the legacy one
with the benefit of being able to seamlessly switch to metadata mode and back
again, as both use the same archive format/structure.

>> This mail is inspired a bit from the post I replied here:
>>
>> https://forum.proxmox.com/threads/proxmox-ve-8-3-released.157793/page-2#post-723212

^- if my reply was (hopefully) somewhat clear maybe something of it can be
used as base.


From l.wagner at proxmox.com  Tue Nov 26 10:30:25 2024
From: l.wagner at proxmox.com (Lukas Wagner)
Date: Tue, 26 Nov 2024 10:30:25 +0100
Subject: [pbs-devel] [PATCH proxmox] notify: remove irritating 'html
 template not found' log message
Message-ID: <20241126093025.44591-1-l.wagner@proxmox.com>

The proxmox-notify crate can render notification text based on two
different templates, plaintext and html. The html template is at the
moment only used for email-based notifications. If we try to render
a html-formatted message but there is no html template, we try to
fall back to the plaintext template and wrap the rendered message
in <pre> tags.
As a preparation for user-supplied/overridden templates, I added a log
message "html template not found, falling back to plaintext ..." to
educate the user about this behavior.

In Proxmox Backup Server, we only ship plaintext templates at the
moment, meaning that this log message will be shown for every single
(email) notification that is sent out. This might be a bit confusing,
because the log message can be interpreted as an error, which it isn't.

This commit removes the log message completely for now. Once we add
support for user-overridable notification templates we could consider
adding it back, but maybe phrased a bit differently, to avoid it
being interpreted as an error.

Signed-off-by: Lukas Wagner 
---
 proxmox-notify/src/renderer/mod.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/proxmox-notify/src/renderer/mod.rs b/proxmox-notify/src/renderer/mod.rs
index 82473d03..393cbbf2 100644
--- a/proxmox-notify/src/renderer/mod.rs
+++ b/proxmox-notify/src/renderer/mod.rs
@@ -290,7 +290,6 @@ pub fn render_template(
         (None, TemplateType::HtmlBody) => {
             ty = TemplateType::PlaintextBody;
             let plaintext_filename = format!("{template}-{suffix}", suffix = ty.file_suffix());
-            log::info!("html template '{filename}' not found, falling back to plain text template '{plaintext_filename}'");
             (
                 context::context().lookup_template(&plaintext_filename, None)?,
                 true,
-- 
2.39.5
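
For readers skimming the archive, the fallback behavior described in the
commit message above boils down to the following self-contained sketch;
`TemplateType`, `lookup_template`, and `render` are simplified stand-ins,
not the actual proxmox-notify API:

```
// Minimal model of the html -> plaintext template fallback: if no HTML
// template exists, render the plaintext one and wrap it in <pre> tags,
// without logging anything (as per the patch above).
#[derive(Clone, Copy, PartialEq)]
enum TemplateType {
    HtmlBody,
    PlaintextBody,
}

// Assumption for this sketch: only plaintext templates are shipped.
fn lookup_template(name: &str) -> Option<String> {
    if name.ends_with("-body.txt") {
        Some(format!("rendered contents of '{name}'"))
    } else {
        None
    }
}

fn render(template: &str, ty: TemplateType) -> Option<String> {
    let suffix = match ty {
        TemplateType::HtmlBody => "body.html",
        TemplateType::PlaintextBody => "body.txt",
    };
    match lookup_template(&format!("{template}-{suffix}")) {
        Some(text) => Some(text),
        // no HTML template found: silently fall back to plaintext
        None if ty == TemplateType::HtmlBody => {
            render(template, TemplateType::PlaintextBody)
                .map(|text| format!("<pre>{text}</pre>"))
        }
        None => None,
    }
}

fn main() {
    // the HTML lookup fails, so the plaintext template is used and wrapped
    println!("{:?}", render("gc-ok", TemplateType::HtmlBody));
}
```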




From f.gruenbichler at proxmox.com  Tue Nov 26 10:36:49 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 10:36:49 +0100
Subject: [pbs-devel] applied: [PATCH pxar] decoder: fix incorrect accounting
 for header in payload input
In-Reply-To: <20240918150047.485551-1-c.ebner@proxmox.com>
References: <20240918150047.485551-1-c.ebner@proxmox.com>
Message-ID: <1732613796.h9gwxsmx2k.astroid@yuna.none>

thanks (and also thanks for the reminder!)

On September 18, 2024 5:00 pm, Christian Ebner wrote:
> Payload entries are separated by headers of type PAYLOAD within the
> payload stream of split pxar archives, used for consistency checks
> when accessing the file contents via a reader instance.
> 
> Commit 5b8204d0 moved these consistency checks, so they only happen
> when actually accessing the content, thereby drastically improving
> performance when navigating contents via the metadata archive.
> 
> The commit however also incorrectly increased the `Decoder`'s
> `payload_consumed` field by the size of the header, in case the file
> payload has not been accessed by the `content_reader`.
> 
> As this field is used to account for consumed bytes while sequentially
> reading to possibly skip over entries, this leads to incorrect
> skipping of bytes in the stream (less than required). The main
> manifestation being that a pxar extract with provided match pattern
> failed.
> 
> Therefore, drop the incorrect accounting of the payload header.
> 
> Fixes: 5b8204d0 ("decoder: move payload header check for split input")
> 
> Signed-off-by: Christian Ebner 
> ---
> Stumbled across this one while working on issue 2996.
> Without this a
> ```
> pxar extract archive.mpxar target --payload-input archive.ppxar
> --pattern 
> ```
> will fail the payload header checks.
> 
>  src/decoder/mod.rs | 5 -----
>  1 file changed, 5 deletions(-)
> 
> diff --git a/src/decoder/mod.rs b/src/decoder/mod.rs
> index 6191627..613ec12 100644
> --- a/src/decoder/mod.rs
> +++ b/src/decoder/mod.rs
> @@ -299,14 +299,9 @@ impl DecoderImpl {
>                  }
>                  State::InPayload {
>                      offset,
> -                    header_checked,
>                      ..
>                  } => {
>                      if self.input.payload().is_some() {
> -                        if !header_checked {
> -                            // header is only checked if payload has been accessed
> -                            self.payload_consumed += size_of::<Header>() as u64;
> -                        }
>                         // Update consumed payload as given by the offset referenced by the content reader
>                         self.payload_consumed += offset;
>                     } else {
> --
> 2.39.2
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>


From c.ebner at proxmox.com  Tue Nov 26 10:45:28 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Tue, 26 Nov 2024 10:45:28 +0100
Subject: [pbs-devel] [PATCH docs 1/3] docs: explain the working principle
 of the change detection modes
In-Reply-To: <5990610f-700c-45ed-b47c-06e214abc918@proxmox.com>
References: <20241118092435.81880-1-c.ebner@proxmox.com>
 <20241118092435.81880-2-c.ebner@proxmox.com>
 <5e5a54b1-3d33-402f-8033-089e63590b43@proxmox.com>
 <2f6448ed-e19d-4031-9636-2424c1e1bb75@proxmox.com>
 <5990610f-700c-45ed-b47c-06e214abc918@proxmox.com>
Message-ID: 

On 11/26/24 10:22, Thomas Lamprecht wrote:
> Am 26.11.24 um 08:29 schrieb Christian Ebner:
>> On 11/25/24 22:09, Thomas Lamprecht wrote:
>> Okay, will also add a short note that since the legacy mode and the two
>> other modes use different archive formats, they do not reuse chunks as
>> efficiently when using mixed modes on the same datastore.
>
> Fine I guess, but this wasn't really what I had in mind here, and I would
> not "bad-mouth" your nice work here too much hehe.

Well, no intention of bad-mouthing it ;)

Nevertheless, I would not refrain from mentioning this, as there was some
feedback regarding increased size usage after switching modes (cannot find
the relevant forum thread at the moment). So letting users know that they
might take a closer look at the datastore disk usage after switching to one
of the new modes can only help.

> In the forum there was just some confusion about three change-modes maybe
> meaning three formats. I.e., more that the data one replaces the legacy one
> with the benefit of being able to seamlessly switch to metadata mode and back
> again, as both use the same archive format/structure.
>
>>> This mail is inspired a bit from the post I replied here:
>>>
>>> https://forum.proxmox.com/threads/proxmox-ve-8-3-released.157793/page-2#post-723212
>
> ^- if my reply was (hopefully) somewhat clear maybe something of it can be
> used as base.

I did already send a patch [0], but only loosely followed your reply in the
forum thread. Maybe we can improve based on that?

[0] https://lore.proxmox.com/pbs-devel/20241126085502.77438-1-c.ebner at proxmox.com/T/#u


From g.goller at proxmox.com  Tue Nov 26 10:51:12 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Tue, 26 Nov 2024 10:51:12 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2] reuse-datastore: avoid
 creating another default prune job
In-Reply-To: <1732608399.v9wb05vwgw.astroid@yuna.none>
References: <20241125085953.19828-1-g.goller@proxmox.com>
 <1732545364.j7koj2vyxl.astroid@yuna.none>
 <2pzigu3qbfxvkyi2q2jlhxq27xxlpen4cw5gbqvb6qamlqz4w5@ua4ebln6ipu3>
 <1732608399.v9wb05vwgw.astroid@yuna.none>
Message-ID: 

On 26.11.2024 09:09, Fabian Grünbichler wrote:
>On November 25, 2024 6:10 pm, Gabriel Goller wrote:
>> Was a bit too hasty on the previous reply.
>>
>> On 25.11.2024 15:37, Fabian Grünbichler wrote:
>>>On November 25, 2024 11:10 am, Christian Ebner wrote:
>>>> On 11/25/24 09:59, Gabriel Goller wrote:
>>>>> If a datastore with a default prune job is removed, the prune job is
>>>>> preserved as it is stored in /etc/proxmox-backup/prune.cfg. We also
>>>>> create a default prune job for every datastore - this means that when
>>>>> reusing a datastore that previously existed, you end up with duplicate
>>>>> prune jobs.
>>>>
>>>> Looking at this once more, I am not so sure anymore that this should
>>>> only check for the default prune job? Why not check if there is any
>>>> prune job configured at all for this datastore, and only if there is
>>>> none create the new default prune job?
>>>
>>>that would also work?
>>>
>>>- if no prune job exists for this store, create default one
>>>- if explicit prune job options were given, create that one
>>>- otherwise, don't add a prune job (no options given, and one exists
>>>  already for this store)
>>
>> This is the behavior that we have now?
>>
>> What I intended with this patch was to ignore the default prune job
>> created by 'prune schedule' so that we don't create duplicated prune
>> jobs.
>
>no, if a non-default prune job exists already, the default one is still
>added even if just the schedule is set in the dialogue/parameters..

Oh, got it. Will send a patch soon!


From g.goller at proxmox.com  Tue Nov 26 10:54:28 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Tue, 26 Nov 2024 10:54:28 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v3] reuse-datastore: avoid
 creating another prune job
Message-ID: <20241126095428.101682-1-g.goller@proxmox.com>

If a datastore with a prune job is removed, the prune job is preserved
as it is stored in /etc/proxmox-backup/prune.cfg. We also create a
default prune job for every datastore - this means that when reusing a
datastore that previously existed, you end up with duplicate prune jobs.
To avoid this we check if a prune job already exists, and when it does,
we refrain from creating the default one. (We also check if specific
keep-options have been added; if yes, then we create the job
nevertheless.)

Reported-by: Fabian Grünbichler
Signed-off-by: Gabriel Goller
---

v3, thanks @Christian and @Fabian:
 - don't rely on default-prune-jobs but check all
 - check if specific keep-options have been added

v2, thanks @Christian:
 - convert if-statement to inline condition

 src/api2/config/datastore.rs | 39 +++++++++++++++++++-----------------
 src/api2/config/prune.rs     | 11 ++++++++++
 2 files changed, 32 insertions(+), 18 deletions(-)

diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 9f2dac4b22ee..121222c40396 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -24,7 +24,7 @@ use crate::api2::admin::{
     datastore::do_mount_device, prune::list_prune_jobs, sync::list_config_sync_jobs,
     verify::list_verification_jobs,
 };
-use crate::api2::config::prune::{delete_prune_job, do_create_prune_job};
+use crate::api2::config::prune::{delete_prune_job, do_create_prune_job, has_prune_job};
 use crate::api2::config::sync::delete_sync_job;
 use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
 use crate::api2::config::verify::delete_verification_job;
@@ -204,23 +204,26 @@ pub fn create_datastore(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
-    let prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
-        let mut id = format!("default-{}-{}", config.name, Uuid::generate());
-        id.truncate(32);
-
-        PruneJobConfig {
-            id,
-            store: config.name.clone(),
-            comment: None,
-            disable: false,
-            schedule: schedule.clone(),
-            options: PruneJobOptions {
-                keep: config.keep.clone(),
-                max_depth: None,
-                ns: None,
-            },
-        }
-    });
+    let mut prune_job_config = None;
+    if config.keep.keeps_something() || !has_prune_job(&config.name)? {
+        prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
+            let mut id = format!("default-{}-{}", config.name, Uuid::generate());
+            id.truncate(32);
+
+            PruneJobConfig {
+                id,
+                store: config.name.clone(),
+                comment: None,
+                disable: false,
+                schedule: schedule.clone(),
+                options: PruneJobOptions {
+                    keep: config.keep.clone(),
+                    max_depth: None,
+                    ns: None,
+                },
+            }
+        });
+    }
 
     // clearing prune settings in the datastore config, as they are now handled by prune jobs
     let config = DataStoreConfig {
diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs
index ce7b8ce565ce..b433c248ac5a 100644
--- a/src/api2/config/prune.rs
+++ b/src/api2/config/prune.rs
@@ -77,6 +77,17 @@ pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> {
     Ok(())
 }
 
+pub fn has_prune_job(datastore: &str) -> Result<bool, Error> {
+    let (section_config, _digest) = prune::config()?;
+    for (_, (_, job_config)) in section_config.sections {
+        let job_config: PruneJobConfig = serde_json::from_value(job_config)?;
+        if job_config.store == datastore {
+            return Ok(true);
+        }
+    }
+    Ok(false)
+}
+
 #[api(
     protected: true,
     input: {
-- 
2.39.5
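The resulting decision can be summarized in a small stand-alone sketch;
the types below are simplified stand-ins, not the actual pbs-api-types or
pbs-config definitions:

```
// Models the check from the v3 patch above: a default prune job is only
// created if explicit keep options were given, or if no prune job exists
// yet for this datastore (and a schedule was requested at all).
#[derive(Default)]
struct KeepOptions {
    keep_last: Option<u64>,
}

impl KeepOptions {
    fn keeps_something(&self) -> bool {
        self.keep_last.is_some()
    }
}

fn should_create_default_prune_job(
    keep: &KeepOptions,
    existing_jobs_for_store: usize,
    schedule: Option<&str>,
) -> bool {
    schedule.is_some() && (keep.keeps_something() || existing_jobs_for_store == 0)
}

fn main() {
    // reused datastore with a leftover prune job and no explicit keep
    // options: no duplicate default job is created
    assert!(!should_create_default_prune_job(&KeepOptions::default(), 1, Some("daily")));
    // fresh datastore: the default job is created as before
    assert!(should_create_default_prune_job(&KeepOptions::default(), 0, Some("daily")));
}
```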
From g.goller at proxmox.com  Tue Nov 26 11:01:52 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Tue, 26 Nov 2024 11:01:52 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup v4 6/7] ui: show consent
 banner before login
In-Reply-To: <051476db-154f-49be-ae4b-fd764c22de5e@proxmox.com>
References: <20240913131033.396324-1-g.goller@proxmox.com>
 <20240913131033.396324-7-g.goller@proxmox.com>
 <051476db-154f-49be-ae4b-fd764c22de5e@proxmox.com>
Message-ID: 

On 25.11.2024 19:12, Thomas Lamprecht wrote:
>Am 13.09.24 um 15:10 schrieb Gabriel Goller:
>> Before showing the LoginView, check if we got a non-empty consent text
>> from the template. If there is a non-empty text, display it in a modal.
>>
>> Signed-off-by: Gabriel Goller
>> ---
>>  www/LoginView.js             | 12 ++++++++++++
>>  www/config/NodeOptionView.js |  6 ++++++
>>  www/index.hbs                |  1 +
>>  3 files changed, 19 insertions(+)
>>
>>
>
>applied, with the indentation mess in the init function a bit improved, thanks!

Thanks!

>btw. onlineHelp link would be great, IMO it's a bit of a confusing setting
>as is.

Will send two follow-up patches with this one and the small nit on the
first commit!


From t.lamprecht at proxmox.com  Tue Nov 26 11:12:10 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 11:12:10 +0100
Subject: [pbs-devel] [PATCH v4 proxmox-backup] fix #5710: api: backup: stat
 known chunks on backup finish
In-Reply-To: 
References: <20241008094617.96273-1-c.ebner@proxmox.com>
Message-ID: 

Am 26.11.24 um 08:36 schrieb Christian Ebner:
> On 11/25/24 22:42, Thomas Lamprecht wrote:
>> please include this stuff in the actual commit message, it's nice to see as
>> point-in-time sample when reading the git log.
>> A comparison with bigger disks, say 1 TB, would be additionally great to see
>> how this scales with big disk size.
>
> Thanks for the feedback!
>
> I decided to not include these in the patch directly, as the tests
> performed were limited in extent and setup, so I was unsure how
> representative they actually are.

In such cases it's fine to include them with exactly such a disclaimer.

> I will however keep this in mind for next time, as this has already been
> applied as is.

The data is out there so already good, I just have a strong preference of
having these things in the commit log too.


From d.csapak at proxmox.com  Tue Nov 26 11:15:45 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Tue, 26 Nov 2024 11:15:45 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: utils: add task description
 for mounting/unmounting
Message-ID: <20241126101545.1105047-1-d.csapak@proxmox.com>

Signed-off-by: Dominik Csapak
---
 www/Utils.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/www/Utils.js b/www/Utils.js
index 6bae9b709..484c607bd 100644
--- a/www/Utils.js
+++ b/www/Utils.js
@@ -417,6 +417,7 @@ Ext.define('PBS.Utils', {
 	'label-media': [gettext('Drive'), gettext('Label Media')],
 	'load-media': (type, id) => PBS.Utils.render_drive_load_media_id(id, gettext('Load Media')),
 	logrotate: [null, gettext('Log Rotation')],
+	'mount-device': [gettext('Datastore'), gettext('Mount Device')],
 	prune: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Prune')),
 	prunejob: (type, id) => PBS.Utils.render_prune_job_worker_id(id, gettext('Prune Job')),
 	reader: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Read Objects')),
@@ -427,6 +428,7 @@ Ext.define('PBS.Utils', {
 	'tape-backup-job': (type, id) => PBS.Utils.render_tape_backup_id(id, gettext('Tape Backup Job')),
 	'tape-restore': ['Datastore', gettext('Tape Restore')],
 	'unload-media': [gettext('Drive'), gettext('Unload Media')],
+	'unmount-device': [gettext('Datastore'), gettext('Un-mount Device')],
 	verificationjob: [gettext('Verify Job'), gettext('Scheduled Verification')],
 	verify: ['Datastore', gettext('Verification')],
 	verify_group: ['Group', gettext('Verification')],
-- 
2.39.5


From g.goller at proxmox.com  Tue Nov 26 11:31:56 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Tue, 26 Nov 2024 11:31:56 +0100
Subject: [pbs-devel] [PATCH widget-toolkit v4 1/7] utils: add base64
 conversion helper
In-Reply-To: 
References: <20240913131033.396324-1-g.goller@proxmox.com>
 <20240913131033.396324-2-g.goller@proxmox.com>
Message-ID: 

On 17.09.2024 09:37, Thomas Lamprecht wrote:
>On 13/09/2024 15:10, Gabriel Goller wrote:
>> Add helper functions to convert from a utf8 string to a base64 string
>> and vice-versa. Using the TextEncoder/TextDecoder we can support unicode
>> such as emojis as well [0].
>>
>> [0]: https://developer.mozilla.org/en-US/docs/Glossary/Base64#the_unicode_problem
>>
>> Signed-off-by: Gabriel Goller
>
>Reviewed-by: Thomas Lamprecht
>
>One small nit inline though.
>
>> ---
>>  src/Utils.js | 18 ++++++++++++++++++
>>  1 file changed, 18 insertions(+)
>>
>> diff --git a/src/Utils.js b/src/Utils.js
>> index 7dd034a5e56f..3badb6aaf606 100644
>> --- a/src/Utils.js
>> +++ b/src/Utils.js
>> @@ -1356,6 +1356,24 @@ utilities: {
>>  	);
>>      },
>>
>> +    // Convert utf-8 string to base64.
>> +    // This also escapes unicode characters such as emojis.
>> +    utf8ToBase64: function(string) {
>> +	let bytes = new TextEncoder().encode(string);
>> +	const escapedString = Array.from(bytes, (byte) =>
>> +	    String.fromCodePoint(byte),
>> +	).join("");
>
>FWIW this could be a bit shorter by using map (which typed arrays
>also support [0]):
>
>    const escapedString = bytes.map(b => String.fromCodePoint(b)).join('');
>
>But for that we really need no new revision.
>
>[0]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/map

I just tried this and it sadly doesn't work.
`String.fromCodePoint` returns a string, which can't be stored in the
`Uint8Array` which is `bytes`.


From g.goller at proxmox.com  Tue Nov 26 11:34:22 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Tue, 26 Nov 2024 11:34:22 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: add onlineHelp for
 consent-banner option
Message-ID: <20241126103422.145926-1-g.goller@proxmox.com>

Add onlineHelp link to the consent-banner docs section in the popup when
inserting the consent-banner text.

Reported-by: Thomas Lamprecht
Signed-off-by: Gabriel Goller
---
 docs/gui.rst                 | 2 ++
 www/config/NodeOptionView.js | 1 +
 2 files changed, 3 insertions(+)

diff --git a/docs/gui.rst b/docs/gui.rst
index acecb8bc7193..7e59ac1024bc 100644
--- a/docs/gui.rst
+++ b/docs/gui.rst
@@ -40,6 +40,8 @@ Proxmox Backup Server supports various languages and authentication back ends
 .. note:: For convenience, you can save the username on the client side, by
   selecting the "Save User name" checkbox at the bottom of the window.
 
+.. _consent_banner:
+
 Consent Banner
 ^^^^^^^^^^^^^^
 
diff --git a/www/config/NodeOptionView.js b/www/config/NodeOptionView.js
index 35938f9a3781..c327356f7f24 100644
--- a/www/config/NodeOptionView.js
+++ b/www/config/NodeOptionView.js
@@ -59,6 +59,7 @@ Ext.define('PBS.NodeOptionView', {
 	    name: 'consent-text',
 	    text: gettext('Consent Text'),
 	    deleteEmpty: true,
+	    onlineHelp: 'consent_banner',
 	},
     ],
 });
-- 
2.39.5


From f.gruenbichler at proxmox.com  Tue Nov 26 11:38:12 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 11:38:12 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup v3] reuse-datastore:
 avoid creating another prune job
In-Reply-To: <20241126095428.101682-1-g.goller@proxmox.com>
References: <20241126095428.101682-1-g.goller@proxmox.com>
Message-ID: <1732617445.l0pvxi3hp3.astroid@yuna.none>

thanks!

On November 26, 2024 10:54 am, Gabriel Goller wrote:
> If a datastore with a prune job is removed, the prune job is preserved
> as it is stored in /etc/proxmox-backup/prune.cfg. We also create a
> default prune job for every datastore - this means that when reusing a
> datastore that previously existed, you end up with duplicate prune jobs.
> To avoid this we check if a prune job already exists, and when it does,
> we refrain from creating the default one. (We also check if specific
> keep-options have been added; if yes, then we create the job
> nevertheless.)
>
> Reported-by: Fabian Grünbichler
> Signed-off-by: Gabriel Goller
> ---
>
> v3, thanks @Christian and @Fabian:
>  - don't rely on default-prune-jobs but check all
>  - check if specific keep-options have been added
>
> v2, thanks @Christian:
>  - convert if-statement to inline condition
>
>  src/api2/config/datastore.rs | 39 +++++++++++++++++++-----------------
>  src/api2/config/prune.rs     | 11 ++++++++++
>  2 files changed, 32 insertions(+), 18 deletions(-)
>
> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
> index 9f2dac4b22ee..121222c40396 100644
> --- a/src/api2/config/datastore.rs
> +++ b/src/api2/config/datastore.rs
> @@ -24,7 +24,7 @@ use crate::api2::admin::{
>      datastore::do_mount_device, prune::list_prune_jobs, sync::list_config_sync_jobs,
>      verify::list_verification_jobs,
>  };
> -use crate::api2::config::prune::{delete_prune_job, do_create_prune_job};
> +use crate::api2::config::prune::{delete_prune_job, do_create_prune_job, has_prune_job};
>  use crate::api2::config::sync::delete_sync_job;
>  use crate::api2::config::tape_backup_job::{delete_tape_backup_job, list_tape_backup_jobs};
>  use crate::api2::config::verify::delete_verification_job;
> @@ -204,23 +204,26 @@ pub fn create_datastore(
>      let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>      let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
>  
> -    let prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
> -        let mut id = format!("default-{}-{}", config.name, Uuid::generate());
> -        id.truncate(32);
> -
> -        PruneJobConfig {
> -            id,
> -            store: config.name.clone(),
> -            comment: None,
> -            disable: false,
> -            schedule: schedule.clone(),
> -            options: PruneJobOptions {
> -                keep: config.keep.clone(),
> -                max_depth: None,
> -                ns: None,
> -            },
> -        }
> -    });
> +    let mut prune_job_config = None;
> +    if config.keep.keeps_something() || !has_prune_job(&config.name)? {
> +        prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
> +            let mut id = format!("default-{}-{}", config.name, Uuid::generate());
> +            id.truncate(32);
> +
> +            PruneJobConfig {
> +                id,
> +                store: config.name.clone(),
> +                comment: None,
> +                disable: false,
> +                schedule: schedule.clone(),
> +                options: PruneJobOptions {
> +                    keep: config.keep.clone(),
> +                    max_depth: None,
> +                    ns: None,
> +                },
> +            }
> +        });
> +    }
>  
>      // clearing prune settings in the datastore config, as they are now handled by prune jobs
>      let config = DataStoreConfig {
> diff --git a/src/api2/config/prune.rs b/src/api2/config/prune.rs
> index ce7b8ce565ce..b433c248ac5a 100644
> --- a/src/api2/config/prune.rs
> +++ b/src/api2/config/prune.rs
> @@ -77,6 +77,17 @@ pub fn do_create_prune_job(config: PruneJobConfig) -> Result<(), Error> {
>      Ok(())
>  }
>  
> +pub fn has_prune_job(datastore: &str) -> Result<bool, Error> {
> +    let (section_config, _digest) = prune::config()?;
> +    for (_, (_, job_config)) in section_config.sections {
> +        let job_config: PruneJobConfig = serde_json::from_value(job_config)?;
> +        if job_config.store == datastore {
> +            return Ok(true);
> +        }
> +    }
> +    Ok(false)
> +}
> +
> #[api(
>     protected: true,
>     input: {
> -- 
> 2.39.5
>
>


From f.gruenbichler at proxmox.com  Tue Nov 26 11:39:42 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 11:39:42 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: utils: add task description
 for mounting/unmounting
In-Reply-To: <20241126101545.1105047-1-d.csapak@proxmox.com>
References: <20241126101545.1105047-1-d.csapak@proxmox.com>
Message-ID: <1732617544.qdqoqeoh5f.astroid@yuna.none>

On November 26, 2024 11:15 am, Dominik Csapak wrote:
> Signed-off-by: Dominik Csapak
> ---
>  www/Utils.js | 2 ++
>  1 file changed, 2 insertions(+)
>
> diff --git a/www/Utils.js b/www/Utils.js
> index 6bae9b709..484c607bd 100644
> --- a/www/Utils.js
> +++ b/www/Utils.js
> @@ -417,6 +417,7 @@ Ext.define('PBS.Utils', {
>  	'label-media': [gettext('Drive'), gettext('Label Media')],
>  	'load-media': (type, id) => PBS.Utils.render_drive_load_media_id(id, gettext('Load Media')),
>  	logrotate: [null, gettext('Log Rotation')],
> +	'mount-device': [gettext('Datastore'), gettext('Mount Device')],
>  	prune: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Prune')),
>  	prunejob: (type, id) => PBS.Utils.render_prune_job_worker_id(id, gettext('Prune Job')),
>  	reader: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Read Objects')),
> @@ -427,6 +428,7 @@ Ext.define('PBS.Utils', {
>  	'tape-backup-job': (type, id) => PBS.Utils.render_tape_backup_id(id, gettext('Tape Backup Job')),
>  	'tape-restore': ['Datastore', gettext('Tape Restore')],
>  	'unload-media': [gettext('Drive'), gettext('Unload Media')],
> +	'unmount-device': [gettext('Datastore'), gettext('Un-mount Device')],

nit: I'd prefer "Unmount" instead of "Un-mount".
we don't have a single "Un-" in the whole codebase AFAICT ;)

>  	verificationjob: [gettext('Verify Job'), gettext('Scheduled Verification')],
>  	verify: ['Datastore', gettext('Verification')],
>  	verify_group: ['Group', gettext('Verification')],
> -- 
> 2.39.5
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>


From d.csapak at proxmox.com  Tue Nov 26 11:45:49 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Tue, 26 Nov 2024 11:45:49 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2] ui: utils: add task
 description for mounting/unmounting
Message-ID: <20241126104549.1288016-1-d.csapak@proxmox.com>

Signed-off-by: Dominik Csapak
---
changes from v1:
* change 'Un-mount' to 'Unmount'

 www/Utils.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/www/Utils.js b/www/Utils.js
index 6bae9b709..52d7e1d37 100644
--- a/www/Utils.js
+++ b/www/Utils.js
@@ -417,6 +417,7 @@ Ext.define('PBS.Utils', {
 	'label-media': [gettext('Drive'), gettext('Label Media')],
 	'load-media': (type, id) => PBS.Utils.render_drive_load_media_id(id, gettext('Load Media')),
 	logrotate: [null, gettext('Log Rotation')],
+	'mount-device': [gettext('Datastore'), gettext('Mount Device')],
 	prune: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Prune')),
 	prunejob: (type, id) => PBS.Utils.render_prune_job_worker_id(id, gettext('Prune Job')),
 	reader: (type, id) => PBS.Utils.render_datastore_worker_id(id, gettext('Read Objects')),
@@ -427,6 +428,7 @@ Ext.define('PBS.Utils', {
 	'tape-backup-job': (type, id) => PBS.Utils.render_tape_backup_id(id, gettext('Tape Backup Job')),
 	'tape-restore': ['Datastore', gettext('Tape Restore')],
 	'unload-media': [gettext('Drive'), gettext('Unload Media')],
+	'unmount-device': [gettext('Datastore'), gettext('Unmount Device')],
 	verificationjob: [gettext('Verify Job'), gettext('Scheduled Verification')],
 	verify: ['Datastore', gettext('Verification')],
 	verify_group: ['Group', gettext('Verification')],
-- 
2.39.5


From d.csapak at proxmox.com  Tue Nov 26 11:46:08 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Tue, 26 Nov 2024 11:46:08 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: utils: add task description
 for mounting/unmounting
In-Reply-To: <1732617544.qdqoqeoh5f.astroid@yuna.none>
References: <20241126101545.1105047-1-d.csapak@proxmox.com>
 <1732617544.qdqoqeoh5f.astroid@yuna.none>
Message-ID: <69ffc8b5-896b-4982-8be1-846d90e8940f@proxmox.com>

done, sent a v2:

https://lore.proxmox.com/pbs-devel/20241126104549.1288016-1-d.csapak at proxmox.com/


From t.lamprecht at proxmox.com  Tue Nov 26 11:59:00 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 11:59:00 +0100
Subject: [pbs-devel] applied-series: [PATCH proxmox-backup v3 11/14] api:
 notification: add API routes for webhook targets
In-Reply-To: <20241108144124.273550-12-l.wagner@proxmox.com>
References: <20241108144124.273550-1-l.wagner@proxmox.com>
 <20241108144124.273550-12-l.wagner@proxmox.com>
Message-ID: <07e41e1f-137f-4d0d-a757-c08997bfba08@proxmox.com>

Am 08.11.24 um 15:41 schrieb Lukas Wagner:
> Copied and adapted from the Gotify ones.
>
> Signed-off-by: Lukas Wagner
> Tested-By: Stefan Hanreich
> ---
>  src/api2/config/notifications/mod.rs     |   2 +
>  src/api2/config/notifications/webhook.rs | 175 +++++++++++++++++++++++
>  2 files changed, 177 insertions(+)
>  create mode 100644 src/api2/config/notifications/webhook.rs
>
>

applied the four remaining PBS patches, thanks!

FYI I used the `--whitespace=fix` git apply/am option to drop a few trailing
whitespaces in the docs patches, just noting to avoid any surprise why this
cannot be directly rebased on.


From t.lamprecht at proxmox.com  Tue Nov 26 11:59:43 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 11:59:43 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup v2] ui: utils: add task
 description for mounting/unmounting
In-Reply-To: <20241126104549.1288016-1-d.csapak@proxmox.com>
References: <20241126104549.1288016-1-d.csapak@proxmox.com>
Message-ID: <8f595f18-b4cb-4358-91c7-d0cc1d915d71@proxmox.com>

Am 26.11.24 um 11:45 schrieb Dominik Csapak:
> Signed-off-by: Dominik Csapak
> ---
> changes from v1:
> * change 'Un-mount' to 'Unmount'
>
>  www/Utils.js | 2 ++
>  1 file changed, 2 insertions(+)
>
>

applied, thanks!


From s.hanreich at proxmox.com  Tue Nov 26 12:04:28 2024
From: s.hanreich at proxmox.com (Stefan Hanreich)
Date: Tue, 26 Nov 2024 12:04:28 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] client: fix example commands
 for client usage
Message-ID: <20241126110428.71348-1-s.hanreich@proxmox.com>

The example commands in the Change Detection Mode / File Exclusion
section are missing the command in the client invocation. Add the
backup command to the examples, so they are actually valid.

Signed-off-by: Stefan Hanreich
---
 docs/backup-client.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/backup-client.rst b/docs/backup-client.rst
index 3c6941ae..f2b5273c 100644
--- a/docs/backup-client.rst
+++ b/docs/backup-client.rst
@@ -272,13 +272,13 @@ parameter. For example:
 
 .. code-block:: console
 
-    # proxmox-backup-client backup.pxar:./linux --exclude /usr
+    # proxmox-backup-client backup backup.pxar:./linux --exclude /usr
 
 Multiple paths can be excluded like this:
 
 .. code-block:: console
 
-    # proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust
+    # proxmox-backup-client backup backup.pxar:./linux --exclude=/usr --exclude=/rust
 
 .. _client_change_detection_mode:
 
@@ -329,7 +329,7 @@ mode:
 
 .. code-block:: console
 
-    # proxmox-backup-client backup.pxar:./linux --change-detection-mode=metadata
+    # proxmox-backup-client backup backup.pxar:./linux --change-detection-mode=metadata
 
 .. _client_encryption:
 
-- 
2.39.5


From t.lamprecht at proxmox.com  Tue Nov 26 12:06:24 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 12:06:24 +0100
Subject: [pbs-devel] [PATCH widget-toolkit v4 1/7] utils: add base64
 conversion helper
In-Reply-To: 
References: <20240913131033.396324-1-g.goller@proxmox.com>
 <20240913131033.396324-2-g.goller@proxmox.com>
Message-ID: <738f24b2-c679-43f7-8fd0-e242676dcc4e@proxmox.com>

Am 26.11.24 um 11:31 schrieb Gabriel Goller:
> On 17.09.2024 09:37, Thomas Lamprecht wrote:
>> FWIW this could be a bit shorter by using map (which typed arrays
>> also support [0]):
>>
>>    const escapedString = bytes.map(b => String.fromCodePoint(b)).join('');
>>
>> But for that we really need no new revision.
>>
>> [0]: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/TypedArray/map
>
> I just tried this and it sadly doesn't work.
> `String.fromCodePoint` returns a string, which can't be stored in the
> `Uint8Array` which is `bytes`.

You are right, sorry for the misdirection. One would need the non-typed
array method for that, i.e.:

    var escapedString = Array.prototype.map.call(bytes, b => String.fromCodePoint(b)).join('');

Which really is not nicer than what you did, so let's go with that.


From t.lamprecht at proxmox.com  Tue Nov 26 12:08:16 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 12:08:16 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] ui: add onlineHelp for
 consent-banner option
In-Reply-To: <20241126103422.145926-1-g.goller@proxmox.com>
References: <20241126103422.145926-1-g.goller@proxmox.com>
Message-ID: <09f5612e-b058-4d39-b799-c584b4e9ab7d@proxmox.com>

Am 26.11.24 um 11:34 schrieb Gabriel Goller:
> Add onlineHelp link to the consent-banner docs section in the popup when
> inserting the consent-banner text.
>
> Reported-by: Thomas Lamprecht
> Signed-off-by: Gabriel Goller
> ---
>  docs/gui.rst                 | 2 ++
>  www/config/NodeOptionView.js | 1 +
>  2 files changed, 3 insertions(+)
>
>

applied, thanks!


From f.gruenbichler at proxmox.com  Tue Nov 26 12:09:09 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 12:09:09 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup 1/1] client: fix example
 commands for client usage
In-Reply-To: <20241126110428.71348-1-s.hanreich@proxmox.com>
References: <20241126110428.71348-1-s.hanreich@proxmox.com>
Message-ID: <1732619341.smylkvjjfx.astroid@yuna.none>

thanks!

On November 26, 2024 12:04 pm, Stefan Hanreich wrote:
> The example commands in the Change Detection Mode / File Exclusion
> section are missing the command in the client invocation. Add the
> backup command to the examples, so they are actually valid.
>
> Signed-off-by: Stefan Hanreich
> ---
>  docs/backup-client.rst | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
>
> diff --git a/docs/backup-client.rst b/docs/backup-client.rst
> index 3c6941ae..f2b5273c 100644
> --- a/docs/backup-client.rst
> +++ b/docs/backup-client.rst
> @@ -272,13 +272,13 @@ parameter. For example:
>  
>  .. code-block:: console
>  
> -    # proxmox-backup-client backup.pxar:./linux --exclude /usr
> +    # proxmox-backup-client backup backup.pxar:./linux --exclude /usr
>  
>  Multiple paths can be excluded like this:
>  
>  .. code-block:: console
>  
> -    # proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust
> +    # proxmox-backup-client backup backup.pxar:./linux --exclude=/usr --exclude=/rust
>  
>  .. _client_change_detection_mode:
>  
> @@ -329,7 +329,7 @@ mode:
>  
>  .. code-block:: console
>  
> -    # proxmox-backup-client backup.pxar:./linux --change-detection-mode=metadata
> +    # proxmox-backup-client backup backup.pxar:./linux --change-detection-mode=metadata
>  
>  .. _client_encryption:
>  
> -- 
> 2.39.5
>
>
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>
>


From t.lamprecht at proxmox.com  Tue Nov 26 12:09:57 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 12:09:57 +0100
Subject: [pbs-devel] applied: [PATCH proxmox] notify: remove irritating
 'html template not found' log message
In-Reply-To: <20241126093025.44591-1-l.wagner@proxmox.com>
References: <20241126093025.44591-1-l.wagner@proxmox.com>
Message-ID: <3407d59d-480c-407a-bfc4-f4cc28f49a9e@proxmox.com>

Am 26.11.24 um 10:30 schrieb Lukas Wagner:
> The proxmox-notify crate can render notification text based on two
> different templates, plaintext and html. The html template is at the
> moment only used for email-based notifications. If we try to render
> a html-formatted message but there is no html template, we try to
> fall back to the plaintext template and wrap the rendered message
> in <pre> tags.
> As a preparation for user-supplied/overridden templates, I added a log
> message "html template not found, falling back to plaintext ..." to
> educate the user about this behavior.
> 
> In Proxmox Backup Server, we only ship plaintext templates at the
> moment, meaning that this log message will be shown for every single
> (email) notification that is sent out. This might be a bit confusing,
> because the log message can be interpreted as an error, which it isn't.
> 
> This commit removes the log message completely for now. Once we add
> support for user-overridable notification templates we could consider
> adding it back, but maybe phrased a bit differently, to avoid it
> being interpreted as an error.
> 
> Signed-off-by: Lukas Wagner 
> ---
>  proxmox-notify/src/renderer/mod.rs | 1 -
>  1 file changed, 1 deletion(-)
> 
>

applied, thanks!

Anything pending for the notify crate? Otherwise I'd do a bump now already.



From t.lamprecht at proxmox.com  Tue Nov 26 12:12:42 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 12:12:42 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] client: fix example
 commands for client usage
In-Reply-To: <20241126110428.71348-1-s.hanreich@proxmox.com>
References: <20241126110428.71348-1-s.hanreich@proxmox.com>
Message-ID: <645e40b1-5b8a-4620-b3d3-fb1c19cf87bd@proxmox.com>

Am 26.11.24 um 12:04 schrieb Stefan Hanreich:
> The example commands in the Change Detection Mode / File Exclusion
> section are missing the command in the client invocation. Add the
> backup command to the examples, so they are actually valid.
> 
> Signed-off-by: Stefan Hanreich 
> ---
>  docs/backup-client.rst | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/docs/backup-client.rst b/docs/backup-client.rst
> index 3c6941ae..f2b5273c 100644
> --- a/docs/backup-client.rst
> +++ b/docs/backup-client.rst
> @@ -272,13 +272,13 @@ parameter. For example:
>  
>  .. code-block:: console
>  
> -    # proxmox-backup-client backup.pxar:./linux --exclude /usr
> +    # proxmox-backup-client backup backup.pxar:./linux --exclude /usr
>  
>  Multiple paths can be excluded like this:
>  
>  .. code-block:: console
>  
> -    # proxmox-backup-client backup.pxar:./linux --exclude=/usr --exclude=/rust
> +    # proxmox-backup-client backup backup.pxar:./linux --exclude=/usr --exclude=/rust
>  
>  .. _client_change_detection_mode:
>  
> @@ -329,7 +329,7 @@ mode:
>  
>  .. code-block:: console
>  
> -    # proxmox-backup-client backup.pxar:./linux --change-detection-mode=metadata
> +    # proxmox-backup-client backup backup.pxar:./linux --change-detection-mode=metadata

Fabian already applied this, but I'd also change the target pxar file name,
using "backup" for that is probably the the source  of the original mistake
and is IMO quite likely to confuse users, double words are often skipped
unconsciously by the brain (for some proven if they have missed the double
the in the second line ;)

>  
>  .. _client_encryption:
>  




From c.ebner at proxmox.com  Tue Nov 26 12:14:25 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Tue, 26 Nov 2024 12:14:25 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: use same label for removable
 datastore created from disk
Message-ID: <20241126111425.171791-1-c.ebner@proxmox.com>

The `Add datastore` window labels the flag for creating a removable
datastore as `Removable datastore`, while the creation dialog in the
storage/disks interface refers to it as `is removable`.

Use the same `Removable datastore` label for both locations.

Signed-off-by: Christian Ebner 
---
 www/window/CreateDirectory.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/window/CreateDirectory.js b/www/window/CreateDirectory.js
index 38d6979d9..56dae6c8f 100644
--- a/www/window/CreateDirectory.js
+++ b/www/window/CreateDirectory.js
@@ -56,7 +56,7 @@ Ext.define('PBS.window.CreateDirectory', {
 	{
 	    xtype: 'proxmoxcheckbox',
 	    name: 'removable-datastore',
-	    fieldLabel: gettext('is removable'),
+	    fieldLabel: gettext('Removable datastore'),
 	},
     ],
 });
-- 
2.39.5




From t.lamprecht at proxmox.com  Tue Nov 26 12:25:19 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 12:25:19 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] ui: use same label for
 removable datastore created from disk
In-Reply-To: <20241126111425.171791-1-c.ebner@proxmox.com>
References: <20241126111425.171791-1-c.ebner@proxmox.com>
Message-ID: <9c728573-8037-4f27-b80b-f6b16f1aaaf4@proxmox.com>

Am 26.11.24 um 12:14 schrieb Christian Ebner:
> The `Add datastore` window labels the flag for creating a removable
> datastore as `Removable datastore`, while creating the datastore via the
> storage/disks interface will refer to it as `is removable`.
> 
> Use the same `Removable datastore` as label for both locations.
> 
> Signed-off-by: Christian Ebner 
> ---
>  www/window/CreateDirectory.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Tue Nov 26 12:24:49 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 12:24:49 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup 1/1] docs: explain some
 further caveats of the change detection modes
In-Reply-To: <20241126085502.77438-1-c.ebner@proxmox.com>
References: <20241126085502.77438-1-c.ebner@proxmox.com>
Message-ID: <51c2fda9-f509-46b0-9162-c34c3057ca84@proxmox.com>

Am 26.11.24 um 09:55 schrieb Christian Ebner:
> Explain that the change detection mode data makes sure that no files
> are considered reusable, even if their metadata might match and that
> the use of ctime and inode number is not possible for detection of
> unchanged files if the filesystem was synced to a temporary location,
> therefore the mtime and size are used for detection.
> 
> Also note the reduced deduplication when storing snapshots with
> mixed archive formats on the same datastore.
> 
> Further, mention the backwards compatibility to older version of the
> Proxmox Backup Server.
> 
> Suggested-by: Thomas Lamprecht 
> Signed-off-by: Christian Ebner 
> ---
>  docs/technical-overview.rst | 36 +++++++++++++++++++++++++++++++++---
>  1 file changed, 33 insertions(+), 3 deletions(-)
> 
>

applied, thanks!
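
To make the mtime-based detection quoted above concrete, here is a minimal,
self-contained sketch of the reuse decision; `FileEntry` and its fields are
illustrative stand-ins, not the actual pxar metadata types:

```
// A file is only considered reusable if size and full mtime match the entry
// recorded in the previous snapshot's metadata archive; ctime and inode
// number are deliberately ignored, for the reasons given in the commit
// message above.
struct FileEntry {
    size: u64,
    mtime_secs: i64,
    mtime_nanos: u32,
}

fn is_reusable(previous: &FileEntry, current: &FileEntry) -> bool {
    previous.size == current.size
        && previous.mtime_secs == current.mtime_secs
        && previous.mtime_nanos == current.mtime_nanos
}

fn main() {
    let prev = FileEntry { size: 4096, mtime_secs: 1_732_600_000, mtime_nanos: 0 };
    let curr = FileEntry { size: 4096, mtime_secs: 1_732_600_000, mtime_nanos: 0 };
    // unchanged metadata -> the file's content chunks can be reused
    assert!(is_reusable(&prev, &curr));
}
```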




From s.hanreich at proxmox.com  Tue Nov 26 12:29:07 2024
From: s.hanreich at proxmox.com (Stefan Hanreich)
Date: Tue, 26 Nov 2024 12:29:07 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] client: change disk name
 from backup to disk
Message-ID: <20241126112907.86817-1-s.hanreich@proxmox.com>

The same word occurring twice in succession can lead to the brain
skipping the second occurrence. Change the name of the archives in the
example from backup.pxar to disk.pxar to avoid that effect.

Suggested-by: Thomas Lamprecht 
Signed-off-by: Stefan Hanreich 
---
 docs/backup-client.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/backup-client.rst b/docs/backup-client.rst
index f2b5273c..b9e91017 100644
--- a/docs/backup-client.rst
+++ b/docs/backup-client.rst
@@ -272,13 +272,13 @@ parameter. For example:
 
 .. code-block:: console
 
-    # proxmox-backup-client backup backup.pxar:./linux --exclude /usr
+    # proxmox-backup-client backup disk.pxar:./linux --exclude /usr
 
 Multiple paths can be excluded like this:
 
 .. code-block:: console
 
-    # proxmox-backup-client backup backup.pxar:./linux --exclude=/usr --exclude=/rust
+    # proxmox-backup-client backup disk.pxar:./linux --exclude=/usr --exclude=/rust
 
 .. _client_change_detection_mode:
 
@@ -329,7 +329,7 @@ mode:
 
 .. code-block:: console
 
-    # proxmox-backup-client backup backup.pxar:./linux --change-detection-mode=metadata
+    # proxmox-backup-client backup disk.pxar:./linux --change-detection-mode=metadata
 
 .. _client_encryption:
 
-- 
2.39.5



From s.hanreich at proxmox.com  Tue Nov 26 12:29:35 2024
From: s.hanreich at proxmox.com (Stefan Hanreich)
Date: Tue, 26 Nov 2024 12:29:35 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] client: fix example
 commands for client usage
In-Reply-To: <645e40b1-5b8a-4620-b3d3-fb1c19cf87bd@proxmox.com>
References: <20241126110428.71348-1-s.hanreich@proxmox.com>
 <645e40b1-5b8a-4620-b3d3-fb1c19cf87bd@proxmox.com>
Message-ID: 



On 11/26/24 12:12, Thomas Lamprecht wrote:
> Fabian already applied this, but I'd also change the target pxar file name,
> using "backup" for that is probably the the source  of the original mistake
> and is IMO quite likely to confuse users, double words are often skipped
> unconsciously by the brain (for some proven if they have missed the double
> the in the second line ;)

that is true indeed, sent another patch ;)



From c.ebner at proxmox.com  Tue Nov 26 12:34:59 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Tue, 26 Nov 2024 12:34:59 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] client: change disk name
 from backup to disk
In-Reply-To: <20241126112907.86817-1-s.hanreich@proxmox.com>
References: <20241126112907.86817-1-s.hanreich@proxmox.com>
Message-ID: <938543f3-8297-4c41-b053-030475958518@proxmox.com>

On 11/26/24 12:29, Stefan Hanreich wrote:
> The same word occurring twice in succession can lead to the brain
> skipping the second occurrence. Change the name of the archives in the
> example from backup.pxar to disk.pxar to avoid that effect.

nit: I would suggest using `archive-name.pxar` instead of `disk.pxar`,
as this is a file-level backup creating a pxar archive.



From h.laimer at proxmox.com  Tue Nov 26 12:43:23 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 12:43:23 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 5/5] ui: allow resetting
 unmounting maintenance
In-Reply-To: <20241126114323.105838-1-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
Message-ID: <20241126114323.105838-6-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer 
---
optional, just added it in case we want it

 www/window/MaintenanceOptions.js | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js
index 896d6a58e..8a88ba2b7 100644
--- a/www/window/MaintenanceOptions.js
+++ b/www/window/MaintenanceOptions.js
@@ -89,12 +89,11 @@ Ext.define('PBS.window.MaintenanceOptions', {
         let unmounting = options['maintenance-type'] === 'unmount';
         let defaultType = options['maintenance-type'] === '__default__';
         if (unmounting) {
-            options['maintenance-type'] = '';
+            options['maintenance-type'] = gettext('Unmounting');
         }
 
 	me.callParent([options]);
 
-        me.lookupReference('type-field').setDisabled(unmounting);
         me.lookupReference('message-field').setDisabled(unmounting || defaultType);
     },
 });
-- 
2.39.5




From h.laimer at proxmox.com  Tue Nov 26 12:43:18 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 12:43:18 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 0/5] removable datastore follow-up
Message-ID: <20241126114323.105838-1-h.laimer@proxmox.com>

Some follow-ups for removable datastore, including
 * fix sync with older pbs versions
 * add Sys.Modify permission for endpoints
 * add docs
 * allow resetting 'unmount' maintenance mode through API/CLI
    last patch also allows it through UI, just put it in, in case
    we want to allow that 


Hannes Laimer (5):
  api: maintenance: allow setting of maintenance mode if 'unmounting'
  api: add Sys.Modify on /system/disks as permission to endpoints
    handling removable datastores
  api: types: add 'mount_status' to schema
  docs: add information for removable datastores
  ui: allow resetting unmounting maintenance

 docs/storage.rst                 | 31 ++++++++++++++++++++++++++-----
 pbs-api-types/src/datastore.rs   | 10 +++++++++-
 src/api2/admin/datastore.rs      | 12 +++++++++---
 src/api2/config/datastore.rs     | 12 +++++++++---
 www/window/MaintenanceOptions.js |  3 +--
 5 files changed, 54 insertions(+), 14 deletions(-)

-- 
2.39.5




From h.laimer at proxmox.com  Tue Nov 26 12:43:19 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 12:43:19 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/5] api: maintenance: allow
 setting of maintenance mode if 'unmounting'
In-Reply-To: <20241126114323.105838-1-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
Message-ID: <20241126114323.105838-2-h.laimer@proxmox.com>

So it is possible to reset it after a failed unmount, or to abort an
unmount task by resetting it through the API.

Signed-off-by: Hannes Laimer 
---
 pbs-api-types/src/datastore.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 4927f3724..203e75e38 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -416,7 +416,7 @@ impl DataStoreConfig {
             Some(MaintenanceType::ReadOnly) => { /* always OK  */ }
             Some(MaintenanceType::Offline) => { /* always OK  */ }
             Some(MaintenanceType::Unmount) => {
-                bail!("datastore is being unmounted");
+                /* used to reset it after a failed unmount, or as an alternative for aborting an unmount task */
             }
             Some(MaintenanceType::Delete) => {
                 match new_type {
-- 
2.39.5
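
For context, the check this patch touches is essentially a transition table keyed on the
currently set maintenance type. A simplified, self-contained sketch of the behavior after
the patch (enum shape and function name assumed; the real check also inspects the
requested new type):

```
use anyhow::{bail, Error};

#[derive(Clone, Copy)]
enum MaintenanceType {
    ReadOnly,
    Offline,
    Unmount,
    Delete,
}

// decide whether the currently set maintenance type may be replaced
fn check_transition(current: Option<MaintenanceType>) -> Result<(), Error> {
    match current {
        None => { /* nothing set, always OK */ }
        Some(MaintenanceType::ReadOnly) => { /* always OK */ }
        Some(MaintenanceType::Offline) => { /* always OK */ }
        // after this patch: allowed, so a failed or hanging unmount
        // can be reset or aborted through the API
        Some(MaintenanceType::Unmount) => {}
        Some(MaintenanceType::Delete) => bail!("datastore is being deleted"),
    }
    Ok(())
}

fn main() {
    assert!(check_transition(Some(MaintenanceType::Unmount)).is_ok());
    assert!(check_transition(Some(MaintenanceType::Delete)).is_err());
}
```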




From h.laimer at proxmox.com  Tue Nov 26 12:43:22 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 12:43:22 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 4/5] docs: add information for
 removable datastores
In-Reply-To: <20241126114323.105838-1-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
Message-ID: <20241126114323.105838-5-h.laimer@proxmox.com>

Specifically about jobs and how they behave when the datastore is not
mounted, how to create and use devices with multiple datastores on
multiple PBS instances, and options for handling failed unmounts.

Signed-off-by: Hannes Laimer 
---
 docs/storage.rst | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/docs/storage.rst b/docs/storage.rst
index 361af4420..5cd8704c4 100644
--- a/docs/storage.rst
+++ b/docs/storage.rst
@@ -176,16 +176,32 @@ datastores, should be either ``ext4`` or ``xfs``.  It is also possible to create
 on completely unused disks through "Administration" > "Disks / Storage" > "Directory",
 using this method the disk will be partitioned and formatted automatically for the datastore.
 
-Devices with only one datastore on them will be mounted automatically. It is possible to create a
-removable datastore on one PBS and use it on multiple instances, the device just has to be added
-on each instance as a removable datastore by checking "reuse datastore" on creation.
-If the device already contains a datastore at the specified path it'll just be added as
-a new datastore to the PBS instance and will be mounted whenever plugged in. Unmounting has
+Devices with only one datastore on them will be mounted automatically. Unmounting has
 to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
+If unmounting fails, the reason is logged in the unmount task, and the datastore
+will stay in maintenance mode ``unmounting``, which prevents any IO operations. If that
+happens, the maintenance mode has to be reset manually using:
+
+.. code-block:: console
+
+  # proxmox-backup-manager datastore update store1 --maintenance-mode offline
+
+to prevent any IO, or to clear it use:
+
+.. code-block:: console
+
+  # proxmox-backup-manager datastore update store1 --delete maintenance-mode
+
 
 A single device can house multiple datastores, the only limitation is that they are not
 allowed to be nested.
 
+Removable datastores are created on the device with the given relative path that is specified
+on creation. In order to use a datastore on multiple PBS instances, it has to be created on one,
+and added with ``Reuse existing datastore`` checked on the others. The path set on creation
+is how multiple datastores on a single device are identified. So when adding it on a new PBS
+instance, it has to match what was set on creation.
+
 .. code-block:: console
 
   # proxmox-backup-manager datastore unmount store1
@@ -202,6 +218,11 @@ All datastores present on a device can be listed using ``proxmox-backup-debug``.
   # proxmox-backup-debug inspect device /dev/...
 
 
+Verify jobs are skipped if the removable datastore is not mounted when they are scheduled.
+Sync jobs start, but fail with an error saying the datastore was not mounted. The reason is that
+syncs not happening as scheduled should at least be noticeable. GC and pruning, like verification,
+are skipped without a failed task if the datastore is not mounted.
+
 
 Managing Datastores
 ^^^^^^^^^^^^^^^^^^^
-- 
2.39.5




From h.laimer at proxmox.com  Tue Nov 26 12:43:20 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 12:43:20 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/5] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <20241126114323.105838-1-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
Message-ID: <20241126114323.105838-3-h.laimer@proxmox.com>

Suggested-by: Fabian Grünbichler 
Signed-off-by: Hannes Laimer 
---
 src/api2/admin/datastore.rs  | 12 +++++++++---
 src/api2/config/datastore.rs | 12 +++++++++---
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 1c939bc20..cae7eb89c 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -45,7 +45,7 @@ use pbs_api_types::{
     BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
     IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
     PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
-    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
+    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA,
     VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
@@ -2512,7 +2512,10 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
         schema: UPID_SCHEMA,
     },
     access: {
-        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+        permission: &Permission::And(&[
+            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
+        ]),
     },
 )]
 /// Mount removable datastore.
@@ -2625,7 +2628,10 @@ fn do_unmount_device(
         schema: UPID_SCHEMA,
     },
     access: {
-        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+        permission: &Permission::And(&[
+            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
+        ]),
     }
 )]
 /// Unmount a removable device that is associated with the datastore
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 121222c40..359b676a5 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -14,7 +14,7 @@ use proxmox_uuid::Uuid;
 use pbs_api_types::{
     Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
     MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
-    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY,
     PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
 };
 use pbs_config::BackupLockGuard;
@@ -173,7 +173,10 @@ pub(crate) fn do_create_datastore(
         },
     },
     access: {
-        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
+        permission: &Permission::And(&[
+            &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
+            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
+        ]),
     },
 )]
 /// Create new datastore config.
@@ -551,7 +554,10 @@ pub fn update_datastore(
         },
     },
     access: {
-        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
+        permission: &Permission::And(&[
+            &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
+            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
+        ]),
     },
     returns: {
         schema: UPID_SCHEMA,
-- 
2.39.5




From h.laimer at proxmox.com  Tue Nov 26 12:43:21 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 12:43:21 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 3/5] api: types: add
 'mount_status' to schema
In-Reply-To: <20241126114323.105838-1-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
Message-ID: <20241126114323.105838-4-h.laimer@proxmox.com>

... and deserialize with default if field is missing in data.

Reported-by: Aaron Lauterer 
Fixes: 76609915d6 ("pbs-api-types: add mount_status field to DataStoreListItem")
Signed-off-by: Hannes Laimer 
---
 pbs-api-types/src/datastore.rs | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 203e75e38..90f1195bf 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -452,6 +452,9 @@ impl DataStoreConfig {
             optional: true,
             schema: SINGLE_LINE_COMMENT_SCHEMA,
         },
+        "mount-status": {
+            type: DataStoreMountStatus,
+        },
         maintenance: {
             optional: true,
             format: &ApiStringFormat::PropertyString(&MaintenanceMode::API_SCHEMA),
@@ -465,6 +468,7 @@ impl DataStoreConfig {
 pub struct DataStoreListItem {
     pub store: String,
     pub comment: Option,
+    #[serde(default)]
     pub mount_status: DataStoreMountStatus,
     /// If the datastore is in maintenance mode, information about it
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -1447,6 +1451,9 @@ pub struct DataStoreStatus {
         store: {
             schema: DATASTORE_SCHEMA,
         },
+        "mount-status": {
+            type: DataStoreMountStatus,
+        },
         history: {
             type: Array,
             optional: true,
@@ -1471,6 +1478,7 @@ pub struct DataStoreStatusListItem {
     /// The available bytes of the underlying storage. (-1 on error)
     #[serde(skip_serializing_if = "Option::is_none")]
     pub avail: Option,
+    #[serde(default)]
     pub mount_status: DataStoreMountStatus,
     /// A list of usages of the past (last Month).
     #[serde(skip_serializing_if = "Option::is_none")]
-- 
2.39.5
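
A note on the mechanism relied on above: `#[serde(default)]` makes responses from older
servers, which lack the field entirely, deserialize cleanly instead of erroring out.
Minimal self-contained sketch (enum shape and default variant assumed; deps: serde with
the `derive` feature, serde_json):

```
use serde::Deserialize;

// illustrative stand-in for DataStoreMountStatus in pbs-api-types
#[derive(Debug, Default, Deserialize, PartialEq)]
#[serde(rename_all = "kebab-case")]
enum MountStatus {
    Mounted,
    NotMounted,
    #[default]
    NonRemovable, // assumed default for non-removable datastores
}

#[derive(Debug, Deserialize)]
struct ListItem {
    store: String,
    #[serde(default)] // falls back to MountStatus::default() if missing
    mount_status: MountStatus,
}

fn main() {
    // a response from an older PBS version, without the new field
    let item: ListItem = serde_json::from_str(r#"{ "store": "store1" }"#).unwrap();
    assert_eq!(item.mount_status, MountStatus::NonRemovable);
    println!("{item:?}");
}
```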




From t.lamprecht at proxmox.com  Tue Nov 26 12:46:05 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 12:46:05 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup 1/1] client: change disk
 name from backup to disk
In-Reply-To: <20241126112907.86817-1-s.hanreich@proxmox.com>
References: <20241126112907.86817-1-s.hanreich@proxmox.com>
Message-ID: 

On 26.11.24 at 12:29, Stefan Hanreich wrote:
> The same word occuring twice in succession can lead to the brain
> skipping the second occurence. Change the name of the archives in the
> example from backup.pxar to disk.pxar to avoid that effect.
> 
> Suggested-by: Thomas Lamprecht 
> Signed-off-by: Stefan Hanreich 
> ---
>  docs/backup-client.rst | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
>

applied, with Chris' suggestion to use 'archive-name' squashed in and two
missing 'r' typos for occurring/occurrence in the commit message fixed
while at it, thanks!



From l.wagner at proxmox.com  Tue Nov 26 12:46:57 2024
From: l.wagner at proxmox.com (Lukas Wagner)
Date: Tue, 26 Nov 2024 12:46:57 +0100
Subject: [pbs-devel] applied: [PATCH proxmox] notify: remove irritating
 'html template not found' log message
In-Reply-To: <3407d59d-480c-407a-bfc4-f4cc28f49a9e@proxmox.com>
References: <20241126093025.44591-1-l.wagner@proxmox.com>
 <3407d59d-480c-407a-bfc4-f4cc28f49a9e@proxmox.com>
Message-ID: <77604e17-2e4c-46c4-8483-77106765f78d@proxmox.com>

On  2024-11-26 12:09, Thomas Lamprecht wrote:
> applied, thanks!
> 
> Anything pending for the notify crate? As else I'd do a bump now already.
> 
> 

As mentioned off-list, the only thing left open for proxmox-notify at the moment is

https://lore.proxmox.com/pve-devel/20241122100815.67255-1-l.wagner at proxmox.com/T/

Also requires a bump for proxmox-http, not sure if we want to do this at the moment
or later.

-- 
- Lukas



From s.ivanov at proxmox.com  Tue Nov 26 13:03:57 2024
From: s.ivanov at proxmox.com (Stoiko Ivanov)
Date: Tue, 26 Nov 2024 13:03:57 +0100
Subject: [pbs-devel] [PATCH] kernel: enable codepage 437 for vfat support
Message-ID: <20241126120357.389046-1-s.ivanov@proxmox.com>

ran into an issue when clicking on the ESP of a VM while trying
single-file restore.

the added config options are taken from config-6.5.13-6-pve
(the restore-image is still based on kernel 6.5) - and I tried copying
a small set around the needed options.

with the patch the ESP contents are shown successfully

Signed-off-by: Stoiko Ivanov 
---
 src/config-base | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/config-base b/src/config-base
index 1581b09..238d93c 100644
--- a/src/config-base
+++ b/src/config-base
@@ -144,6 +144,11 @@ CONFIG_ISO9660_FS=y
 CONFIG_NTFS3_FS=y
 CONFIG_MSDOS_FS=y
 CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
 
 # memory hotplug
 CONFIG_MEMORY_HOTPLUG=y
-- 
2.39.5




From f.gruenbichler at proxmox.com  Tue Nov 26 13:07:36 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 13:07:36 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/5] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <20241126114323.105838-3-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
 <20241126114323.105838-3-h.laimer@proxmox.com>
Message-ID: <1732622272.pgtz2hjshk.astroid@yuna.none>

On November 26, 2024 12:43 pm, Hannes Laimer wrote:
> Suggested-by: Fabian Grünbichler 
> Signed-off-by: Hannes Laimer 
> ---
>  src/api2/admin/datastore.rs  | 12 +++++++++---
>  src/api2/config/datastore.rs | 12 +++++++++---
>  2 files changed, 18 insertions(+), 6 deletions(-)
> 
> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
> index 1c939bc20..cae7eb89c 100644
> --- a/src/api2/admin/datastore.rs
> +++ b/src/api2/admin/datastore.rs
> @@ -45,7 +45,7 @@ use pbs_api_types::{
>      BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
>      IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
>      PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
> -    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
> +    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA,
>      VERIFICATION_OUTDATED_AFTER_SCHEMA,
>  };
>  use pbs_client::pxar::{create_tar, create_zip};
> @@ -2512,7 +2512,10 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
>          schema: UPID_SCHEMA,
>      },
>      access: {
> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
> +        permission: &Permission::And(&[
> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
> +        ]),

I am not 100% sure this part should require Sys.Modify.. somebody needs
to have set up the datastore already, just mounting seems benign in that
case?

>      },
>  )]
>  /// Mount removable datastore.
> @@ -2625,7 +2628,10 @@ fn do_unmount_device(
>          schema: UPID_SCHEMA,
>      },
>      access: {
> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
> +        permission: &Permission::And(&[
> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
> +        ]),

same logic would apply here..

>      }
>  )]
>  /// Unmount a removable device that is associated with the datastore
> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
> index 121222c40..359b676a5 100644
> --- a/src/api2/config/datastore.rs
> +++ b/src/api2/config/datastore.rs
> @@ -14,7 +14,7 @@ use proxmox_uuid::Uuid;
>  use pbs_api_types::{
>      Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
>      MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
> -    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
> +    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY,
>      PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
>  };
>  use pbs_config::BackupLockGuard;
> @@ -173,7 +173,10 @@ pub(crate) fn do_create_datastore(
>          },
>      },
>      access: {
> -        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
> +        permission: &Permission::And(&[
> +            &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
> +        ]),

this now affects regular datastores as well, it should probably be
inside the API handler and conditionalized on backing_device being set?
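
something like this inside the handler, sketched (field and helper names assumed from
the surrounding code, not verified against the current tree):

```
// only removable datastores touch disk handling, so gate the extra
// Sys.Modify requirement on a backing device being configured
if datastore.backing_device.is_some() {
    user_info.check_privs(&auth_id, &["system", "disks"], PRIV_SYS_MODIFY, false)?;
}
```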

>      },
>  )]
>  /// Create new datastore config.
> @@ -551,7 +554,10 @@ pub fn update_datastore(
>          },
>      },
>      access: {
> -        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
> +        permission: &Permission::And(&[
> +            &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
> +        ]),

and this is not needed at all, since path and backing_device are fixed
after creation?

>      },
>      returns: {
>          schema: UPID_SCHEMA,
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 



From f.gruenbichler at proxmox.com  Tue Nov 26 13:09:54 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 13:09:54 +0100
Subject: [pbs-devel] partially applied: [PATCH proxmox-backup 0/5] removable
 datastore follow-up
In-Reply-To: <20241126114323.105838-1-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
Message-ID: <1732622803.aqmf4bo5sg.astroid@yuna.none>

all but patches #2 and #5 applied, with a small follow-up for the docs patch
and some typos in commit one fixed up ;)

On November 26, 2024 12:43 pm, Hannes Laimer wrote:
> Some follow-ups for removable datastore, including
>  * fix sync with older pbs versions
>  * add Sys.Modify permission for endpoints
>  * add docs
>  * allow resetting 'unmount' maintenance mode through API/CLI
>     last patch also allows it through UI, just put it in, in case
>     we want to allow that 
> 
> 
> Hannes Laimer (5):
>   api: maintenance: allow setting of maintenance mode if 'unmounting'
>   api: add Sys.Modify on /system/disks as permission to endpoints
>     handling removable datastores
>   api: types: add 'mount_status' to schema
>   docs: add information for removable datastores
>   ui: allow resetting unmounting maintenance
> 
>  docs/storage.rst                 | 31 ++++++++++++++++++++++++++-----
>  pbs-api-types/src/datastore.rs   | 10 +++++++++-
>  src/api2/admin/datastore.rs      | 12 +++++++++---
>  src/api2/config/datastore.rs     | 12 +++++++++---
>  www/window/MaintenanceOptions.js |  3 +--
>  5 files changed, 54 insertions(+), 14 deletions(-)
> 
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 
> 
> 



From f.gruenbichler at proxmox.com  Tue Nov 26 13:11:14 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 13:11:14 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 5/5] ui: allow resetting
 unmounting maintenance
In-Reply-To: <20241126114323.105838-6-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
 <20241126114323.105838-6-h.laimer@proxmox.com>
Message-ID: <1732622998.1oln17dk58.astroid@yuna.none>

Acked-by: Fabian Grünbichler 

but not applied in case somebody else wants to keep this away from the
UI for now ;)

On November 26, 2024 12:43 pm, Hannes Laimer wrote:
> Signed-off-by: Hannes Laimer 
> ---
> optional, just added it in case we want it
> 
>  www/window/MaintenanceOptions.js | 3 +--
>  1 file changed, 1 insertion(+), 2 deletions(-)
> 
> diff --git a/www/window/MaintenanceOptions.js b/www/window/MaintenanceOptions.js
> index 896d6a58e..8a88ba2b7 100644
> --- a/www/window/MaintenanceOptions.js
> +++ b/www/window/MaintenanceOptions.js
> @@ -89,12 +89,11 @@ Ext.define('PBS.window.MaintenanceOptions', {
>          let unmounting = options['maintenance-type'] === 'unmount';
>          let defaultType = options['maintenance-type'] === '__default__';
>          if (unmounting) {
> -            options['maintenance-type'] = '';
> +            options['maintenance-type'] = gettext('Unmounting');
>          }
>  
>  	me.callParent([options]);
>  
> -        me.lookupReference('type-field').setDisabled(unmounting);
>          me.lookupReference('message-field').setDisabled(unmounting || defaultType);
>      },
>  });
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 
> 
> 



From c.ebner at proxmox.com  Tue Nov 26 13:24:19 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Tue, 26 Nov 2024 13:24:19 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api types: add missing conf to
 blob archive name mapping
Message-ID: <20241126122419.235890-1-c.ebner@proxmox.com>

Commit addfae26 ("api types: introduce `BackupArchiveName` type")
introduced a dedicated archive name api type to add rust type
checking and bundle helpers to the api type. Since this, the backup
archive name to server archive name mapping is handled by its parser.

This however did not cover the `.conf` extension used for VM config
files. Add the missing `.conf` to `.conf.blob` to the match statement
and the test cases.

Fixes: addfae26 ("api types: introduce `BackupArchiveName` type")
Reported-by: Stoiko Ivanov 
Signed-off-by: Christian Ebner 
---
 pbs-api-types/src/datastore.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index 4927f3724..688b7dd03 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -1833,6 +1833,7 @@ impl BackupArchiveName {
             Some("ppxar") => ArchiveType::DynamicIndex,
             Some("pcat1") => ArchiveType::DynamicIndex,
             Some("img") => ArchiveType::FixedIndex,
+            Some("conf") => ArchiveType::Blob,
             Some("json") => ArchiveType::Blob,
             Some("key") => ArchiveType::Blob,
             Some("log") => ArchiveType::Blob,
@@ -1910,6 +1911,8 @@ mod tests {
             "/valid/rsa-encrypted.key.blob",
             "/valid/archive-name.log",
             "/valid/archive-name.log.blob",
+            "/valid/qemu-server.conf",
+            "/valid/qemu-server.conf.blob",
         ];
 
         for archive_name in valid_archive_names {
-- 
2.39.5




From t.lamprecht at proxmox.com  Tue Nov 26 13:26:56 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 13:26:56 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/5] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <1732622272.pgtz2hjshk.astroid@yuna.none>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
 <20241126114323.105838-3-h.laimer@proxmox.com>
 <1732622272.pgtz2hjshk.astroid@yuna.none>
Message-ID: <69210925-3325-491a-a057-dc5c096a4025@proxmox.com>

This is missing a commit message explaining the rationale.

On 26.11.24 at 13:07, Fabian Grünbichler wrote:
>> @@ -2512,7 +2512,10 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
>>          schema: UPID_SCHEMA,
>>      },
>>      access: {
>> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
> I am not 100% sure this part should require Sys.Modify.. somebody needs
> to have set up the datastore already, just mounting seems benign in that
> case?

Mounting is always a bit of an involved operation as it can result in
IO hangs, so just requiring Audit on the store seems IMO rather too low of a
requirement. The Audit privs are not for things that alter the system state,
but rather for pure observation. Sys.Modify might not be ideal, but IMO
definitely better than the status quo; "Datastore.Modify" might be fine too
though.

>>      },
>>  )]
>>  /// Mount removable datastore.
>> @@ -2625,7 +2628,10 @@ fn do_unmount_device(
>>          schema: UPID_SCHEMA,
>>      },
>>      access: {
>> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
> same logic would apply here..
> 
>>      }
>>  )]
>>  /// Unmount a removable device that is associated with the datastore

here the status quo requires "Datastore.Modify", which is better, but if we
go for Sys.Modify above I'd not have any objection to use it also here.
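
for completeness: `Permission::And` in the hunks above just means every listed check has
to pass. Conceptually (simplified sketch, not the actual proxmox-router implementation):

```
// conceptual model of the permission tree evaluation
enum Permission<'a> {
    Privilege(&'a [&'a str], u64, bool),
    And(&'a [&'a Permission<'a>]),
}

fn check(perm: &Permission, has_priv: &dyn Fn(&[&str], u64, bool) -> bool) -> bool {
    match perm {
        Permission::Privilege(path, privs, partial) => has_priv(path, *privs, *partial),
        // every sub-permission has to match
        Permission::And(list) => list.iter().all(|p| check(p, has_priv)),
    }
}
```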



From f.gruenbichler at proxmox.com  Tue Nov 26 13:26:47 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 13:26:47 +0100
Subject: [pbs-devel] [PATCH proxmox-backup-restore-image] kernel: enable
 codepage 437 for vfat support
In-Reply-To: <20241126120357.389046-1-s.ivanov@proxmox.com>
References: <20241126120357.389046-1-s.ivanov@proxmox.com>
Message-ID: <1732623296.vf1tluw47l.astroid@yuna.none>

adding missing subject prefix ;)

On November 26, 2024 1:03 pm, Stoiko Ivanov wrote:
> ran into an issue when clicking on the ESP of a VM while trying
> single-file restore.
> 
> the added config options are taken from config-6.5.13-6-pve
> (the restore-image is still based on kernel 6.5) - and I tried copying
> a small set around the needed options.

maybe we should upgrade to a newer one at some point (might also benefit
from NTFS bug fixes and added features..)

> 
> with the patch the ESP contents are shown successfully
> 
> Signed-off-by: Stoiko Ivanov 
> ---
>  src/config-base | 5 +++++
>  1 file changed, 5 insertions(+)
> 
> diff --git a/src/config-base b/src/config-base
> index 1581b09..238d93c 100644
> --- a/src/config-base
> +++ b/src/config-base
> @@ -144,6 +144,11 @@ CONFIG_ISO9660_FS=y
>  CONFIG_NTFS3_FS=y
>  CONFIG_MSDOS_FS=y
>  CONFIG_VFAT_FS=y
> +CONFIG_FAT_DEFAULT_CODEPAGE=437
> +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"

these two are the default values anyway and already set without this
patch?

> +CONFIG_NLS=y

this one is set as well

> +CONFIG_NLS_DEFAULT="utf8"

the upstream default for this is iso8859-1, but it also says this is
"the NLS used by your console, not the NLS used by a specific file
system (if different) to store data (filenames) on disk." - whatever
that means exactly?

> +CONFIG_NLS_CODEPAGE_437=y

shouldn't we also enable NLS_UTF8 ?

I mean, I guess it doesn't really matter as long as we don't start
setting the corresponding mount options to force a specific codepage?

the last option seems to be the only one that is actually missing from
our config ;) although it doesn't hurt to set any of the above
explicitly I guess..

>  
>  # memory hotplug
>  CONFIG_MEMORY_HOTPLUG=y
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 
> 
> 



From t.lamprecht at proxmox.com  Tue Nov 26 13:30:17 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 13:30:17 +0100
Subject: [pbs-devel] [PATCH proxmox-backup-restore-image] kernel: enable
 codepage 437 for vfat support
In-Reply-To: <1732623296.vf1tluw47l.astroid@yuna.none>
References: <20241126120357.389046-1-s.ivanov@proxmox.com>
 <1732623296.vf1tluw47l.astroid@yuna.none>
Message-ID: <4cf62736-6394-482a-9f30-0621b6405ed3@proxmox.com>

On 26.11.24 at 13:26, Fabian Grünbichler wrote:
> On November 26, 2024 1:03 pm, Stoiko Ivanov wrote:
>> ran into an issue when clicking on the ESP of a VM while trying
>> single-file restore.
>>
>> the added config-config options are taken from config-6.5.13-6-pve
>> (the restore-image is still based on kernel 6.5) - and I tried copying
>> a small set around the needed options.
> maybe we should upgrade to a newer one at some point (might also benefit
> from NTFS bug fixes and added features..)

Yes, I would go directly to 6.11 once we got a new tag from ubuntu and
after the releases, maybe ZFS 2.3 is done by then, which could also be
nice to have (albeit I did not check if that would provide new
pool-features that 2.2 does not understand)



From f.gruenbichler at proxmox.com  Tue Nov 26 13:32:06 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 13:32:06 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] api types: add missing
 conf to blob archive name mapping
In-Reply-To: <20241126122419.235890-1-c.ebner@proxmox.com>
References: <20241126122419.235890-1-c.ebner@proxmox.com>
Message-ID: <1732624320.03tnpelt4j.astroid@yuna.none>

On November 26, 2024 1:24 pm, Christian Ebner wrote:
> Commit addfae26 ("api types: introduce `BackupArchiveName` type")
> introduced a dedicated archive name api type to add rust type
> checking and bundle helpers to the api type. Since this, the backup
> archive name to server archive name mapping is handled by its parser.
> 
> This however did not cover the `.conf` extension used for VM config
> files. Add the missing `.conf` to `.conf.blob` to the match statement
> and the test cases.
> 
> Fixes: addfae26 ("api types: introduce `BackupArchiveName` type")
> Reported-by: Stoiko Ivanov 
> Signed-off-by: Christian Ebner 
> ---
>  pbs-api-types/src/datastore.rs | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
> index 4927f3724..688b7dd03 100644
> --- a/pbs-api-types/src/datastore.rs
> +++ b/pbs-api-types/src/datastore.rs
> @@ -1833,6 +1833,7 @@ impl BackupArchiveName {
>              Some("ppxar") => ArchiveType::DynamicIndex,
>              Some("pcat1") => ArchiveType::DynamicIndex,
>              Some("img") => ArchiveType::FixedIndex,
> +            Some("conf") => ArchiveType::Blob,
>              Some("json") => ArchiveType::Blob,
>              Some("key") => ArchiveType::Blob,
>              Some("log") => ArchiveType::Blob,
> @@ -1910,6 +1911,8 @@ mod tests {
>              "/valid/rsa-encrypted.key.blob",
>              "/valid/archive-name.log",
>              "/valid/archive-name.log.blob",
> +            "/valid/qemu-server.conf",
> +            "/valid/qemu-server.conf.blob",
>          ];
>  
>          for archive_name in valid_archive_names {
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 
> 
> 



From t.lamprecht at proxmox.com  Tue Nov 26 13:35:42 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 13:35:42 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api types: add missing conf
 to blob archive name mapping
In-Reply-To: <20241126122419.235890-1-c.ebner@proxmox.com>
References: <20241126122419.235890-1-c.ebner@proxmox.com>
Message-ID: <1d5d8a7b-f31c-4f1a-bc69-814e57455e3d@proxmox.com>

On 26.11.24 at 13:24, Christian Ebner wrote:
> Commit addfae26 ("api types: introduce `BackupArchiveName` type")
> introduced a dedicated archive name api type to add rust type
> checking and bundle helpers to the api type. Since this, the backup
> archive name to server archive name mapping is handled by its parser.

This is mostly relevant for the client or? I.e., this has no impact on
community implementations/experiments adding completely different archive
types?



From f.gruenbichler at proxmox.com  Tue Nov 26 13:41:35 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 13:41:35 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api types: add missing conf
 to blob archive name mapping
In-Reply-To: <1d5d8a7b-f31c-4f1a-bc69-814e57455e3d@proxmox.com>
References: <20241126122419.235890-1-c.ebner@proxmox.com>
 <1d5d8a7b-f31c-4f1a-bc69-814e57455e3d@proxmox.com>
Message-ID: <1732624756.2bm5wbhpr8.astroid@yuna.none>

On November 26, 2024 1:35 pm, Thomas Lamprecht wrote:
> On 26.11.24 at 13:24, Christian Ebner wrote:
>> Commit addfae26 ("api types: introduce `BackupArchiveName` type")
>> introduced a dedicated archive name api type to add rust type
>> checking and bundle helpers to the api type. Since this, the backup
>> archive name to server archive name mapping is handled by its parser.
> 
> This is mostly relevant for the client or? I.e., this has no impact on
> community implementations/experiments adding completely different archive
> types?

yes, this is just a UX shortcut that allows leaving out the .blob
extension for blob types used by our stack. if you use a custom blob
type, you need to specify the full name including .blob when passing the
name to the client.

the reason is that with a generic catch-all matching to blob, we'd take
away our ability to add new index types without breaking clients relying
on that magic.

i.e., if a client could do `restore ... my_custom_blob.foobar`, we can't
add a `foobar` index/archive type ourselves later on without breaking
that client.
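
the relevant shape, sketched (simplified from the parser patch earlier in this thread;
error handling assumed):

```
use anyhow::{bail, Error};

enum ArchiveType {
    DynamicIndex,
    FixedIndex,
    Blob,
}

// map a client-side extension to the server-side archive type; note the
// deliberate absence of a `Some(_) => ArchiveType::Blob` catch-all, which
// would claim every future extension and block adding new index types
fn archive_type(extension: Option<&str>) -> Result<ArchiveType, Error> {
    Ok(match extension {
        Some("pxar") | Some("ppxar") | Some("pcat1") => ArchiveType::DynamicIndex,
        Some("img") => ArchiveType::FixedIndex,
        Some("conf") | Some("json") | Some("key") | Some("log") => ArchiveType::Blob,
        _ => bail!("unknown extension, pass the full server archive name"),
    })
}

fn main() {
    assert!(matches!(archive_type(Some("conf")), Ok(ArchiveType::Blob)));
    assert!(archive_type(Some("foobar")).is_err());
}
```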



From c.ebner at proxmox.com  Tue Nov 26 13:48:06 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Tue, 26 Nov 2024 13:48:06 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api types: add missing conf
 to blob archive name mapping
In-Reply-To: <1d5d8a7b-f31c-4f1a-bc69-814e57455e3d@proxmox.com>
References: <20241126122419.235890-1-c.ebner@proxmox.com>
 <1d5d8a7b-f31c-4f1a-bc69-814e57455e3d@proxmox.com>
Message-ID: <2cae33de-6348-46b1-8986-98d2e45e8a1c@proxmox.com>

On 11/26/24 13:35, Thomas Lamprecht wrote:
> On 26.11.24 at 13:24, Christian Ebner wrote:
>> Commit addfae26 ("api types: introduce `BackupArchiveName` type")
>> introduced a dedicated archive name api type to add rust type
>> checking and bundle helpers to the api type. Since this, the backup
>> archive name to server archive name mapping is handled by its parser.
> 
> This is mostly relevant for the client or? I.e., this has no impact on
> community implementations/experiments adding completely different archive
> types?

No, this is not limited to the client. This is mostly used server side 
to map the archive name extension to the server archive name extension 
(.blob, .fidx, .didx).

The current mappings were already enforced/assumed by the server to some 
extent, and other archive types must use the full server archive name 
extension anyways, for the server to recognize it.

For community implementations: they will be affected by this as well, 
but they would already have needed to pass the full server archive name 
extensions anyways. So this should not break anything for them.




From t.lamprecht at proxmox.com  Tue Nov 26 13:52:53 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 13:52:53 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api types: add missing conf
 to blob archive name mapping
In-Reply-To: <2cae33de-6348-46b1-8986-98d2e45e8a1c@proxmox.com>
References: <20241126122419.235890-1-c.ebner@proxmox.com>
 <1d5d8a7b-f31c-4f1a-bc69-814e57455e3d@proxmox.com>
 <2cae33de-6348-46b1-8986-98d2e45e8a1c@proxmox.com>
Message-ID: <8bf0f5de-65ec-45f0-ab67-4ed76a89977c@proxmox.com>

On 26.11.24 at 13:48, Christian Ebner wrote:
> On 11/26/24 13:35, Thomas Lamprecht wrote:
>> On 26.11.24 at 13:24, Christian Ebner wrote:
>>> Commit addfae26 ("api types: introduce `BackupArchiveName` type")
>>> introduced a dedicated archive name api type to add rust type
>>> checking and bundle helpers to the api type. Since this, the backup
>>> archive name to server archive name mapping is handled by its parser.
>>
>> This is mostly relevant for the client or? I.e., this has no impact on
>> community implementations/experiments adding completely different archive
>> types?
> 
> No, this is not limited to the client. This is mostly used server side 
> to map the archive name extension to the server archive name extension 
> (.blob, .fidx, .didx).
> 
> The current mappings were already enforced/assumed by the server to some 
> extend, and other archive types must use the full server archive name 
> extension anyways, for the server to recognize it.
> 
> For community implementations: they will be affected by this as well, 
> but they would already have need to pass the full server archive name 
> extensions anyways. So this should not break anything for them.
> 

Ok, thank you for your explanation!



From s.sterz at proxmox.com  Tue Nov 26 14:07:23 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Tue, 26 Nov 2024 14:07:23 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] datastore: re-phrase error
 message when datastore is unavailable
Message-ID: <20241126130723.146166-1-s.sterz@proxmox.com>

the current phrase leads to clumsy log messages such as:

> datastore 'store' is in datastore is being unmounted

this commit re-phrases that to:

> datastore 'store' is unavailable: datastore is being unmounted

Signed-off-by: Shannon Sterz 
---
 pbs-datastore/src/datastore.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index adf29f18..33bc1f72 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -218,7 +218,7 @@ impl DataStore {
 
         if let Some(maintenance_mode) = config.get_maintenance_mode() {
             if let Err(error) = maintenance_mode.check(operation) {
-                bail!("datastore '{name}' is in {error}");
+                bail!("datastore '{name}' is unavailable: {error}");
             }
         }
 
-- 
2.39.5
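
A contrived, self-contained sketch of the composition problem (inner error text taken
from the commit message above; function names assumed):

```
use anyhow::{anyhow, bail, Error};

// stands in for MaintenanceMode::check(), which yields the reason
fn maintenance_check() -> Result<(), Error> {
    Err(anyhow!("datastore is being unmounted"))
}

fn lookup_datastore(name: &str) -> Result<(), Error> {
    if let Err(error) = maintenance_check() {
        // before: bail!("datastore '{name}' is in {error}")
        //   -> "datastore 'store' is in datastore is being unmounted"
        bail!("datastore '{name}' is unavailable: {error}");
    }
    Ok(())
}

fn main() {
    // -> datastore 'store' is unavailable: datastore is being unmounted
    println!("{}", lookup_datastore("store").unwrap_err());
}
```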




From f.schauer at proxmox.com  Tue Nov 26 14:17:51 2024
From: f.schauer at proxmox.com (Filip Schauer)
Date: Tue, 26 Nov 2024 14:17:51 +0100
Subject: [pbs-devel] [PATCH vma-to-pbs] Add missing implementation of the
 --version option
Message-ID: <20241126131751.86049-1-f.schauer@proxmox.com>

This was left in the help text but was not implemented during the
transition from clap to picoargs in 80fb0a4a.

Signed-off-by: Filip Schauer 
---
 src/main.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/main.rs b/src/main.rs
index f942a73..4f6691e 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -65,6 +65,7 @@ Options:
   -V, --version
           Print version
 ";
+const VERSION: &str = env!("CARGO_PKG_VERSION");
 
 fn parse_args() -> Result {
     let mut args: Vec<_> = std::env::args_os().collect();
@@ -74,6 +75,8 @@ fn parse_args() -> Result {
     let options = [
         "-h",
         "--help",
+        "-V",
+        "--version",
         "-c",
         "--compress",
         "-e",
@@ -112,6 +115,9 @@ fn parse_args() -> Result {
     if args.contains(["-h", "--help"]) {
         print!("{CMD_HELP}");
         std::process::exit(0);
+    } else if args.contains(["-V", "--version"]) {
+        println!("Version: {VERSION}");
+        std::process::exit(0);
     }
 
     let pbs_repository = args.value_from_str("--repository")?;
-- 
2.39.5
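
For reference, a minimal, self-contained sketch of the pico-args pattern the patch
follows (dep: pico-args; help text elided):

```
const VERSION: &str = env!("CARGO_PKG_VERSION");

fn main() {
    let mut args = pico_args::Arguments::from_env();
    if args.contains(["-h", "--help"]) {
        println!("usage: see CMD_HELP in the patch above");
        std::process::exit(0);
    } else if args.contains(["-V", "--version"]) {
        // print the crate version baked in at compile time, then exit
        println!("Version: {VERSION}");
        std::process::exit(0);
    }
    // remaining argument parsing would follow here
}
```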




From s.ivanov at proxmox.com  Tue Nov 26 14:26:02 2024
From: s.ivanov at proxmox.com (Stoiko Ivanov)
Date: Tue, 26 Nov 2024 14:26:02 +0100
Subject: [pbs-devel] [PATCH v2] kernel: enable codepage 437 for vfat support
Message-ID: <20241126132602.576027-1-s.ivanov@proxmox.com>

ran into an issue when clicking on the ESP of a VM while trying
single-file restore. Enabling the single option fixed the issue, and
got the partition successfully mounted.

Signed-off-by: Stoiko Ivanov 
---
changes v1->v2:
* only enabled the single needed option, and put it in the block where
  the remaining NLS options were set already (overlooked that on the
  first attempt) - Thanks @Fabian for the feedback!

As the issue seems to have been around for about one year - I don't
think we need to rush applying it (and can do so when upgrading to a
new kernel+ZFS version)

 src/config-base | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/config-base b/src/config-base
index 1581b09..edd95c1 100644
--- a/src/config-base
+++ b/src/config-base
@@ -102,6 +102,7 @@ CONFIG_SYSFS=y
 CONFIG_NLS=y
 CONFIG_NLS_UTF8=y
 CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_CODEPAGE_437=y
 CONFIG_MSDOS_PARTITION=y
 CONFIG_EFI_PARTITION=y
 CONFIG_FILE_LOCKING=y
-- 
2.39.5




From s.ivanov at proxmox.com  Tue Nov 26 14:42:45 2024
From: s.ivanov at proxmox.com (Stoiko Ivanov)
Date: Tue, 26 Nov 2024 14:42:45 +0100
Subject: [pbs-devel] [PATCH proxmox-backup-restore-image] kernel: enable
 codepage 437 for vfat support
In-Reply-To: <1732623296.vf1tluw47l.astroid@yuna.none>
References: <20241126120357.389046-1-s.ivanov@proxmox.com>
 <1732623296.vf1tluw47l.astroid@yuna.none>
Message-ID: <20241126144245.2cc5a8d0@rosa.proxmox.com>

Thanks for the quick feedback!

On Tue, 26 Nov 2024 13:26:47 +0100
Fabian Grünbichler  wrote:

> adding missing subject prefix ;)
missed that part before sending a v2:
https://lore.proxmox.com/pbs-devel/20241126132602.576027-1-s.ivanov at proxmox.com/T/#u
(fixed now)

> 
> On November 26, 2024 1:03 pm, Stoiko Ivanov wrote:
> > ran into an issue when clicking on the ESP of a VM while trying
> > single-file restore.
> > 
> > the added config options are taken from config-6.5.13-6-pve
> > (the restore-image is still based on kernel 6.5) - and I tried copying
> > a small set around the needed options.  
> 
> maybe we should upgrade to a newer one at some point (might also benefit
> from NTFS bug fixes and added features..)
sounds sensible! - anyone got an idea how widespread EXFAT is (because it
is set in our regular kernels (just after the VFAT options))?
quick skim through the config for 6.11 did not yield anything else new that
seems too common (on guests).



> 
> > 
> > with the patch the ESP contents are shown successfully
> > 
> > Signed-off-by: Stoiko Ivanov 
> > ---
> >  src/config-base | 5 +++++
> >  1 file changed, 5 insertions(+)
> > 
> > diff --git a/src/config-base b/src/config-base
> > index 1581b09..238d93c 100644
> > --- a/src/config-base
> > +++ b/src/config-base
> > @@ -144,6 +144,11 @@ CONFIG_ISO9660_FS=y
> >  CONFIG_NTFS3_FS=y
> >  CONFIG_MSDOS_FS=y
> >  CONFIG_VFAT_FS=y
> > +CONFIG_FAT_DEFAULT_CODEPAGE=437
> > +CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"  
> 
> these two are the default values anyway and already set without this
> patch?
yes - I sent the patch a bit too soon after my first test and before
taking the 2 minutes to look around the config file

> 
> > +CONFIG_NLS=y  
> 
> this one is set as well
> 
> > +CONFIG_NLS_DEFAULT="utf8"  
> 
> the upstream default for this is iso8859-1, but it also says this is
> "the NLS used by your console, not the NLS used by a specific file
> system (if different) to store data (filenames) on disk." - whatever
> that means exactly?
> 
> > +CONFIG_NLS_CODEPAGE_437=y  
> 
> shouldn't we also enable NLS_UTF8 ?
> 
> I mean, I guess it doesn't really matter as long as we don't start
> setting the corresponding mount options to force a specific codepage?
can't remember ever needing to set any code-page specific options for vfat
in quite a long time (and mounting various images for uncommon uses)
so I'd say - let's see if anyone runs into needing this.

> 
> the last option seems to be the only one that is actually missing from
> our config ;) although it doesn't hurt to set any of the above
> explicitly I guess..
I think keeping this as minimal as needed makes sense, so I tested with
only `CONFIG_NLS_CODEPAGE_437=y` set - and decided to stick with that, as
it worked with a partition created with mkfs.vfat (the ESP from Debian).



> 
> >  
> >  # memory hotplug
> >  CONFIG_MEMORY_HOTPLUG=y
> > -- 
> > 2.39.5
> > 
> > 
> > 
> > _______________________________________________
> > pbs-devel mailing list
> > pbs-devel at lists.proxmox.com
> > https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> > 
> > 
> >   
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 
> 




From f.gruenbichler at proxmox.com  Tue Nov 26 14:51:11 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 14:51:11 +0100
Subject: [pbs-devel] [PATCH proxmox-backup-restore-image] kernel: enable
 codepage 437 for vfat support
In-Reply-To: <20241126144245.2cc5a8d0@rosa.proxmox.com>
References: <20241126120357.389046-1-s.ivanov@proxmox.com>
 <1732623296.vf1tluw47l.astroid@yuna.none>
 <20241126144245.2cc5a8d0@rosa.proxmox.com>
Message-ID: <1732629004.i4muvod5yb.astroid@yuna.none>

On November 26, 2024 2:42 pm, Stoiko Ivanov wrote:
> Thanks for the quick feedback!
> 
> On Tue, 26 Nov 2024 13:26:47 +0100
> Fabian Grünbichler  wrote:
> 
>> adding missing subject prefix ;)
> missed that part before sending a v2:
> https://lore.proxmox.com/pbs-devel/20241126132602.576027-1-s.ivanov at proxmox.com/T/#u
> (fixed now)
> 
>> 
>> On November 26, 2024 1:03 pm, Stoiko Ivanov wrote:
>> > ran into an issue when clicking on the ESP of a VM while trying
>> > single-file restore.
>> > 
>> > the added config options are taken from config-6.5.13-6-pve
>> > (the restore-image is still based on kernel 6.5) - and I tried copying
>> > a small set around the needed options.  
>> 
>> maybe we should upgrade to a newer one at some point (might also benefit
>> from NTFS bug fixes and added features..)
> sounds sensible! - anyone got an idea how widespread EXFAT is (because it
> is set in our regular kernels (just after the VFAT options))?
> quick skim through the config for 6.11 did not yield anything else new that
> seems too common (on guests).

I mostly encounter it as the factory default for some external storage
media, but I am a 99.5% Linux user so I don't know whether there's some
Windows corner that makes it more widespread in a VM context..



From s.ivanov at proxmox.com  Tue Nov 26 14:52:50 2024
From: s.ivanov at proxmox.com (Stoiko Ivanov)
Date: Tue, 26 Nov 2024 14:52:50 +0100
Subject: [pbs-devel] [PATCH proxmox-backup-restore-image] kernel: enable
 codepage 437 for vfat support
In-Reply-To: <4cf62736-6394-482a-9f30-0621b6405ed3@proxmox.com>
References: <20241126120357.389046-1-s.ivanov@proxmox.com>
 <1732623296.vf1tluw47l.astroid@yuna.none>
 <4cf62736-6394-482a-9f30-0621b6405ed3@proxmox.com>
Message-ID: <20241126145250.64a9ba4d@rosa.proxmox.com>

On Tue, 26 Nov 2024 13:30:17 +0100
Thomas Lamprecht  wrote:

> On 26.11.24 at 13:26, Fabian Grünbichler wrote:
> 
> Yes, I would go directly to 6.11 once we got a new tag from ubuntu and
> after the releases, maybe ZFS 2.3 is done by then, which could also be
> nice to have (albeit I did not check if that would provide new
> pool-features that 2.2 does not understand)
sounds sensible - the issue has been around for at least one year (change
to kernel 6.5) - and I'm not aware that anyone has run into it yet, apart
from my test - so no particular rush there

as for ZFS 2.3 - I assume that fast dedup[0] and long-names [1] might be
features that are not read-only compatible (but did not check the code
closely) - for raid-Z expansion - the pull-request mentions that it's not
read-only compatible.

I'll try to update the kernel+zfs here when looking into ZFS-2.3.0

[0] https://github.com/openzfs/zfs/discussions/15896
[1] https://github.com/openzfs/zfs/pull/15921
[2] https://github.com/openzfs/zfs/pull/15022



From h.laimer at proxmox.com  Tue Nov 26 14:53:31 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 14:53:31 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/5] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <1732622272.pgtz2hjshk.astroid@yuna.none>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
 <20241126114323.105838-3-h.laimer@proxmox.com>
 <1732622272.pgtz2hjshk.astroid@yuna.none>
Message-ID: <66f523fc-7c9a-4074-9c97-5331d9951e29@proxmox.com>



On 11/26/24 13:07, Fabian Grünbichler wrote:
> On November 26, 2024 12:43 pm, Hannes Laimer wrote:
>> Suggested-by: Fabian Grünbichler 
>> Signed-off-by: Hannes Laimer 
>> ---
>>   src/api2/admin/datastore.rs  | 12 +++++++++---
>>   src/api2/config/datastore.rs | 12 +++++++++---
>>   2 files changed, 18 insertions(+), 6 deletions(-)
>>
>> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
>> index 1c939bc20..cae7eb89c 100644
>> --- a/src/api2/admin/datastore.rs
>> +++ b/src/api2/admin/datastore.rs
>> @@ -45,7 +45,7 @@ use pbs_api_types::{
>>       BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
>>       IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
>>       PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
>> -    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
>> +    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA,
>>       VERIFICATION_OUTDATED_AFTER_SCHEMA,
>>   };
>>   use pbs_client::pxar::{create_tar, create_zip};
>> @@ -2512,7 +2512,10 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
>>           schema: UPID_SCHEMA,
>>       },
>>       access: {
>> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
> 
> I am not 100% sure this part should require Sys.Modify.. somebody needs
> to have set up the datastore already, just mounting seems benign in that
> case?
> 
>>       },
>>   )]
>>   /// Mount removable datastore.
>> @@ -2625,7 +2628,10 @@ fn do_unmount_device(
>>           schema: UPID_SCHEMA,
>>       },
>>       access: {
>> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
> 
> same logic would apply here..
> 
>>       }
>>   )]
>>   /// Unmount a removable device that is associated with the datastore
>> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
>> index 121222c40..359b676a5 100644
>> --- a/src/api2/config/datastore.rs
>> +++ b/src/api2/config/datastore.rs
>> @@ -14,7 +14,7 @@ use proxmox_uuid::Uuid;
>>   use pbs_api_types::{
>>       Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
>>       MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
>> -    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
>> +    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY,
>>       PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
>>   };
>>   use pbs_config::BackupLockGuard;
>> @@ -173,7 +173,10 @@ pub(crate) fn do_create_datastore(
>>           },
>>       },
>>       access: {
>> -        permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
> 
> this now affects regular datastores as well, it should probably be
> inside the API handler and conditionalized on backing_device being set?
> 
>>       },
>>   )]
>>   /// Create new datastore config.
>> @@ -551,7 +554,10 @@ pub fn update_datastore(
>>           },
>>       },
>>       access: {
>> -        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
> 
> and this is not needed at all, since path and backing_device are fixed
> after creation?
> 

not sure why git diff shows `update_datastore`, this is for the delete
endpoint. But I'll change that to only check when it is actually
removable (as above)
>>       },
>>       returns: {
>>           schema: UPID_SCHEMA,
>> -- 
>> 2.39.5
>>
>>
>>
>> _______________________________________________
>> pbs-devel mailing list
>> pbs-devel at lists.proxmox.com
>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>>
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel




From f.gruenbichler at proxmox.com  Tue Nov 26 15:14:28 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 15:14:28 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/5] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <66f523fc-7c9a-4074-9c97-5331d9951e29@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
 <20241126114323.105838-3-h.laimer@proxmox.com>
 <1732622272.pgtz2hjshk.astroid@yuna.none>
 <66f523fc-7c9a-4074-9c97-5331d9951e29@proxmox.com>
Message-ID: <1732630377.w73iqukgdv.astroid@yuna.none>

On November 26, 2024 2:53 pm, Hannes Laimer wrote:
> 
> 
> On 11/26/24 13:07, Fabian Grünbichler wrote:
>> On November 26, 2024 12:43 pm, Hannes Laimer wrote:
>>> @@ -551,7 +554,10 @@ pub fn update_datastore(
>>>           },
>>>       },
>>>       access: {
>>> -        permission: &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
>>> +        permission: &Permission::And(&[
>>> +            &Permission::Privilege(&["datastore", "{name}"], PRIV_DATASTORE_ALLOCATE, false),
>>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>>> +        ]),
>> 
>> and this is not needed at all, since path and backing_device are fixed
>> after creation?
>> 
> 
> not sure why git diff shows `update_datastore`, this is for the delete
> endpoint. But I'll change that to only check when it is actually
> removable (as above).

oh, missed that. yeah, for deletion one can argue that mirroring the
creation ACL checks makes sense..

I think the API macro often confuses `git diff/format-patch` and gets
the context wrong, not sure whether a different diff algorithm or other
settings might help?
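
For reference, and untested whether it actually helps in this case: git
ships a built-in Rust userdiff driver for the hunk-header function
context, and the diff algorithm can be switched per repository, e.g.:

```
$ echo '*.rs diff=rust' >> .gitattributes
$ git config diff.algorithm histogram
```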



From h.laimer at proxmox.com  Tue Nov 26 15:28:40 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 15:28:40 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
Message-ID: <20241126142840.136533-1-h.laimer@proxmox.com>

Suggested-by: Fabian Grünbichler 
Signed-off-by: Hannes Laimer 
---
changes since v1:
 * config: create/delete: only check for Sys.Modify on /system/disks if
    removable

 src/api2/admin/datastore.rs  | 12 +++++++++---
 src/api2/config/datastore.rs | 13 ++++++++++++-
 2 files changed, 21 insertions(+), 4 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 1c939bc20..cae7eb89c 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -45,7 +45,7 @@ use pbs_api_types::{
     BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
     IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
     PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
-    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
+    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA,
     VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
@@ -2512,7 +2512,10 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
         schema: UPID_SCHEMA,
     },
     access: {
-        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+        permission: &Permission::And(&[
+            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
+        ]),
     },
 )]
 /// Mount removable datastore.
@@ -2625,7 +2628,10 @@ fn do_unmount_device(
         schema: UPID_SCHEMA,
     },
     access: {
-        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+        permission: &Permission::And(&[
+            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
+            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
+        ]),
     }
 )]
 /// Unmount a removable device that is associated with the datastore
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 121222c40..d23d7c455 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -14,7 +14,7 @@ use proxmox_uuid::Uuid;
 use pbs_api_types::{
     Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
     MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
-    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY,
     PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
 };
 use pbs_config::BackupLockGuard;
@@ -204,6 +204,11 @@ pub fn create_datastore(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
 
+    if config.backing_device.is_some() {
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(&auth_id, &["system", "disks"], PRIV_SYS_MODIFY, false)?;
+    }
+
     let mut prune_job_config = None;
     if config.keep.keeps_something() || !has_prune_job(&config.name)? {
         prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
@@ -580,6 +585,12 @@ pub async fn delete_datastore(
 
     let store_config: DataStoreConfig = config.lookup("datastore", &name)?;
 
+    if store_config.backing_device.is_some() {
+        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+        let user_info = CachedUserInfo::new()?;
+        user_info.check_privs(&auth_id, &["system", "disks"], PRIV_SYS_MODIFY, false)?;
+    }
+
     if destroy_data && get_datastore_mount_status(&store_config) == Some(false) {
         http_bail!(
             BAD_REQUEST,
-- 
2.39.5




From d.csapak at proxmox.com  Tue Nov 26 15:47:34 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Tue, 26 Nov 2024 15:47:34 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] sync jobs: remove superfluous
 direction property
In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com>
References: <20241125174012.678523-1-c.ebner@proxmox.com>
Message-ID: <20241126144734.2858189-1-d.csapak@proxmox.com>

since the SyncJobConfig struct now contains a 'sync-direction' property, we can
omit the 'direction' property of the SyncJobStatus struct. This makes a
few adaptations in the UI necessary:

* use the correct field
* handle 'pull' as default (since we don't necessarily get a
  'sync-direction' in that case)

Signed-off-by: Dominik Csapak 
---
based on:

https://lore.proxmox.com/pbs-devel/20241125174012.678523-1-c.ebner at proxmox.com/
and
https://lore.proxmox.com/pbs-devel/20241126092029.207319-1-f.gruenbichler at proxmox.com/


 pbs-api-types/src/jobs.rs |  6 ------
 src/api2/admin/sync.rs    |  1 -
 www/config/SyncView.js    | 16 ++++++++--------
 3 files changed, 8 insertions(+), 15 deletions(-)

diff --git a/pbs-api-types/src/jobs.rs b/pbs-api-types/src/jobs.rs
index 16b16dd84..04631d920 100644
--- a/pbs-api-types/src/jobs.rs
+++ b/pbs-api-types/src/jobs.rs
@@ -649,9 +649,6 @@ impl SyncJobConfig {
         status: {
             type: JobScheduleStatus,
         },
-        direction: {
-            type: SyncDirection,
-        },
     },
 )]
 #[derive(Serialize, Deserialize, Clone, PartialEq)]
@@ -662,9 +659,6 @@ pub struct SyncJobStatus {
     pub config: SyncJobConfig,
     #[serde(flatten)]
     pub status: JobScheduleStatus,
-
-    /// The direction of the job
-    pub direction: SyncDirection,
 }
 
 /// These are used separately without `ns`/`max-depth` sometimes in the API, specifically in the API
diff --git a/src/api2/admin/sync.rs b/src/api2/admin/sync.rs
index 089e6f50d..6722ebea0 100644
--- a/src/api2/admin/sync.rs
+++ b/src/api2/admin/sync.rs
@@ -120,7 +120,6 @@ pub fn list_config_sync_jobs(
         list.push(SyncJobStatus {
             config: job,
             status,
-            direction,
         });
     }
 
diff --git a/www/config/SyncView.js b/www/config/SyncView.js
index ca1f7ecd6..503bdc6df 100644
--- a/www/config/SyncView.js
+++ b/www/config/SyncView.js
@@ -45,7 +45,7 @@ Ext.define('PBS.config.SyncJobView', {
 
 	    store.clearFilter();
 
-	    let fieldsToSearch = ['direction', 'id', 'remote', 'remote-store', 'owner'];
+	    let fieldsToSearch = ['sync-direction', 'id', 'remote', 'remote-store', 'owner'];
 	    if (!view.datastore) {
 		fieldsToSearch.push('store');
 	    }
@@ -96,7 +96,7 @@ Ext.define('PBS.config.SyncJobView', {
             Ext.create('PBS.window.SyncJobEdit', {
 		datastore: view.datastore,
                 id: selection[0].data.id,
-		syncDirection: selection[0].data.direction,
+		syncDirection: selection[0].data['sync-direction'],
 		listeners: {
 		    destroy: function() {
 			me.reload();
@@ -174,7 +174,7 @@ Ext.define('PBS.config.SyncJobView', {
 	type: 'diff',
 	autoDestroy: true,
 	autoDestroyRstore: true,
-	sorters: ['store', 'direction', 'id'],
+	sorters: ['store', 'sync-direction', 'id'],
 	rstore: {
 	    type: 'update',
 	    storeid: 'pbs-sync-jobs-status',
@@ -277,15 +277,15 @@ Ext.define('PBS.config.SyncJobView', {
 	},
 	{
 	    header: gettext('Direction'),
-	    dataIndex: 'direction',
+	    dataIndex: 'sync-direction',
 	    renderer: function(value) {
 		let iconCls, text;
-		if (value === 'pull') {
-		    iconCls = 'download';
-		    text = gettext('Pull');
-		} else {
+		if (value === 'push') {
 		    iconCls = 'upload';
 		    text = gettext('Push');
+		} else {
+		    iconCls = 'download';
+		    text = gettext('Pull');
 		}
 		return ` ${text}`;
 	    },
-- 
2.39.5




From d.csapak at proxmox.com  Tue Nov 26 15:50:00 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Tue, 26 Nov 2024 15:50:00 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 0/4] use same config section
 type for all sync jobs
In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com>
References: <20241125174012.678523-1-c.ebner@proxmox.com>
Message-ID: <06a99aaf-700c-48b3-b96a-c6f665fa9b41@proxmox.com>

look good to me besides one small issue with the leftover direction
property of the status, but I sent a follow-up for that:

https://lore.proxmox.com/pbs-devel/20241126144734.2858189-1-d.csapak at proxmox.com/

(both christians series and fabians patch should be applied)

with that, consider Christian's/Fabian's patches:

Reviewed-by: Dominik Csapak 
Tested-by: Dominik Csapak 





From t.lamprecht at proxmox.com  Tue Nov 26 16:04:08 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:04:08 +0100
Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 0/4] use same
 config section type for all sync jobs
In-Reply-To: <20241125174012.678523-1-c.ebner@proxmox.com>
References: <20241125174012.678523-1-c.ebner@proxmox.com>
Message-ID: <7e6120e1-3b9b-424b-92d2-67a747f75009@proxmox.com>

On 25.11.24 at 18:40, Christian Ebner wrote:
> This patch series drops the `sync-push` config section type in favor of
> using the same `sync` for both, sync jobs in push and pull direction.
> Instead, encode the sync direction as optional parameter in the sync job
> config, defaulting to sync in pull direction. This reduces complexity by
> allowing to drop the optional parameter for most function calls.
> For api methods, the default remains to only show sync directions in
> pull direction, if no ListSyncDirection::All is passed, or the direction
> explicitly selected. This allows to default to show both directions in
> future Proxmox Backup Server version.
> 
> This patch series depends on Dominik's patch series found here:
> https://lore.proxmox.com/pbs-devel/377618fd-0ea9-46ba-9aec-a47387eca50d at proxmox.com/T
> 
> Christian Ebner (4):
>   config: sync: use same config section type `sync` for push and pull
>   api: admin/config: introduce sync direction as job config parameter
>   bin: show direction in sync job list output
>   api types: drop unused config type helpers for sync direction
> 
>  pbs-api-types/src/jobs.rs              |  25 ++--
>  pbs-config/src/sync.rs                 |  17 +--
>  src/api2/admin/sync.rs                 |  18 +--
>  src/api2/config/datastore.rs           |  16 +--
>  src/api2/config/notifications/mod.rs   |  19 ++--
>  src/api2/config/sync.rs                | 151 ++++++++-----------------
>  src/bin/proxmox-backup-proxy.rs        |  22 +---
>  src/bin/proxmox_backup_manager/sync.rs |   6 +-
>  src/server/sync.rs                     |   2 +-
>  9 files changed, 88 insertions(+), 188 deletions(-)
> 


applied series with both Fabian's and Dominik's follow-ups and from the latter
also his review trailers, thanks!
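
For illustration, a sync job entry in the unified `sync` section config
can then look like this (hypothetical job ID and store names;
`sync-direction` is the new optional parameter, defaulting to pull):

```
sync: example-job
	store local-store
	remote remote1
	remote-store remote-store
	sync-direction push
```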



From t.lamprecht at proxmox.com  Tue Nov 26 16:03:56 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:03:56 +0100
Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 0/6] sync job ui
 improvements
In-Reply-To: <20241125111537.1504618-1-d.csapak@proxmox.com>
References: <20241125111537.1504618-1-d.csapak@proxmox.com>
Message-ID: <9bc39e63-5b13-481f-a3c0-1fef790986f0@proxmox.com>

On 25.11.24 at 12:15, Dominik Csapak wrote:
> this series aims to improve the pull/push sync job ui a bit, by:
> 
> * unifying both types into one list
> * adding a helpful tooltip for local owner/user
> * adding a filter for the sync jobs
> * adding a 'all' mode for listing all jobs on the /admin/sync api
> 
> Dominik Csapak (6):
>   api: admin: sync: add direction to sync job status
>   api: admin: sync: add optional 'all' sync type for listing
>   cli: manager: sync: add 'sync-direction' parameter to list
>   ui: sync jobs: revert to single list for pull/push jobs
>   ui: sync jobs: change default sorting to 'store' -> 'direction' ->
>     'id'
>   ui: sync jobs: add search box
> 
>  pbs-api-types/src/jobs.rs              |   6 ++
>  src/api2/admin/sync.rs                 |  65 ++++++++----
>  src/api2/config/datastore.rs           |   9 +-
>  src/api2/config/notifications/mod.rs   |   2 +-
>  src/bin/proxmox_backup_manager/sync.rs |   6 +-
>  www/Makefile                           |   1 -
>  www/config/SyncPullPushView.js         |  61 -----------
>  www/config/SyncView.js                 | 134 ++++++++++++++++++++-----
>  www/datastore/DataStoreList.js         |   2 +-
>  www/datastore/Panel.js                 |   2 +-
>  10 files changed, 178 insertions(+), 110 deletions(-)
>  delete mode 100644 www/config/SyncPullPushView.js
> 


applied series, thanks!



From f.ebner at proxmox.com  Tue Nov 26 16:13:00 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 16:13:00 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 3/3] ui: datastore edit: fix
 emptytext for path field
In-Reply-To: <20241126151300.71000-1-f.ebner@proxmox.com>
References: <20241126151300.71000-1-f.ebner@proxmox.com>
Message-ID: <20241126151300.71000-4-f.ebner@proxmox.com>

It is a relative path for removable datastores.

Signed-off-by: Fiona Ebner 
---

Dependency bump for widget-toolkit needed.

 www/window/DataStoreEdit.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js
index 40ccd20a..4a0b8d81 100644
--- a/www/window/DataStoreEdit.js
+++ b/www/window/DataStoreEdit.js
@@ -114,8 +114,10 @@ Ext.define('PBS.DataStoreEdit', {
 				uuidEditField.setValue('');
 				if (isRemovable) {
 				    pathField.setFieldLabel(gettext('Path on Device'));
+				    pathField.setEmptyText(gettext('A relative path'));
 				} else {
 				    pathField.setFieldLabel(gettext('Backing Path'));
+				    pathField.setEmptyText(gettext('An absolute path'));
 				}
 			    },
 			},
-- 
2.39.5




From f.gruenbichler at proxmox.com  Tue Nov 26 16:12:55 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Tue, 26 Nov 2024 16:12:55 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <20241126142840.136533-1-h.laimer@proxmox.com>
References: <20241126142840.136533-1-h.laimer@proxmox.com>
Message-ID: <1732633903.fq4vqr0l7k.astroid@yuna.none>

two small (easily done as follow-up) nits below, otherwise:

Reviewed-by: Fabian Grünbichler 

On November 26, 2024 3:28 pm, Hannes Laimer wrote:
> Suggested-by: Fabian Grünbichler 
> Signed-off-by: Hannes Laimer 
> ---
> changes since v1:
>  * config: create/delete: only check for Sys.Modify on /system/disks if
>     removable
> 
>  src/api2/admin/datastore.rs  | 12 +++++++++---
>  src/api2/config/datastore.rs | 13 ++++++++++++-
>  2 files changed, 21 insertions(+), 4 deletions(-)
> 
> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
> index 1c939bc20..cae7eb89c 100644
> --- a/src/api2/admin/datastore.rs
> +++ b/src/api2/admin/datastore.rs
> @@ -45,7 +45,7 @@ use pbs_api_types::{
>      BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
>      IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
>      PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
> -    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
> +    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA,
>      VERIFICATION_OUTDATED_AFTER_SCHEMA,
>  };
>  use pbs_client::pxar::{create_tar, create_zip};
> @@ -2512,7 +2512,10 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
>          schema: UPID_SCHEMA,
>      },
>      access: {
> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
> +        permission: &Permission::And(&[
> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),

so should we adapt this AUDIT to be MODIFY as well, so that this

> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
> +        ]),
>      },
>  )]
>  /// Mount removable datastore.
> @@ -2625,7 +2628,10 @@ fn do_unmount_device(
>          schema: UPID_SCHEMA,
>      },
>      access: {
> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
> +        permission: &Permission::And(&[
> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),

and this lines up?

> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
> +        ]),
>      }
>  )]
>  /// Unmount a removable device that is associated with the datastore
> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
> index 121222c40..d23d7c455 100644
> --- a/src/api2/config/datastore.rs
> +++ b/src/api2/config/datastore.rs
> @@ -14,7 +14,7 @@ use proxmox_uuid::Uuid;
>  use pbs_api_types::{
>      Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
>      MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
> -    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
> +    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY,
>      PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
>  };
>  use pbs_config::BackupLockGuard;
> @@ -204,6 +204,11 @@ pub fn create_datastore(
>      let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>      let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
>  
> +    if config.backing_device.is_some() {
> +        let user_info = CachedUserInfo::new()?;
> +        user_info.check_privs(&auth_id, &["system", "disks"], PRIV_SYS_MODIFY, false)?;
> +    }

this might be added to the permissions description in the schema, so
that it's contained in the api-viewer
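
e.g. a sketch (hypothetical wording) of such a description:

```rust
access: {
    // document the dynamic check done in the function body, so the
    // api-viewer shows it as well
    description: "Requires Datastore.Allocate on '/datastore'. For removable \
        datastores ('backing-device' set), Sys.Modify on '/system/disks' is \
        required in addition.",
    permission: &Permission::Privilege(&["datastore"], PRIV_DATASTORE_ALLOCATE, false),
},
```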

> +
>      let mut prune_job_config = None;
>      if config.keep.keeps_something() || !has_prune_job(&config.name)? {
>          prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
> @@ -580,6 +585,12 @@ pub async fn delete_datastore(
>  
>      let store_config: DataStoreConfig = config.lookup("datastore", &name)?;
>  
> +    if store_config.backing_device.is_some() {
> +        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
> +        let user_info = CachedUserInfo::new()?;
> +        user_info.check_privs(&auth_id, &["system", "disks"], PRIV_SYS_MODIFY, false)?;
> +    }
> +

same here

>      if destroy_data && get_datastore_mount_status(&store_config) == Some(false) {
>          http_bail!(
>              BAD_REQUEST,
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
> 



From f.ebner at proxmox.com  Tue Nov 26 16:12:57 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 16:12:57 +0100
Subject: [pbs-devel] [PATCH proxmox-widget-toolkit/proxmox-backup 0/3] ui:
 slightly improve removable datastore dialog
Message-ID: <20241126151300.71000-1-f.ebner@proxmox.com>

proxmox-widget-toolkit:

Fiona Ebner (1):
  form: display-edit: support emptyText

 src/form/DisplayEdit.js | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

proxmox-backup:

Fiona Ebner (2):
  ui: datastore edit: improve field label name
  ui: datastore edit: fix emptytext for path field

 www/window/DataStoreEdit.js | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

-- 
2.39.5




From f.ebner at proxmox.com  Tue Nov 26 16:12:58 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 16:12:58 +0100
Subject: [pbs-devel] [PATCH widget-toolkit 1/1] form: display-edit: support
 emptyText
In-Reply-To: <20241126151300.71000-1-f.ebner@proxmox.com>
References: <20241126151300.71000-1-f.ebner@proxmox.com>
Message-ID: <20241126151300.71000-2-f.ebner@proxmox.com>

The first user is intended to be the path field for datastores in PBS,
where the emptyText should dynamically indicate either a relative or an
absolute path.

Signed-off-by: Fiona Ebner 
---
 src/form/DisplayEdit.js | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/src/form/DisplayEdit.js b/src/form/DisplayEdit.js
index fe1b83b..3f6a9bb 100644
--- a/src/form/DisplayEdit.js
+++ b/src/form/DisplayEdit.js
@@ -7,6 +7,7 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 	data: {
 	    editable: false,
 	    value: undefined,
+	    emptyText: undefined,
 	},
     },
 
@@ -41,6 +42,19 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 	vm.get('value');
     },
 
+    setEmptyText: function(emptyText) {
+	let me = this;
+	let vm = me.getViewModel();
+
+	me.emptyText = emptyText;
+	vm.set('emptyText', emptyText);
+    },
+    getEmptyText: function() {
+	let me = this;
+	let vm = me.getViewModel();
+	return vm.get('emptyText');
+    },
+
     layout: 'fit',
     defaults: {
 	hideLabel: true,
@@ -77,11 +91,13 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 	    hidden: '{editable}',
 	    disabled: '{editable}',
 	    value: '{value}',
+	    // doesn't have a setEmptyText() method, so don't bind that
 	});
 	Ext.applyIf(editConfig.bind, {
 	    hidden: '{!editable}',
 	    disabled: '{!editable}',
 	    value: '{value}',
+	    emptyText: '{emptyText}',
 	});
 
 	// avoid glitch, start off correct even before viewmodel fixes it
-- 
2.39.5




From f.ebner at proxmox.com  Tue Nov 26 16:12:59 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 16:12:59 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/3] ui: datastore edit: improve
 field label name
In-Reply-To: <20241126151300.71000-1-f.ebner@proxmox.com>
References: <20241126151300.71000-1-f.ebner@proxmox.com>
Message-ID: <20241126151300.71000-3-f.ebner@proxmox.com>

And use title case to be consistent with the other field labels.

Signed-off-by: Fiona Ebner 
---
 www/window/DataStoreEdit.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js
index 23970083..40ccd20a 100644
--- a/www/window/DataStoreEdit.js
+++ b/www/window/DataStoreEdit.js
@@ -113,7 +113,7 @@ Ext.define('PBS.DataStoreEdit', {
 				uuidEditField.allowBlank = !isRemovable;
 				uuidEditField.setValue('');
 				if (isRemovable) {
-				    pathField.setFieldLabel(gettext('On device path'));
+				    pathField.setFieldLabel(gettext('Path on Device'));
 				} else {
 				    pathField.setFieldLabel(gettext('Backing Path'));
 				}
-- 
2.39.5




From h.laimer at proxmox.com  Tue Nov 26 16:20:14 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 16:20:14 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2] api: add Sys.Modify on
 /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <1732633903.fq4vqr0l7k.astroid@yuna.none>
References: <20241126142840.136533-1-h.laimer@proxmox.com>
 <1732633903.fq4vqr0l7k.astroid@yuna.none>
Message-ID: <8762f2c1-0739-4735-bb8f-49ec1bfb61d0@proxmox.com>



On 11/26/24 16:12, Fabian Grünbichler wrote:
> two small (easily done as follow-up) nits below, otherwise:
> 
> Reviewed-by: Fabian Grünbichler 
> 
> On November 26, 2024 3:28 pm, Hannes Laimer wrote:
>> Suggested-by: Fabian Grünbichler 
>> Signed-off-by: Hannes Laimer 
>> ---
>> changes since v1:
>>   * config: create/delete: only check for Sys.Modify on /system/disks if
>>      removable
>>
>>   src/api2/admin/datastore.rs  | 12 +++++++++---
>>   src/api2/config/datastore.rs | 13 ++++++++++++-
>>   2 files changed, 21 insertions(+), 4 deletions(-)
>>
>> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
>> index 1c939bc20..cae7eb89c 100644
>> --- a/src/api2/admin/datastore.rs
>> +++ b/src/api2/admin/datastore.rs
>> @@ -45,7 +45,7 @@ use pbs_api_types::{
>>       BACKUP_TYPE_SCHEMA, CATALOG_NAME, CLIENT_LOG_BLOB_NAME, DATASTORE_SCHEMA,
>>       IGNORE_VERIFIED_BACKUPS_SCHEMA, MANIFEST_BLOB_NAME, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
>>       PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
>> -    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID, UPID_SCHEMA,
>> +    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, PRIV_SYS_MODIFY, UPID, UPID_SCHEMA,
>>       VERIFICATION_OUTDATED_AFTER_SCHEMA,
>>   };
>>   use pbs_client::pxar::{create_tar, create_zip};
>> @@ -2512,7 +2512,10 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
>>           schema: UPID_SCHEMA,
>>       },
>>       access: {
>> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
> 
> so should we adapt this AUDIT to be MODIFY as well, so that this
> 

we can, my reasoning was that if we make this MODIFY there is basically
no "can only view" for removable datastores; being allowed to look at
something kind of implies being allowed to open it. But I get why MODIFY
would also make sense.

>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
>>       },
>>   )]
>>   /// Mount removable datastore.
>> @@ -2625,7 +2628,10 @@ fn do_unmount_device(
>>           schema: UPID_SCHEMA,
>>       },
>>       access: {
>> -        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
>> +        permission: &Permission::And(&[
>> +            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, true),
> 
> and this lines up?
> 
>> +            &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
>> +        ]),
>>       }
>>   )]
>>   /// Unmount a removable device that is associated with the datastore
>> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
>> index 121222c40..d23d7c455 100644
>> --- a/src/api2/config/datastore.rs
>> +++ b/src/api2/config/datastore.rs
>> @@ -14,7 +14,7 @@ use proxmox_uuid::Uuid;
>>   use pbs_api_types::{
>>       Authid, DataStoreConfig, DataStoreConfigUpdater, DatastoreNotify, DatastoreTuning, KeepOptions,
>>       MaintenanceMode, PruneJobConfig, PruneJobOptions, SyncDirection, DATASTORE_SCHEMA,
>> -    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY,
>> +    PRIV_DATASTORE_ALLOCATE, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_MODIFY, PRIV_SYS_MODIFY,
>>       PROXMOX_CONFIG_DIGEST_SCHEMA, UPID_SCHEMA,
>>   };
>>   use pbs_config::BackupLockGuard;
>> @@ -204,6 +204,11 @@ pub fn create_datastore(
>>       let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>>       let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
>>   
>> +    if config.backing_device.is_some() {
>> +        let user_info = CachedUserInfo::new()?;
>> +        user_info.check_privs(&auth_id, &["system", "disks"], PRIV_SYS_MODIFY, false)?;
>> +    }
> 
> this might be added to the permissions description in the schema, so
> that it's contained in the api-viewer
> 
>> +
>>       let mut prune_job_config = None;
>>       if config.keep.keeps_something() || !has_prune_job(&config.name)? {
>>           prune_job_config = config.prune_schedule.as_ref().map(|schedule| {
>> @@ -580,6 +585,12 @@ pub async fn delete_datastore(
>>   
>>       let store_config: DataStoreConfig = config.lookup("datastore", &name)?;
>>   
>> +    if store_config.backing_device.is_some() {
>> +        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>> +        let user_info = CachedUserInfo::new()?;
>> +        user_info.check_privs(&auth_id, &["system", "disks"], PRIV_SYS_MODIFY, false)?;
>> +    }
>> +
> 
> same here
> 
>>       if destroy_data && get_datastore_mount_status(&store_config) == Some(false) {
>>           http_bail!(
>>               BAD_REQUEST,
>> -- 
>> 2.39.5
>>
>>
>>
>> _______________________________________________
>> pbs-devel mailing list
>> pbs-devel at lists.proxmox.com
>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel
>>
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel




From t.lamprecht at proxmox.com  Tue Nov 26 16:25:01 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:25:01 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup v2] api: add Sys.Modify
 on /system/disks as permission to endpoints handling removable datastores
In-Reply-To: <20241126142840.136533-1-h.laimer@proxmox.com>
References: <20241126142840.136533-1-h.laimer@proxmox.com>
Message-ID: <556ea392-f2fc-4ba7-9e4b-372bfb090c1b@proxmox.com>

On 26.11.24 at 15:28, Hannes Laimer wrote:
> Suggested-by: Fabian Grünbichler 
> Signed-off-by: Hannes Laimer 
> ---
> changes since v1:
>  * config: create/delete: only check for Sys.Modify on /system/disks if
>     removable
> 
>  src/api2/admin/datastore.rs  | 12 +++++++++---
>  src/api2/config/datastore.rs | 13 ++++++++++++-
>  2 files changed, 21 insertions(+), 4 deletions(-)
> 
>

applied, with subject slightly improved and the descriptions for the dynamic
access checks squashed in, thanks!

You can send Fabian's other suggestion to switch to Datastore.Modify as a
follow-up.



From t.lamprecht at proxmox.com  Tue Nov 26 16:28:53 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:28:53 +0100
Subject: [pbs-devel] [PATCH widget-toolkit 1/1] form: display-edit:
 support emptyText
In-Reply-To: <20241126151300.71000-2-f.ebner@proxmox.com>
References: <20241126151300.71000-1-f.ebner@proxmox.com>
 <20241126151300.71000-2-f.ebner@proxmox.com>
Message-ID: <0d6fba4f-8f22-4813-9657-8148cdfb125d@proxmox.com>

On 26.11.24 at 16:12, Fiona Ebner wrote:
> First user is intended to be the path field for datastores in PBS
> where the emptyText should dynamically be for a relative or absolute
> path.
> 
> Signed-off-by: Fiona Ebner 
> ---
>  src/form/DisplayEdit.js | 16 ++++++++++++++++
>  1 file changed, 16 insertions(+)
> 
> diff --git a/src/form/DisplayEdit.js b/src/form/DisplayEdit.js
> index fe1b83b..3f6a9bb 100644
> --- a/src/form/DisplayEdit.js
> +++ b/src/form/DisplayEdit.js
> @@ -7,6 +7,7 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
>  	data: {
>  	    editable: false,
>  	    value: undefined,
> +	    emptyText: undefined,
>  	},
>      },
>  
> @@ -41,6 +42,19 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
>  	vm.get('value');
>      },
>  
> +    setEmptyText: function(emptyText) {
> +	let me = this;
> +	let vm = me.getViewModel();
> +
> +	me.emptyText = emptyText;
> +	vm.set('emptyText', emptyText);

did you try to skip this and just directly call the setEmptyText from the
edit field?

> +    },
> +    getEmptyText: function() {
> +	let me = this;
> +	let vm = me.getViewModel();
> +	return vm.get('emptyText');

same here but with getEmptyText from the underlying editField?

I mean, it can be fine as is, but if we can skip tracking this twice (here and
on editField level) it would IMO be a bit more robust.
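
Something along these lines, as a rough sketch (assuming the edit
field's xtype is tracked so it can be looked up with `down()`):

```js
// delegate directly to the underlying edit field instead of keeping a
// second copy of the emptyText state in the view model
setEmptyText: function(emptyText) {
    let me = this;
    me.down(me._editType).setEmptyText(emptyText);
},
getEmptyText: function() {
    let me = this;
    return me.down(me._editType).getEmptyText();
},
```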



From t.lamprecht at proxmox.com  Tue Nov 26 16:35:23 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:35:23 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup 5/5] ui: allow resetting
 unmounting maintenance
In-Reply-To: <20241126114323.105838-6-h.laimer@proxmox.com>
References: <20241126114323.105838-1-h.laimer@proxmox.com>
 <20241126114323.105838-6-h.laimer@proxmox.com>
Message-ID: 

On 26.11.24 at 12:43, Hannes Laimer wrote:
> Signed-off-by: Hannes Laimer 
> ---
> optional, just added it in case we want it
> 
>  www/window/MaintenanceOptions.js | 3 +--
>  1 file changed, 1 insertion(+), 2 deletions(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Tue Nov 26 16:44:07 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:44:07 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup 2/3] ui: datastore edit:
 improve field label name
In-Reply-To: <20241126151300.71000-3-f.ebner@proxmox.com>
References: <20241126151300.71000-1-f.ebner@proxmox.com>
 <20241126151300.71000-3-f.ebner@proxmox.com>
Message-ID: <323a84f7-47e5-4508-a43c-8fc02b18b996@proxmox.com>

On 26.11.24 at 16:12, Fiona Ebner wrote:
> And use title case to be consistent with the other field labels.
> 
> Signed-off-by: Fiona Ebner 
> ---
>  www/window/DataStoreEdit.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied this one for now, thanks!



From t.lamprecht at proxmox.com  Tue Nov 26 16:44:37 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:44:37 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] datastore: re-phrase
 error message when datastore is unavailable
In-Reply-To: <20241126130723.146166-1-s.sterz@proxmox.com>
References: <20241126130723.146166-1-s.sterz@proxmox.com>
Message-ID: 

On 26.11.24 at 14:07, Shannon Sterz wrote:
> the current phrase leads to clumsy log messages such as:
> 
>> datastore 'store' is in datastore is being unmounted
> 
> this commit re-phrases that too:
> 
>> datastore 'store' is unavailable: datastore is being unmounted
> 
> Signed-off-by: Shannon Sterz 
> ---
>  pbs-datastore/src/datastore.rs | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, thanks!



From g.goller at proxmox.com  Tue Nov 26 16:55:00 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Tue, 26 Nov 2024 16:55:00 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] log: adjust log level on sync
 job messages
Message-ID: <20241126155500.457797-1-g.goller@proxmox.com>

Some messages printed during a sync push job should have been logged to
the syslog as well. These are clearly errors, so they should be logged
at the error level accordingly.

Signed-off-by: Gabriel Goller 
---
 src/server/pull.rs |  4 ++--
 src/server/push.rs | 28 ++++++++++++++--------------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/src/server/pull.rs b/src/server/pull.rs
index 9abb673aea00..8afeec46350a 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -8,7 +8,7 @@ use std::time::SystemTime;
 
 use anyhow::{bail, format_err, Error};
 use proxmox_human_byte::HumanByte;
-use tracing::info;
+use tracing::{info, error};
 
 use pbs_api_types::{
     print_store_and_ns, ArchiveType, Authid, BackupArchiveName, BackupDir, BackupGroup,
@@ -837,7 +837,7 @@ pub(crate) async fn pull_store(mut params: PullParameters) -> Result<SyncStats, Error> {
                 errors = true;
-                info!(
+                error!(
                     "Encountered errors while syncing namespace {} - {err}",
                     &namespace,
                 );
diff --git a/src/server/push.rs b/src/server/push.rs
index 99757a3cc355..dcb238c06190 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -7,7 +7,7 @@ use anyhow::{bail, Context, Error};
 use futures::stream::{self, StreamExt, TryStreamExt};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
-use tracing::{info, warn};
+use tracing::{info, warn, error};
 
 use pbs_api_types::{
     print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName,
@@ -429,8 +429,8 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result<SyncStats, Error> {
                 errors = true;
-                info!("Encountered errors: {err:#}");
-                info!("Failed to sync {source_store_and_ns} into {target_store_and_ns}!");
+                error!("Encountered errors: {err:#}");
+                error!("Failed to sync {source_store_and_ns} into {target_store_and_ns}!");
             }
         }
     }
@@ -478,8 +478,8 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result<SyncStats, Error> {
-                    warn!("Encountered errors: {err:#}");
-                    warn!("Failed to remove vanished namespace {target_namespace} from remote!");
+                    error!("Encountered errors: {err:#}");
+                    error!("Failed to remove vanished namespace {target_namespace} from remote!");
                     continue;
                 }
             }
@@ -554,8 +554,8 @@ pub(crate) async fn push_namespace(
         match push_group(params, namespace, &group, &mut progress).await {
             Ok(sync_stats) => stats.add(sync_stats),
             Err(err) => {
-                warn!("Encountered errors: {err:#}");
-                warn!("Failed to push group {group} to remote!");
+                error!("Encountered errors: {err:#}");
+                error!("Failed to push group {group} to remote!");
                 errors = true;
             }
         }
@@ -587,8 +587,8 @@ pub(crate) async fn push_namespace(
                     }));
                 }
                 Err(err) => {
-                    warn!("Encountered errors: {err:#}");
-                    warn!("Failed to remove vanished group {target_group} from remote!");
+                    error!("Encountered errors: {err:#}");
+                    error!("Failed to remove vanished group {target_group} from remote!");
                     errors = true;
                     continue;
                 }
@@ -748,8 +748,8 @@ pub(crate) async fn push_group(
                     );
                 }
                 Err(err) => {
-                    warn!("Encountered errors: {err:#}");
-                    warn!(
+                    error!("Encountered errors: {err:#}");
+                    error!(
                         "Failed to remove vanished snapshot {name} from remote!",
                         name = snapshot.backup
                     );
@@ -793,8 +793,8 @@ pub(crate) async fn push_snapshot(
         Ok((manifest, _raw_size)) => manifest,
         Err(err) => {
             // No manifest in snapshot or failed to read, warn and skip
-            log::warn!("Encountered errors: {err:#}");
-            log::warn!("Failed to load manifest for '{snapshot}'!");
+            error!("Encountered errors: {err:#}");
+            error!("Failed to load manifest for '{snapshot}'!");
             return Ok(stats);
         }
     };
@@ -816,7 +816,7 @@ pub(crate) async fn push_snapshot(
     if fetch_previous_manifest {
         match backup_writer.download_previous_manifest().await {
             Ok(manifest) => previous_manifest = Some(Arc::new(manifest)),
-            Err(err) => log::info!("Could not download previous manifest - {err}"),
+            Err(err) => info!("Could not download previous manifest - {err}"),
         }
     };
 
-- 
2.39.5




From t.lamprecht at proxmox.com  Tue Nov 26 16:55:14 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:55:14 +0100
Subject: [pbs-devel] applied: [PATCH v2 proxmox 1/5] time: fix typos in
 `TimeSpan` related docstring
In-Reply-To: <20241023091103.80792-2-c.ebner@proxmox.com>
References: <20241023091103.80792-1-c.ebner@proxmox.com>
 <20241023091103.80792-2-c.ebner@proxmox.com>
Message-ID: <0fe21322-fcad-4332-92c0-512d4ec25428@proxmox.com>

On 23.10.24 at 11:10, Christian Ebner wrote:
> Signed-off-by: Christian Ebner 
> ---
> changes since version 1:
> - not present in previous version
> 
>  proxmox-time/src/time_span.rs | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Tue Nov 26 16:55:23 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 16:55:23 +0100
Subject: [pbs-devel] applied: [PATCH v2 proxmox 2/5] time: also implement
 `From<&TimeSpan> for f64`
In-Reply-To: <20241023091103.80792-3-c.ebner@proxmox.com>
References: <20241023091103.80792-1-c.ebner@proxmox.com>
 <20241023091103.80792-3-c.ebner@proxmox.com>
Message-ID: <3de2326e-e392-493c-ab86-7238df7ba0cb@proxmox.com>

On 23.10.24 at 11:11, Christian Ebner wrote:
> Extend the already present `From<TimeSpan> for f64` implementation to
> allow using the reference as well. There is no need to take ownership
> and consume the `TimeSpan` object for conversion.
> 
> Signed-off-by: Christian Ebner 
> ---
> changes since version 1:
> - not present in previous version
> 
>  proxmox-time/src/time_span.rs | 10 ++++++++--
>  1 file changed, 8 insertions(+), 2 deletions(-)
> 
>

applied, thanks!
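
For context, the pattern being applied here as a minimal sketch, with
hypothetical fields (the real `TimeSpan` has many more components):

```rust
struct TimeSpan {
    seconds: u64,
    msec: u64,
}

// implement the conversion for the reference ...
impl From<&TimeSpan> for f64 {
    fn from(ts: &TimeSpan) -> f64 {
        ts.seconds as f64 + ts.msec as f64 / 1000.0
    }
}

// ... and let the owning impl delegate, so callers are not forced to
// give up ownership just to convert
impl From<TimeSpan> for f64 {
    fn from(ts: TimeSpan) -> f64 {
        f64::from(&ts)
    }
}
```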



From a.zeidler at proxmox.com  Tue Nov 26 16:57:18 2024
From: a.zeidler at proxmox.com (Alexander Zeidler)
Date: Tue, 26 Nov 2024 16:57:18 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] docs: installation: several
 small fixes/improvements
Message-ID: <20241126155718.3-1-a.zeidler@proxmox.com>

* consistently use "medium" (singular), as only one is needed for
  installation (installation-media.rst not renamed)
* add short introduction to recently added chapter "Installation Media"
* update minimum required flash drive storage space to 2 GB
* remove CD-ROM (too less storage space available) but keep DVD
* mention explicitly that data get overwritten on installation media /
  installation target disks
* mention that using `dd` requires to be the root user or use `sudo`
* add accidentally cut off text when copying from PVE docs
* add reference labels to currently needed section titles
* reword some paragraphs for completeness and readability
* mention all installation methods in the intro of "Server Installation"
* add the boot order as possible boot issue
* remove recently added redundant product website hyperlinks (as earlier
  with commit 34407477e2)
* fix broken heading level of APT-based PBC repo

* slightly reorder sub-chapters of "Installation":

After adding the chapter "Installation Media" (d363818641), the chapter
order under "Installation" is:

1. System Requirements
2. Installation Media
3. Debian Package Repositories
4. Server Installation
5. Client Installation

But repos are more likely to be configured after installation, and for
other installation methods chapter links exist anyway. So to keep the
chapter order more logical, "Debian Package Repositories" is now moved
after "Client Installation".

Signed-off-by: Alexander Zeidler 
---
 docs/installation-media.rst   | 56 +++++++++++++++++++++--------------
 docs/installation.rst         | 41 +++++++++++++++++--------
 docs/package-repositories.rst |  2 +-
 docs/using-the-installer.rst  | 25 ++++++++--------
 4 files changed, 75 insertions(+), 49 deletions(-)

diff --git a/docs/installation-media.rst b/docs/installation-media.rst
index e109f2ba..ce293e4c 100644
--- a/docs/installation-media.rst
+++ b/docs/installation-media.rst
@@ -1,44 +1,53 @@
-.. _installation_media:
+.. _installation_medium:
 
-Installation Media
-------------------
+Installation Medium
+-------------------
 
-Prepare Installation Media
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+Proxmox Backup Server can be installed via
+:ref:`different methods <install_pbs>`. The recommended method is the
+usage of an installation medium, to simply boot the interactive
+installer.
+
+
+Prepare Installation Medium
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Download the installer ISO image from |DOWNLOADS|.
 
-The `Proxmox Backup`_ Server installation media is a hybrid ISO image. It works
-in two ways:
+The Proxmox Backup Server installation medium is a hybrid ISO image.
+It works in two ways:
 
-- An ISO image file ready to burn to a CD or DVD.
+- An ISO image file ready to burn to a DVD.
 
 - A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).
 
-Using a USB flash drive to install `Proxmox Backup`_ Server is the recommended
-way since it is the faster option.
+Using a USB flash drive to install Proxmox Backup Server is the
+recommended way since it is the faster and more frequently available
+option these days.
 
 Prepare a USB Flash Drive as Installation Medium
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The flash drive needs to have at least 1 GB of storage available.
+The flash drive needs to have at least 2 GB of storage space.
 
 .. note::
 
-   Do not use *UNetbootin*. It does not work with the `Proxmox Backup`_ Server
-   installation image.
+   Do not use *UNetbootin*. It does not work with the Proxmox Backup
+   Server installation image.
 
 .. important::
 
-   Make sure that the USB flash drive is not mounted and does not
-   contain any important data.
+   Existing data on the USB flash drive will be overwritten. Therefore
+   make sure that it does not contain any still needed data and
+   unmount it afterwards again before proceeding.
 
 Instructions for GNU/Linux
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-On Unix-like operating system use the ``dd`` command to copy the ISO
+On Unix-like operating systems use the ``dd`` command to copy the ISO
 image to the USB flash drive. First find the correct device name of the
-USB flash drive (see below). Then run the ``dd`` command.
+USB flash drive (see below). Then run the ``dd`` command when logged
+in as root user, or use ``sudo dd `` otherwise.
 
 .. code-block:: console
 
@@ -132,16 +141,17 @@ Using Rufus
 
 Rufus is a more lightweight alternative, but you need to use the **DD
 mode** to make it work. Download Rufus from https://rufus.ie/. Either
-install it or use
+install it or use the portable version. Select the destination drive
+and the downloaded Proxmox ISO file.
 
 .. important::
 
    Once you click *Start*, you have to click *No* on the dialog asking to
    download a different version of Grub. In the next dialog select **DD mode**.
 
-Boot your Server from the USB Flash Drive
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Use the Installation Medium
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Connect the USB flash drive to your server and make sure that booting from USB
-is enabled (check your servers firmware settings). Then follow the steps of the
-:ref:`installation wizard <using_the_installer>`.
+Insert the created USB flash drive (or DVD) into your server. Continue
+by reading the :ref:`installer <using_the_installer>` chapter, which
+also describes possible boot issues.
diff --git a/docs/installation.rst b/docs/installation.rst
index 79cba840..2e93713a 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -9,7 +9,7 @@ Debian_ from the provided package repository.
 
 .. include:: installation-media.rst
 
-.. include:: package-repositories.rst
+.. _install_pbs:
 
 Server Installation
 -------------------
@@ -20,24 +20,34 @@ for various management tasks such as disk management.
 .. note:: You always need a backup server. It is not possible to use
    Proxmox Backup without the server part.
 
-The disk image (ISO file) provided by Proxmox includes a complete Debian system
-as well as all necessary packages for the Proxmox Backup Server.
+Using our provided disk image (ISO file) is the recommended
+installation method, as it includes a convenient installer, a complete
+Debian system as well as all necessary packages for the Proxmox Backup
+Server.
+
+Once you have created an :ref:`installation_medium`, the booted
+:ref:`installer <using_the_installer>` will guide you through the
+setup process. Thereby you partition the local disks, apply basic
+system configuration like the timezone, language and network, and
+finally install all required packages within minutes.
 
-The installer will guide you through the setup process and allow
-you to partition the local disk(s), apply basic system configuration
-(for example timezone, language, network), and install all required packages.
-The provided ISO will get you started in just a few minutes, and is the
-recommended method for new and existing users.
+As an alternative to the interactive installer, advanced users may
+wish to install Proxmox Backup Server
+:ref:`unattended <install_pbs_unattended>`.
 
-Alternatively, Proxmox Backup Server can be installed on top of an
-existing Debian system. This option is only recommended for advanced users
-because detailed knowledge about Proxmox Backup Server is required.
+With sufficient Debian knowledge, you can also install Proxmox Backup
+Server :ref:`on top of Debian <install_pbs_on_debian>` yourself.
+
+While not recommended, Proxmox Backup Server could also be installed
+:ref:`on Proxmox VE <install_pbs_on_pve>`.
 
 .. include:: using-the-installer.rst
 
+.. _install_pbs_unattended:
+
 Install `Proxmox Backup`_ Server Unattended
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It is possible to install `Proxmox Backup`_ Server automatically in an
+It is possible to install Proxmox Backup Server automatically in an
 unattended manner. This enables you to fully automate the setup process on
 bare-metal. Once the installation is complete and the host has booted up,
 automation tools like Ansible can be used to further configure the installation.
@@ -51,6 +61,7 @@ installation ISO.  For more details and information on the unattended
 installation see `our wiki
 `_.
 
+.. _install_pbs_on_debian:
 
 Install `Proxmox Backup`_ Server on Debian
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -88,6 +99,8 @@ support, and a set of common and useful packages.
    your web browser, using HTTPS on port 8007. For example at
    ``https://:8007``
 
+.. _install_pbs_on_pve:
+
 Install Proxmox Backup Server on `Proxmox VE`_
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -108,6 +121,8 @@ After configuring the
    your web browser, using HTTPS on port 8007. For example at
    ``https://:8007``
 
+.. _install_pbc:
+
 Client Installation
 -------------------
 
@@ -123,7 +138,7 @@ you need to run:
   # apt update
   # apt install proxmox-backup-client
 
-
 .. note:: The client-only repository should be usable by most recent Debian and
    Ubuntu derivatives.
 
+.. include:: package-repositories.rst
diff --git a/docs/package-repositories.rst b/docs/package-repositories.rst
index b429b4b4..aecd6c64 100644
--- a/docs/package-repositories.rst
+++ b/docs/package-repositories.rst
@@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.
 .. _package_repositories_client_only_apt:
 
 APT-based Proxmox Backup Client Repository
-++++++++++++++++++++++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 For modern Linux distributions using `apt` as package manager, like all Debian
 and Ubuntu Derivative do, you may be able to use the APT-based repository.
diff --git a/docs/using-the-installer.rst b/docs/using-the-installer.rst
index cec640c5..85d7c75b 100644
--- a/docs/using-the-installer.rst
+++ b/docs/using-the-installer.rst
@@ -17,26 +17,27 @@ It includes the following:
 
 * Web-based management interface
 
-.. note:: All existing data on the selected drives will be removed during the
-   installation process. The installer does not add boot menu entries for other
-   operating systems.
+.. note:: Any existing data on the selected drives will be overwritten
+   during the installation process. The installer does not add boot
+   menu entries for other operating systems.
 
-Please insert the :ref:`installation_media` (for example, USB flash drive or
-CD-ROM) and boot from it.
+Please insert the :ref:`installation_medium` (for example, USB flash
+drive or DVD) and boot from it.
 
-.. note:: Make sure that booting from the installation medium (for example, USB)
-   is enabled in your server's firmware settings. Secure boot needs to be
-   disabled when booting an installer prior to `Proxmox Backup`_ Server version
-   3.1.
+.. note:: You may need to go into your server's firmware settings, to
+   enable booting from your installation medium (for example, USB) and
+   set the desired boot order. When booting an installer prior to
+   `Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
+   disabled.
 
 .. image:: images/screenshots/pbs-installer-grub-menu.png
   :target: _images/pbs-installer-grub-menu.png
   :align: right
   :alt: Proxmox Backup Server Installer GRUB Menu
 
-After choosing the correct entry (for example, *Boot from USB*) the `Proxmox
-Backup`_ Server menu will be displayed, and one of the following options can be
-selected:
+After choosing the correct entry (for example, *Boot from USB*) the
+Proxmox Backup Server menu will be displayed, and one of the following
+options can be selected:
 
 **Install Proxmox Backup Server (Graphical)**
 
-- 
2.39.5




From c.ebner at proxmox.com  Tue Nov 26 17:14:52 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Tue, 26 Nov 2024 17:14:52 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] client: backup writer: fix
 regression in progress output
Message-ID: <20241126161452.404685-1-c.ebner@proxmox.com>

Fixes a regression introduced when switching from the plain string
to be used for archive names to the BackupArchiveName api type in
commit addfae26 ("api types: introduce `BackupArchiveName` type").

The archive name now always is stored including the server archive
name extension. Adapt the check for which archive types to display
the progress log output to reflect this change.

Fixes: addfae26 ("api types: introduce `BackupArchiveName` type")
Reported-by: Max Carrara 
Signed-off-by: Christian Ebner 
---
 pbs-client/src/backup_writer.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pbs-client/src/backup_writer.rs b/pbs-client/src/backup_writer.rs
index baf2aebb1..f321ea403 100644
--- a/pbs-client/src/backup_writer.rs
+++ b/pbs-client/src/backup_writer.rs
@@ -847,9 +847,9 @@ impl BackupWriter {
         let (upload_queue, upload_result) =
             Self::append_chunk_queue(h2.clone(), wid, append_chunk_path, uploaded_len.clone());
 
-        let progress_handle = if archive.ends_with(".img")
-            || archive.ends_with(".pxar")
-            || archive.ends_with(".ppxar")
+        let progress_handle = if archive.ends_with(".img.fidx")
+            || archive.ends_with(".pxar.didx")
+            || archive.ends_with(".ppxar.didx")
         {
             let counters = counters.clone();
             Some(tokio::spawn(async move {
-- 
2.39.5




From f.ebner at proxmox.com  Tue Nov 26 17:20:04 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:20:04 +0100
Subject: [pbs-devel] [PATCH v2 widget-toolkit] form: display-edit: support
 emptyText
In-Reply-To: <20241126162005.85583-1-f.ebner@proxmox.com>
References: <20241126162005.85583-1-f.ebner@proxmox.com>
Message-ID: <20241126162005.85583-2-f.ebner@proxmox.com>

To access the edit field, its xtype is now tracked.

The first user is intended to be the path field for datastores, where
the emptyText should dynamically indicate either a relative or an
absolute path.

Signed-off-by: Fiona Ebner 
---

Changes in v2:
* avoid tracking emptyText state twice, pass directly to edit field.

 src/form/DisplayEdit.js | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/form/DisplayEdit.js b/src/form/DisplayEdit.js
index fe1b83b..01d6a05 100644
--- a/src/form/DisplayEdit.js
+++ b/src/form/DisplayEdit.js
@@ -12,6 +12,9 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 
     displayType: 'displayfield',
 
+    // internal only, use editConfig to set the xtype
+    _editType: 'textfield',
+
     editConfig: {},
     editable: false,
     setEditable: function(editable) {
@@ -41,6 +44,15 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 	vm.get('value');
     },
 
+    setEmptyText: function(emptyText) {
+	let me = this;
+	me.down(me._editType).setEmptyText(emptyText);
+    },
+    getEmptyText: function() {
+	let me = this;
+	return me.down(me._editType).getEmptyText();
+    },
+
     layout: 'fit',
     defaults: {
 	hideLabel: true,
@@ -64,6 +76,8 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 	});
 	Ext.applyIf(editConfig, displayConfig);
 
+	me._editType = editConfig.xtype;
+
 	if (me.initialConfig && me.initialConfig.displayConfig) {
 	    Ext.applyIf(displayConfig, me.initialConfig.displayConfig);
 	    delete displayConfig.displayConfig;
-- 
2.39.5




From f.ebner at proxmox.com  Tue Nov 26 17:20:03 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:20:03 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-widget-toolkit/proxmox-backup 0/2]
 ui: slightly improve removable datastore dialog
Message-ID: <20241126162005.85583-1-f.ebner@proxmox.com>

Changes in v2:
* avoid tracking emptyText state twice

proxmox-widget-toolkit:

Fiona Ebner (1):
  form: display-edit: support emptyText

 src/form/DisplayEdit.js | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

proxmox-backup:

Fiona Ebner (1):
  ui: datastore edit: fix emptytext for path field

 www/window/DataStoreEdit.js | 2 ++
 1 file changed, 2 insertions(+)

-- 
2.39.5




From f.ebner at proxmox.com  Tue Nov 26 17:20:05 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:20:05 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/2] ui: datastore edit: fix
 emptytext for path field
In-Reply-To: <20241126162005.85583-1-f.ebner@proxmox.com>
References: <20241126162005.85583-1-f.ebner@proxmox.com>
Message-ID: <20241126162005.85583-3-f.ebner@proxmox.com>

It is a relative path for removable datastores.

Signed-off-by: Fiona Ebner 
---

No changes in v2.

 www/window/DataStoreEdit.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js
index 40ccd20a..4a0b8d81 100644
--- a/www/window/DataStoreEdit.js
+++ b/www/window/DataStoreEdit.js
@@ -114,8 +114,10 @@ Ext.define('PBS.DataStoreEdit', {
 				uuidEditField.setValue('');
 				if (isRemovable) {
 				    pathField.setFieldLabel(gettext('Path on Device'));
+				    pathField.setEmptyText(gettext('A relative path'));
 				} else {
 				    pathField.setFieldLabel(gettext('Backing Path'));
+				    pathField.setEmptyText(gettext('An absolute path'));
 				}
 			    },
 			},
-- 
2.39.5




From s.sterz at proxmox.com  Tue Nov 26 17:22:46 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Tue, 26 Nov 2024 17:22:46 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] docs: installation: several
 small fixes/improvements
In-Reply-To: <20241126155718.3-1-a.zeidler@proxmox.com>
References: <20241126155718.3-1-a.zeidler@proxmox.com>
Message-ID: 

On Tue Nov 26, 2024 at 4:57 PM CET, Alexander Zeidler wrote:
> * consistently use "medium" (singular), as only one is needed for
>   installation (installation-media.rst not renamed)
> * add short introduction to recently added chapter "Installation Media"
> * update minimum required flash drive storage space to 2 GB
> * remove CD-ROM (too less storage space available) but keep DVD

this would be "too little"; "too less" is grammatically incorrect

> * mention explicitly that data get overwritten on installation media /
>   installation target disks
> * mention that using `dd` requires to be the root user or use `sudo`
> * add accidentally cut off text when copying from PVE docs
> * add reference labels to currently needed section titles
> * reword some paragraphs for completeness and readability
> * mention all installation methods in the intro of "Server Installation"
> * add the boot order as possible boot issue
> * remove recently added redundant product website hyperlinks (as earlier
>   with commit 34407477e2)
> * fix broken heading level of APT-based PBC repo
>
> * slightly reorder sub-chapters of "Installation":
>
> After adding the chapter "Installation Media" (d363818641), the chapter
> order under "Installation" is:
>
> 1. System Requirements
> 2. Installation Media
> 3. Debian Package Repositories
> 4. Server Installation
> 5. Client Installation
>
> But repos are more likely to be configured after installation, and for
> other installation methods chapter links exist anyway. So to keep the
> chapter order more logical, "Debian Package Repositories" is now moved
> after "Client Installation".
>
> Signed-off-by: Alexander Zeidler 
> ---
>  docs/installation-media.rst   | 56 +++++++++++++++++++++--------------
>  docs/installation.rst         | 41 +++++++++++++++++--------
>  docs/package-repositories.rst |  2 +-
>  docs/using-the-installer.rst  | 25 ++++++++--------
>  4 files changed, 75 insertions(+), 49 deletions(-)
>
> diff --git a/docs/installation-media.rst b/docs/installation-media.rst
> index e109f2ba..ce293e4c 100644
> --- a/docs/installation-media.rst
> +++ b/docs/installation-media.rst
> @@ -1,44 +1,53 @@
> -.. _installation_media:
> +.. _installation_medium:
>
> -Installation Media
> -------------------
> +Installation Medium
> +-------------------
>
> -Prepare Installation Media
> -~~~~~~~~~~~~~~~~~~~~~~~~~~
> +Proxmox Backup Server can be installed via
> +:ref:`different methods `. The recommended method is the
> +usage of a installation medium, to simply boot the interactive

of *an* installation medium

> +installer.
> +
> +
> +Prepare Installation Medium
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~
>
>  Download the installer ISO image from |DOWNLOADS|.
>
> -The `Proxmox Backup`_ Server installation media is a hybrid ISO image. It works
> -in two ways:
> +The Proxmox Backup Server installation medium is a hybrid ISO image.
> +It works in two ways:
>
> -- An ISO image file ready to burn to a CD or DVD.
> +- An ISO image file ready to burn to a DVD.
>
>  - A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).
>
> -Using a USB flash drive to install `Proxmox Backup`_ Server is the recommended
> -way since it is the faster option.
> +Using a USB flash drive to install Proxmox Backup Server is the
> +recommended way since it is the faster and more frequently available
> +option these days.
>
>  Prepare a USB Flash Drive as Installation Medium
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>
> -The flash drive needs to have at least 1 GB of storage available.
> +The flash drive needs to have at least 2 GB of storage space.
>
>  .. note::
>
> -   Do not use *UNetbootin*. It does not work with the `Proxmox Backup`_ Server
> -   installation image.
> +   Do not use *UNetbootin*. It does not work with the Proxmox Backup
> +   Server installation image.
>
>  .. important::
>
> -   Make sure that the USB flash drive is not mounted and does not
> -   contain any important data.
> +   Existing data on the USB flash drive will be overwritten. Therefore

comma missing after `Therefore`

> +   make sure that it does not contain any still needed data and
> +   unmount it afterwards again before proceeding.
>
>  Instructions for GNU/Linux
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~
>
> -On Unix-like operating system use the ``dd`` command to copy the ISO
> +On Unix-like operating systems use the ``dd`` command to copy the ISO
>  image to the USB flash drive. First find the correct device name of the
> -USB flash drive (see below). Then run the ``dd`` command.
> +USB flash drive (see below). Then run the ``dd`` command when logged
> +in as root user, or use ``sudo dd `` otherwise.

sudo isn't available on all Unix-like operating systems; it isn't on
Proxmox Backup Server, for example.

if you want to keep this, maybe a more general "Depending on your
environment, you will need to have root privileges to execute ``dd``."
would be better.

>
>  .. code-block:: console
>
> @@ -132,16 +141,17 @@ Using Rufus
>
>  Rufus is a more lightweight alternative, but you need to use the **DD
>  mode** to make it work. Download Rufus from https://rufus.ie/. Either
> -install it or use
> +install it or use the portable version. Select the destination drive
> +and the downloaded Proxmox ISO file.
>
>  .. important::
>
>     Once you click *Start*, you have to click *No* on the dialog asking to
>     download a different version of Grub. In the next dialog select **DD mode**.
>
> -Boot your Server from the USB Flash Drive
> -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> +Use the Installation Medium
> +~~~~~~~~~~~~~~~~~~~~~~~~~~~
>
> -Connect the USB flash drive to your server and make sure that booting from USB
> -is enabled (check your servers firmware settings). Then follow the steps of the
> -:ref:`installation wizard `.
> +Insert the created USB flash drive (or DVD) into your server. Continue
> +by reading the :ref:`installer ` chapter, which
> +also describes possible boot issues.
> diff --git a/docs/installation.rst b/docs/installation.rst
> index 79cba840..2e93713a 100644
> --- a/docs/installation.rst
> +++ b/docs/installation.rst
> @@ -9,7 +9,7 @@ Debian_ from the provided package repository.
>
>  .. include:: installation-media.rst
>
> -.. include:: package-repositories.rst
> +.. _install_pbs:
>
>  Server Installation
>  -------------------
> @@ -20,24 +20,34 @@ for various management tasks such as disk management.
>  .. note:: You always need a backup server. It is not possible to use
>     Proxmox Backup without the server part.
>
> -The disk image (ISO file) provided by Proxmox includes a complete Debian system
> -as well as all necessary packages for the Proxmox Backup Server.
> +Using our provided disk image (ISO file) is the recommended
> +installation method, as it includes a convenient installer, a complete
> +Debian system as well as all necessary packages for the Proxmox Backup
> +Server.
> +
> +Once you created a :ref:`installation_medium`, the booted
> +:ref:`installer ` will guide you through the
> +setup process. Thereby you partition the local disks, apply basic

imo there is an "a" missing here:

Thereby you partition the local disks, apply a basic...

> +system configuration like the timezone, language and network, and

imo "settings" after "network" would make this sound better; like this it
feels like something is missing. you don't really "apply the network",
you "apply network settings". or you do "apply a basic configuration for
the timezone, language and network".

> +finally install all required packages within minutes.
>
> -The installer will guide you through the setup process and allow
> -you to partition the local disk(s), apply basic system configuration
> -(for example timezone, language, network), and install all required packages.
> -The provided ISO will get you started in just a few minutes, and is the
> -recommended method for new and existing users.
> +As an alternative to the interactive installer, advanced users may
> +wish to install Proxmox Backup Server
> +:ref:`unattended `.
>
> -Alternatively, Proxmox Backup Server can be installed on top of an
> -existing Debian system. This option is only recommended for advanced users
> -because detailed knowledge about Proxmox Backup Server is required.
> +With sufficient Debian knowledge, you can also install Proxmox Backup
> +Server :ref:`on top of Debian ` yourself.
> +
> +While not recommended, Proxmox Backup Server could also be installed
> +:ref:`on Proxmox VE `.
>
>  .. include:: using-the-installer.rst
>
> +.. _install_pbs_unattended:
> +
>  Install `Proxmox Backup`_ Server Unattended
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> -It is possible to install `Proxmox Backup`_ Server automatically in an
> +It is possible to install Proxmox Backup Server automatically in an
>  unattended manner. This enables you to fully automate the setup process on
>  bare-metal. Once the installation is complete and the host has booted up,
>  automation tools like Ansible can be used to further configure the installation.
> @@ -51,6 +61,7 @@ installation ISO.  For more details and information on the unattended
>  installation see `our wiki
>  `_.
>
> +.. _install_pbs_on_debian:
>
>  Install `Proxmox Backup`_ Server on Debian
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
> @@ -88,6 +99,8 @@ support, and a set of common and useful packages.
>     your web browser, using HTTPS on port 8007. For example at
>     ``https://:8007``
>
> +.. _install_pbs_on_pve:
> +
>  Install Proxmox Backup Server on `Proxmox VE`_
>  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>
> @@ -108,6 +121,8 @@ After configuring the
>     your web browser, using HTTPS on port 8007. For example at
>     ``https://:8007``
>
> +.. _install_pbc:
> +
>  Client Installation
>  -------------------
>
> @@ -123,7 +138,7 @@ you need to run:
>    # apt update
>    # apt install proxmox-backup-client
>
> -
>  .. note:: The client-only repository should be usable by most recent Debian and
>     Ubuntu derivatives.
>
> +.. include:: package-repositories.rst
> diff --git a/docs/package-repositories.rst b/docs/package-repositories.rst
> index b429b4b4..aecd6c64 100644
> --- a/docs/package-repositories.rst
> +++ b/docs/package-repositories.rst
> @@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.
>  .. _package_repositories_client_only_apt:
>
>  APT-based Proxmox Backup Client Repository
> -++++++++++++++++++++++++++++++++++++++++++
> +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>
>  For modern Linux distributions using `apt` as package manager, like all Debian
>  and Ubuntu Derivative do, you may be able to use the APT-based repository.
> diff --git a/docs/using-the-installer.rst b/docs/using-the-installer.rst
> index cec640c5..85d7c75b 100644
> --- a/docs/using-the-installer.rst
> +++ b/docs/using-the-installer.rst
> @@ -17,26 +17,27 @@ It includes the following:
>
>  * Web-based management interface
>
> -.. note:: All existing data on the selected drives will be removed during the
> -   installation process. The installer does not add boot menu entries for other
> -   operating systems.
> +.. note:: Any existing data on the selected drives will be overwritten
> +   during the installation process. The installer does not add boot
> +   menu entries for other operating systems.
>
> -Please insert the :ref:`installation_media` (for example, USB flash drive or
> -CD-ROM) and boot from it.
> +Please insert the :ref:`installation_medium` (for example, USB flash
> +drive or DVD) and boot from it.
>
> -.. note:: Make sure that booting from the installation medium (for example, USB)
> -   is enabled in your server's firmware settings. Secure boot needs to be
> -   disabled when booting an installer prior to `Proxmox Backup`_ Server version
> -   3.1.
> +.. note:: You may need to go into your server's firmware settings, to
> +   enable booting from your installation medium (for example, USB) and
> +   set the desired boot order. When booting an installer prior to
> +   `Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
> +   disabled.
>
>  .. image:: images/screenshots/pbs-installer-grub-menu.png
>    :target: _images/pbs-installer-grub-menu.png
>    :align: right
>    :alt: Proxmox Backup Server Installer GRUB Menu
>
> -After choosing the correct entry (for example, *Boot from USB*) the `Proxmox
> -Backup`_ Server menu will be displayed, and one of the following options can be
> -selected:
> +After choosing the correct entry (for example, *Boot from USB*) the
> +Proxmox Backup Server menu will be displayed, and one of the following
> +options can be selected:
>
>  **Install Proxmox Backup Server (Graphical)**
>




From f.ebner at proxmox.com  Tue Nov 26 17:23:48 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:23:48 +0100
Subject: [pbs-devel] [PATCH widget-toolkit 1/1] form: display-edit:
 support emptyText
In-Reply-To: <0d6fba4f-8f22-4813-9657-8148cdfb125d@proxmox.com>
References: <20241126151300.71000-1-f.ebner@proxmox.com>
 <20241126151300.71000-2-f.ebner@proxmox.com>
 <0d6fba4f-8f22-4813-9657-8148cdfb125d@proxmox.com>
Message-ID: 

On 26.11.24 at 16:28, Thomas Lamprecht wrote:
> On 26.11.24 at 16:12, Fiona Ebner wrote:
>> @@ -41,6 +42,19 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
>>  	vm.get('value');
>>      },
>>  
>> +    setEmptyText: function(emptyText) {
>> +	let me = this;
>> +	let vm = me.getViewModel();
>> +
>> +	me.emptyText = emptyText;
>> +	vm.set('emptyText', emptyText);
> 
> did you try to skip this and just directly call the setEmptyText from the
> edit field?
> 
>> +    },
>> +    getEmptyText: function() {
>> +	let me = this;
>> +	let vm = me.getViewModel();
>> +	return vm.get('emptyText');
> 
> same here but with getEmptyText from the underlying editField?
> 
> I mean, it can be fine as is, but if we can skip tracking this twice (here and
> on editField level) it would IMO be a bit more robust.

As also quickly discussed off-list, the slightly hairy bit is getting to
the edit item. I opted for tracking the xtype of the field in v2:
https://lore.proxmox.com/pbs-devel/20241126162005.85583-1-f.ebner at proxmox.com/T/



From f.ebner at proxmox.com  Tue Nov 26 17:25:15 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:25:15 +0100
Subject: [pbs-devel] [PATCH v2 widget-toolkit] form: display-edit:
 support emptyText
In-Reply-To: <20241126162005.85583-2-f.ebner@proxmox.com>
References: <20241126162005.85583-1-f.ebner@proxmox.com>
 <20241126162005.85583-2-f.ebner@proxmox.com>
Message-ID: <33c92137-b2cb-4e91-820d-bd3d96756662@proxmox.com>



On 26.11.24 at 17:20, Fiona Ebner wrote:
> To access the edit field, its xtype is now tracked.
> 
> First user is intended to be the path field for datastores, where the
> emptyText should dynamically reflect whether a relative or an absolute
> path is expected.
> 
> Signed-off-by: Fiona Ebner 
> ---
> 
> Changes in v2:
> * avoid tracking emptyText state twice, pass directly to edit field.
> 
>  src/form/DisplayEdit.js | 14 ++++++++++++++
>  1 file changed, 14 insertions(+)
> 
> diff --git a/src/form/DisplayEdit.js b/src/form/DisplayEdit.js
> index fe1b83b..01d6a05 100644
> --- a/src/form/DisplayEdit.js
> +++ b/src/form/DisplayEdit.js
> @@ -12,6 +12,9 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
>  
>      displayType: 'displayfield',
>  
> +    // internal only, use editConfig to set the xtype
> +    _editType: 'textfield',
> +
>      editConfig: {},
>      editable: false,
>      setEditable: function(editable) {
> @@ -41,6 +44,15 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
>  	vm.get('value');
>      },
>  
> +    setEmptyText: function(emptyText) {
> +	let me = this;
> +	me.down(me._editType).setEmptyText(emptyText);
> +    },
> +    getEmptyText: function() {
> +	let me = this;
> +	return me.down(me._editType).getEmptyText();
> +    },
> +
>      layout: 'fit',
>      defaults: {
>  	hideLabel: true,
> @@ -64,6 +76,8 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
>  	});
>  	Ext.applyIf(editConfig, displayConfig);
>  
> +	me.__editType = editConfig.xtype;

Sorry, there is a typo here, __editType instead of _editType

> +
>  	if (me.initialConfig && me.initialConfig.displayConfig) {
>  	    Ext.applyIf(displayConfig, me.initialConfig.displayConfig);
>  	    delete displayConfig.displayConfig;




From t.lamprecht at proxmox.com  Tue Nov 26 17:25:42 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Tue, 26 Nov 2024 17:25:42 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] client: backup writer:
 fix regression in progress output
In-Reply-To: <20241126161452.404685-1-c.ebner@proxmox.com>
References: <20241126161452.404685-1-c.ebner@proxmox.com>
Message-ID: <6e820c96-0022-4440-b0b5-98f337a6a3a2@proxmox.com>

On 26.11.24 at 17:14, Christian Ebner wrote:
> Fixes a regression introduced when switching from the plain string
> to be used for archive names to the BackupArchiveName api type in
> commit addfae26 ("api types: introduce `BackupArchiveName` type").
> 
> The archive name is now always stored including the server archive
> name extension. Adapt the check for which archive types to display
> the progress log output to reflect this change.
> 
> Fixes: addfae26 ("api types: introduce `BackupArchiveName` type")
> Reported-by: Max Carrara 
> Signed-off-by: Christian Ebner 
> ---
>  pbs-client/src/backup_writer.rs | 6 +++---
>  1 file changed, 3 insertions(+), 3 deletions(-)
> 
>

applied, thanks!



From f.ebner at proxmox.com  Tue Nov 26 17:29:14 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:29:14 +0100
Subject: [pbs-devel] [PATCH v3 proxmox-widget-toolkit/proxmox-backup 0/2]
 ui: slightly improve removable datastore dialog
Message-ID: <20241126162916.87958-1-f.ebner@proxmox.com>

Changes in v3:
* fix typo in variable name

Changes in v2:
* avoid tracking emptyText state twice

proxmox-widget-toolkit:

Fiona Ebner (1):
  form: display-edit: support emptyText

 src/form/DisplayEdit.js | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

proxmox-backup:

Fiona Ebner (1):
  ui: datastore edit: fix emptytext for path field

 www/window/DataStoreEdit.js | 2 ++
 1 file changed, 2 insertions(+)

-- 
2.39.5




From f.ebner at proxmox.com  Tue Nov 26 17:29:16 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:29:16 +0100
Subject: [pbs-devel] [PATCH v3 proxmox-backup 2/2] ui: datastore edit: fix
 emptytext for path field
In-Reply-To: <20241126162916.87958-1-f.ebner@proxmox.com>
References: <20241126162916.87958-1-f.ebner@proxmox.com>
Message-ID: <20241126162916.87958-3-f.ebner@proxmox.com>

It is a relative path for removable datastores.

Signed-off-by: Fiona Ebner 
---

No changes in v3.
No changes in v2.

 www/window/DataStoreEdit.js | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/www/window/DataStoreEdit.js b/www/window/DataStoreEdit.js
index 40ccd20a..4a0b8d81 100644
--- a/www/window/DataStoreEdit.js
+++ b/www/window/DataStoreEdit.js
@@ -114,8 +114,10 @@ Ext.define('PBS.DataStoreEdit', {
 				uuidEditField.setValue('');
 				if (isRemovable) {
 				    pathField.setFieldLabel(gettext('Path on Device'));
+				    pathField.setEmptyText(gettext('A relative path'));
 				} else {
 				    pathField.setFieldLabel(gettext('Backing Path'));
+				    pathField.setEmptyText(gettext('An absolute path'));
 				}
 			    },
 			},
-- 
2.39.5




From f.ebner at proxmox.com  Tue Nov 26 17:29:15 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Tue, 26 Nov 2024 17:29:15 +0100
Subject: [pbs-devel] [PATCH v3 widget-toolkit] form: display-edit: support
 emptyText
In-Reply-To: <20241126162916.87958-1-f.ebner@proxmox.com>
References: <20241126162916.87958-1-f.ebner@proxmox.com>
Message-ID: <20241126162916.87958-2-f.ebner@proxmox.com>

To access the edit field, its xtype is now tracked.

First user is intended to be the path field for datastores, where the
emptyText should dynamically reflect whether a relative or an absolute
path is expected.

Signed-off-by: Fiona Ebner 
---

Changes in v3:
* fix typo in variable name

Changes in v2:
* avoid tracking emptyText state twice, pass directly to edit field.

 src/form/DisplayEdit.js | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/src/form/DisplayEdit.js b/src/form/DisplayEdit.js
index fe1b83b..01d6a05 100644
--- a/src/form/DisplayEdit.js
+++ b/src/form/DisplayEdit.js
@@ -12,6 +12,9 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 
     displayType: 'displayfield',
 
+    // internal only, use editConfig to set the xtype
+    _editType: 'textfield',
+
     editConfig: {},
     editable: false,
     setEditable: function(editable) {
@@ -41,6 +44,15 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 	vm.get('value');
     },
 
+    setEmptyText: function(emptyText) {
+	let me = this;
+	me.down(me._editType).setEmptyText(emptyText);
+    },
+    getEmptyText: function() {
+	let me = this;
+	return me.down(me._editType).getEmptyText();
+    },
+
     layout: 'fit',
     defaults: {
 	hideLabel: true,
@@ -64,6 +76,8 @@ Ext.define('Proxmox.form.field.DisplayEdit', {
 	});
 	Ext.applyIf(editConfig, displayConfig);
 
+	me._editType = editConfig.xtype;
+
 	if (me.initialConfig && me.initialConfig.displayConfig) {
 	    Ext.applyIf(displayConfig, me.initialConfig.displayConfig);
 	    delete displayConfig.displayConfig;
-- 
2.39.5




From h.laimer at proxmox.com  Tue Nov 26 17:51:26 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Tue, 26 Nov 2024 17:51:26 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api: directory: use relative
 path when creating removable datastore
Message-ID: <20241126165126.167497-1-h.laimer@proxmox.com>

Reported-by: Markus Frank 
Fixes: 94a068e31 ("api: node: allow creation of removable datastore through directory endpoint")
Signed-off-by: Hannes Laimer 
---
 src/api2/node/disks/directory.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 2f7cc7a27..6a76dd5a7 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -237,7 +237,7 @@ pub fn create_datastore_disk(
                 let lock = pbs_config::datastore::lock_config()?;
                 let datastore: DataStoreConfig = if removable_datastore {
                     serde_json::from_value(
-                        json!({ "name": name, "path": format!("/{name}"), "backing-device": uuid }),
+                        json!({ "name": name, "path": name, "backing-device": uuid }),
                     )?
                 } else {
                     serde_json::from_value(json!({ "name": name, "path": mount_point }))?
-- 
2.39.5




From f.gruenbichler at proxmox.com  Wed Nov 27 09:26:04 2024
From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=)
Date: Wed, 27 Nov 2024 09:26:04 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] pull: properly skip missing
 snapshots
Message-ID: <20241127082604.89880-1-f.gruenbichler@proxmox.com>

when loading the verification state for a local snapshot, it must first be
ensured that it actually exists, else the lack of a manifest will be
interpreted as a corrupt snapshot, triggering a "resync" that is actually a
sync of all missing snapshots.

Fixes: 0974ddfa17be018f777d6ece90a71bfa8fc130d8 "fix #3786: api: add resync-corrupt option to sync jobs"

Signed-off-by: Fabian Grünbichler 
---
 src/server/pull.rs | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/src/server/pull.rs b/src/server/pull.rs
index 9abb673ae..361ed0687 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -551,21 +551,23 @@ async fn pull_group(
                     .store
                     .backup_dir(target_ns.clone(), dir.clone());
                 if let Ok(local_dir) = local_dir {
-                    match local_dir.verify_state() {
-                        Ok(Some(state)) => {
-                            if state == VerifyState::Failed {
+                    if local_dir.full_path().exists() {
+                        match local_dir.verify_state() {
+                            Ok(Some(state)) => {
+                                if state == VerifyState::Failed {
+                                    return Some((dir, true));
+                                }
+                            }
+                            Ok(None) => {
+                                // The verify_state item was not found in the manifest, this means the
+                                // snapshot is new.
+                            }
+                            Err(_) => {
+                                // There was an error loading the manifest, probably better if we
+                                // resync.
                                 return Some((dir, true));
                             }
                         }
-                        Ok(None) => {
-                            // The verify_state item was not found in the manifest, this means the
-                            // snapshot is new.
-                        }
-                        Err(_) => {
-                            // There was an error loading the manifest, probably better if we
-                            // resync.
-                            return Some((dir, true));
-                        }
                     }
                 }
             }
-- 
2.39.5




From d.csapak at proxmox.com  Wed Nov 27 09:41:15 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Wed, 27 Nov 2024 09:41:15 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: prune keep input: actually
 clear value on clear trigger click
Message-ID: <20241127084115.489112-1-d.csapak@proxmox.com>

instead of resetting to the originalValue. This makes it behave like
other similar fields (e.g. the combogrid).

Reported-by: Fabian Grünbichler 
Signed-off-by: Dominik Csapak 
---
 www/datastore/Prune.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/datastore/Prune.js b/www/datastore/Prune.js
index 5752907e3..e0ff4f2dc 100644
--- a/www/datastore/Prune.js
+++ b/www/datastore/Prune.js
@@ -32,7 +32,7 @@ Ext.define('PBS.PruneKeepInput', {
 	    hidden: true,
 	    handler: function() {
 		this.triggers.clear.setVisible(false);
-		this.setValue(this.originalValue);
+		this.setValue("");
 	    },
 	},
     },
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 09:58:50 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 09:58:50 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api: directory: use relative
 path when creating removable datastore
In-Reply-To: <20241126165126.167497-1-h.laimer@proxmox.com>
References: <20241126165126.167497-1-h.laimer@proxmox.com>
Message-ID: 

Can you please add a commit message for this with some rationale, e.g.,
why the old value was assembled that way, if there was any reasoning back
then (simple mistakes naturally happen to everybody too), and/or why this
is now the correct way.

You can also write it as reply and I amend the patch or send a v2,
whatever you prefer.

On 26.11.24 at 17:51, Hannes Laimer wrote:
> Reported-by: Markus Frank 
> Fixes: 94a068e31 ("api: node: allow creation of removable datastore through directory endpoint")
> Signed-off-by: Hannes Laimer 
> ---
>  src/api2/node/disks/directory.rs | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
> index 2f7cc7a27..6a76dd5a7 100644
> --- a/src/api2/node/disks/directory.rs
> +++ b/src/api2/node/disks/directory.rs
> @@ -237,7 +237,7 @@ pub fn create_datastore_disk(
>                  let lock = pbs_config::datastore::lock_config()?;
>                  let datastore: DataStoreConfig = if removable_datastore {
>                      serde_json::from_value(
> -                        json!({ "name": name, "path": format!("/{name}"), "backing-device": uuid }),
> +                        json!({ "name": name, "path": name, "backing-device": uuid }),
>                      )?
>                  } else {
>                      serde_json::from_value(json!({ "name": name, "path": mount_point }))?




From h.laimer at proxmox.com  Wed Nov 27 10:03:27 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 27 Nov 2024 10:03:27 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api: directory: use relative
 path when creating removable datastore
In-Reply-To: 
References: <20241126165126.167497-1-h.laimer@proxmox.com>
 
Message-ID: <433bb804-e861-4201-a802-17c709ecaff1@proxmox.com>



On 11/27/24 09:58, Thomas Lamprecht wrote:
> Can you please add a comment message for this with some rationale, e.g.,
> why the old value was assembled that way, if there was any reasoning back
> then, simple mistakes naturally happen too to everybody, and/or why this
> is now the correct way.
> 
> You can also write it as reply and I amend the patch or send a v2,
> whatever you prefer.
> 

In an earlier version of this series the datastore path was absolute for 
removable datastores. This is a leftover I missed when changing that.

> On 26.11.24 at 17:51, Hannes Laimer wrote:
>> Reported-by: Markus Frank 
>> Fixes: 94a068e31 ("api: node: allow creation of removable datastore through directory endpoint")
>> Signed-off-by: Hannes Laimer 
>> ---
>>   src/api2/node/disks/directory.rs | 2 +-
>>   1 file changed, 1 insertion(+), 1 deletion(-)
>>
>> diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
>> index 2f7cc7a27..6a76dd5a7 100644
>> --- a/src/api2/node/disks/directory.rs
>> +++ b/src/api2/node/disks/directory.rs
>> @@ -237,7 +237,7 @@ pub fn create_datastore_disk(
>>                   let lock = pbs_config::datastore::lock_config()?;
>>                   let datastore: DataStoreConfig = if removable_datastore {
>>                       serde_json::from_value(
>> -                        json!({ "name": name, "path": format!("/{name}"), "backing-device": uuid }),
>> +                        json!({ "name": name, "path": name, "backing-device": uuid }),
>>                       )?
>>                   } else {
>>                       serde_json::from_value(json!({ "name": name, "path": mount_point }))?
> 




From d.csapak at proxmox.com  Wed Nov 27 10:05:00 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Wed, 27 Nov 2024 10:05:00 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] pull: properly skip missing
 snapshots
In-Reply-To: <20241127082604.89880-1-f.gruenbichler@proxmox.com>
References: <20241127082604.89880-1-f.gruenbichler@proxmox.com>
Message-ID: 

Code-wise it looks good to me, and it tested fine.
I am not sure though if there isn't a slight TOCTOU issue
if the snapshot is deleted between the exists and verify_state checks?
(not sure if there's a lock anyway here, couldn't tell from the surrounding code)

in that case we could maybe check the error for ENOENT (if that's returned?)
or return a custom Error type that includes that information

aside from that, consider this

Reviewed-by: Dominik Csapak 
Tested-by: Dominik Csapak 


On 11/27/24 09:26, Fabian Grünbichler wrote:
> when loading the verification state for a local snapshot, it must first be
> ensured that it actually exists, else the lack of a manifest will be
> interpreted as a corrupt snapshot, triggering a "resync" that is actually a
> sync of all missing snapshots.
> 
> Fixes: 0974ddfa17be018f777d6ece90a71bfa8fc130d8 "fix #3786: api: add resync-corrupt option to sync jobs"
> 
> Signed-off-by: Fabian Grünbichler 
> ---
>   src/server/pull.rs | 26 ++++++++++++++------------
>   1 file changed, 14 insertions(+), 12 deletions(-)
> 
> diff --git a/src/server/pull.rs b/src/server/pull.rs
> index 9abb673ae..361ed0687 100644
> --- a/src/server/pull.rs
> +++ b/src/server/pull.rs
> @@ -551,21 +551,23 @@ async fn pull_group(
>                       .store
>                       .backup_dir(target_ns.clone(), dir.clone());
>                   if let Ok(local_dir) = local_dir {
> -                    match local_dir.verify_state() {
> -                        Ok(Some(state)) => {
> -                            if state == VerifyState::Failed {
> +                    if local_dir.full_path().exists() {
> +                        match local_dir.verify_state() {
> +                            Ok(Some(state)) => {
> +                                if state == VerifyState::Failed {
> +                                    return Some((dir, true));
> +                                }
> +                            }
> +                            Ok(None) => {
> +                                // The verify_state item was not found in the manifest, this means the
> +                                // snapshot is new.
> +                            }
> +                            Err(_) => {
> +                                // There was an error loading the manifest, probably better if we
> +                                // resync.
>                                   return Some((dir, true));
>                               }
>                           }
> -                        Ok(None) => {
> -                            // The verify_state item was not found in the manifest, this means the
> -                            // snapshot is new.
> -                        }
> -                        Err(_) => {
> -                            // There was an error loading the manifest, probably better if we
> -                            // resync.
> -                            return Some((dir, true));
> -                        }
>                       }
>                   }
>               }




From t.lamprecht at proxmox.com  Wed Nov 27 10:05:21 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 10:05:21 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] pull: properly skip
 missing snapshots
In-Reply-To: <20241127082604.89880-1-f.gruenbichler@proxmox.com>
References: <20241127082604.89880-1-f.gruenbichler@proxmox.com>
Message-ID: <4960dd8d-3138-4b34-88cd-5b232e2288c7@proxmox.com>

On 27.11.24 at 09:26, Fabian Grünbichler wrote:
> when loading the verification state for a local snapshot, it must first be
> ensured that it actually exists, else the lack of a manifest will be
> interpreted as a corrupt snapshot, triggering a "resync" that is actually a
> sync of all missing snapshots.
> 
> Fixes: 0974ddfa17be018f777d6ece90a71bfa8fc130d8 "fix #3786: api: add resync-corrupt option to sync jobs"
> 
> Signed-off-by: Fabian Grünbichler 
> ---
>  src/server/pull.rs | 26 ++++++++++++++------------
>  1 file changed, 14 insertions(+), 12 deletions(-)
> 
>

applied, with some subjective rewording/addition to commit message, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 10:06:33 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 10:06:33 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] api: directory: use
 relative path when creating removable datastore
In-Reply-To: <20241126165126.167497-1-h.laimer@proxmox.com>
References: <20241126165126.167497-1-h.laimer@proxmox.com>
Message-ID: 

On 26.11.24 at 17:51, Hannes Laimer wrote:
> Reported-by: Markus Frank 
> Fixes: 94a068e31 ("api: node: allow creation of removable datastore through directory endpoint")
> Signed-off-by: Hannes Laimer 
> ---
>  src/api2/node/disks/directory.rs | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, with your commit message (slightly adapted) folded in, thanks!



From f.gruenbichler at proxmox.com  Wed Nov 27 10:17:43 2024
From: f.gruenbichler at proxmox.com (Fabian =?iso-8859-1?q?Gr=FCnbichler?=)
Date: Wed, 27 Nov 2024 10:17:43 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] pull: properly skip missing
 snapshots
In-Reply-To: 
References: <20241127082604.89880-1-f.gruenbichler@proxmox.com>
 
Message-ID: <1732698965.5ul13djmpt.astroid@yuna.none>

On November 27, 2024 10:05 am, Dominik Csapak wrote:
> Code-wise it looks good to me, and it tested fine.
> I am not sure though if there isn't a slight TOCTOU issue
> if the snapshot is deleted between the exists and verify_state checks?
> (not sure if there's a lock anyway here, couldn't tell from the surrounding code)

there is no lock at that point, that would also make this much more
expensive (we need to do this for every existing snapshot after all).

in general, sync should handle snapshots disappearing gracefully, and
the race window here is tiny.

handling ENOENT might be nice as an additional safeguard, not sure if we
properly bubble that up atm though...
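
something like this sketch could be used for that, assuming the underlying
io::Error stays reachable in the error chain (invented helper, not tested
against the actual verify_state() return type):

```
use std::io;

// walk the anyhow error chain and check for ENOENT, so that a snapshot
// deleted between the exists() check and the manifest load can be
// skipped instead of being flagged for a resync
fn is_enoent(err: &anyhow::Error) -> bool {
    err.chain()
        .filter_map(|cause| cause.downcast_ref::<io::Error>())
        .any(|io_err| io_err.kind() == io::ErrorKind::NotFound)
}
```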

> in that case we could maybe check the error for ENOENT (if that's returned?)
> or return a custom Error type that includes that information
> 
> aside from that, consider this
> 
> Reviewed-by: Dominik Csapak 
> Tested-by: Dominik Csapak 
> 
> 
> On 11/27/24 09:26, Fabian Grünbichler wrote:
>> when loading the verification state for a local snapshot, it must first be
>> ensured that it actually exists, else the lack of a manifest will be
>> interpreted as a corrupt snapshot, triggering a "resync" that is actually a
>> sync of all missing snapshots.
>> 
>> Fixes: 0974ddfa17be018f777d6ece90a71bfa8fc130d8 "fix #3786: api: add resync-corrupt option to sync jobs"
>> 
>> Signed-off-by: Fabian Grünbichler 
>> ---
>>   src/server/pull.rs | 26 ++++++++++++++------------
>>   1 file changed, 14 insertions(+), 12 deletions(-)
>> 
>> diff --git a/src/server/pull.rs b/src/server/pull.rs
>> index 9abb673ae..361ed0687 100644
>> --- a/src/server/pull.rs
>> +++ b/src/server/pull.rs
>> @@ -551,21 +551,23 @@ async fn pull_group(
>>                       .store
>>                       .backup_dir(target_ns.clone(), dir.clone());
>>                   if let Ok(local_dir) = local_dir {
>> -                    match local_dir.verify_state() {
>> -                        Ok(Some(state)) => {
>> -                            if state == VerifyState::Failed {
>> +                    if local_dir.full_path().exists() {
>> +                        match local_dir.verify_state() {
>> +                            Ok(Some(state)) => {
>> +                                if state == VerifyState::Failed {
>> +                                    return Some((dir, true));
>> +                                }
>> +                            }
>> +                            Ok(None) => {
>> +                                // The verify_state item was not found in the manifest, this means the
>> +                                // snapshot is new.
>> +                            }
>> +                            Err(_) => {
>> +                                // There was an error loading the manifest, probably better if we
>> +                                // resync.
>>                                   return Some((dir, true));
>>                               }
>>                           }
>> -                        Ok(None) => {
>> -                            // The verify_state item was not found in the manifest, this means the
>> -                            // snapshot is new.
>> -                        }
>> -                        Err(_) => {
>> -                            // There was an error loading the manifest, probably better if we
>> -                            // resync.
>> -                            return Some((dir, true));
>> -                        }
>>                       }
>>                   }
>>               }
> 
> 
> 



From c.ebner at proxmox.com  Wed Nov 27 10:26:53 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 27 Nov 2024 10:26:53 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] sync: push: pass full error
 context when returning error to job
Message-ID: <20241127092653.92852-1-c.ebner@proxmox.com>

Show the full error context when fetching the remote target
namespaces fails. As logging of the error is handled by the calling
sync job, reformat the error to include the error context before
returning.

Instead of the error
```
TASK ERROR: Fetching remote namespaces failed, remote returned error
```

the user is now presented with an error like
```
TASK ERROR: Fetching remote namespaces failed, remote returned error: datastore 'removable1' is not mounted
```

Signed-off-by: Christian Ebner 
---
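
Since only `format_err` is added to the imports below, the change boils
down to flattening the context chain into the returned error via anyhow's
alternate Display. A self-contained sketch of that pattern (function names
invented for illustration, not the actual push code):

```
use anyhow::{format_err, Context, Result};

fn query_remote() -> Result<()> {
    Err(format_err!("datastore 'removable1' is not mounted"))
}

fn fetch_target_namespaces() -> Result<()> {
    // the context alone is all the task log showed so far
    query_remote().context("remote returned error")
}

fn main() {
    // "{err:#}" renders the full context chain, so the task log now
    // includes the root cause instead of only the outermost message
    if let Err(err) = fetch_target_namespaces()
        .map_err(|err| format_err!("Fetching remote namespaces failed, {err:#}"))
    {
        eprintln!("TASK ERROR: {err}");
    }
}
```
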
 src/server/push.rs | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/server/push.rs b/src/server/push.rs
index 99757a3cc..74bc29e7e 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -3,7 +3,7 @@
 use std::collections::HashSet;
 use std::sync::{Arc, Mutex};
 
-use anyhow::{bail, Context, Error};
+use anyhow::{bail, format_err, Context, Error};
 use futures::stream::{self, StreamExt, TryStreamExt};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
@@ -379,7 +379,9 @@ pub(crate) async fn push_store(mut params: PushParameters) -> Result



From l.wagner at proxmox.com  Wed Nov 27 11:02:45 2024
From: l.wagner at proxmox.com (Lukas Wagner)
Date: Wed, 27 Nov 2024 11:02:45 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: datastore content: change
 button text "Add NS" to "Add Namespace"
Message-ID: <20241127100245.86992-1-l.wagner@proxmox.com>

We don't use the abbreviation anywhere else in our UI or docs.
To avoid any confusion about this (loaded) abbreviation, this
commit replaces it with the full word "Namespace".
There is more than enough space in the top bar for the larger button
size, even on low resolution screens (checked on 1280x700).

Signed-off-by: Lukas Wagner 
---
 www/datastore/Content.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/datastore/Content.js b/www/datastore/Content.js
index e11b14b5..fffd8c16 100644
--- a/www/datastore/Content.js
+++ b/www/datastore/Content.js
@@ -1257,7 +1257,7 @@ Ext.define('PBS.DataStoreContent', {
 	},
 	{
 	    xtype: 'proxmoxButton',
-	    text: gettext('Add NS'),
+	    text: gettext('Add Namespace'),
 	    iconCls: 'fa fa-plus-square',
 	    handler: 'addNS',
 	},
-- 
2.39.5




From h.laimer at proxmox.com  Wed Nov 27 11:18:40 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 27 Nov 2024 11:18:40 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api: admin: add Datastore.Modify
 permission for mount
Message-ID: <20241127101840.43370-1-h.laimer@proxmox.com>

So the mount and unmount endpoints have matching permissions.

Signed-off-by: Hannes Laimer 
---
 src/api2/admin/datastore.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index cae7eb89c..d27c00f2b 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -2513,7 +2513,7 @@ pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
     },
     access: {
         permission: &Permission::And(&[
-            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
+            &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_MODIFY, false),
             &Permission::Privilege(&["system", "disks"], PRIV_SYS_MODIFY, false)
         ]),
     },
-- 
2.39.5




From g.goller at proxmox.com  Wed Nov 27 11:27:37 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Wed, 27 Nov 2024 11:27:37 +0100
Subject: [pbs-devel] [PATCH] textareafield: add emptyText message to show
 markdown is possible
Message-ID: <20241127102737.81854-1-g.goller@proxmox.com>

Just like in our `Notes` fields, show an emptyText message that explains
that markdown can be used.

Reported-by: Lukas Wagner 
Signed-off-by: Gabriel Goller 
---
 src/form/TextAreaField.js | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/form/TextAreaField.js b/src/form/TextAreaField.js
index 267b40c87d74..ed6015a9ec51 100644
--- a/src/form/TextAreaField.js
+++ b/src/form/TextAreaField.js
@@ -10,6 +10,7 @@ Ext.define('Proxmox.form.field.Base64TextArea', {
         width: 600,
         height: 400,
         scrollable: 'y',
+        emptyText: 'You can use Markdown for richt text formatting.',
     },
 
     setValue: function(value) {
-- 
2.39.5




From l.wagner at proxmox.com  Wed Nov 27 11:29:30 2024
From: l.wagner at proxmox.com (Lukas Wagner)
Date: Wed, 27 Nov 2024 11:29:30 +0100
Subject: [pbs-devel] [PATCH] textareafield: add emptyText message to
 show markdown is possible
In-Reply-To: <20241127102737.81854-1-g.goller@proxmox.com>
References: <20241127102737.81854-1-g.goller@proxmox.com>
Message-ID: <123dbdc2-192c-4565-af00-927a86c1ffd9@proxmox.com>

On  2024-11-27 11:27, Gabriel Goller wrote:
> Just like in our `Notes` fields, show an emptyText message that explains
> that markdown can be used.
> 
> Reported-by: Lukas Wagner 
> Signed-off-by: Gabriel Goller 
> ---
>  src/form/TextAreaField.js | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/src/form/TextAreaField.js b/src/form/TextAreaField.js
> index 267b40c87d74..ed6015a9ec51 100644
> --- a/src/form/TextAreaField.js
> +++ b/src/form/TextAreaField.js
> @@ -10,6 +10,7 @@ Ext.define('Proxmox.form.field.Base64TextArea', {
>          width: 600,
>          height: 400,
>          scrollable: 'y',
> +        emptyText: 'You can use Markdown for richt text formatting.',

This should use gettext :)
Also there is a typo in 'rich' :D

-- 
- Lukas



From a.lauterer at proxmox.com  Wed Nov 27 11:42:35 2024
From: a.lauterer at proxmox.com (Aaron Lauterer)
Date: Wed, 27 Nov 2024 11:42:35 +0100
Subject: [pbs-devel] [PATCH] api: removable datastore: downgrade device
 already mounted error to info
Message-ID: <20241127104235.154875-1-a.lauterer@proxmox.com>

pbs-datastore::datastore::is_datastore_mounted_at() verifies that the
mounted file system has the expected UUID. Therefore we don't have to
error out if we try to mount an already mounted removable datastore.

Signed-off-by: Aaron Lauterer 
---
 src/api2/admin/datastore.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index cae7eb89..3f794e83 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -2448,10 +2448,11 @@ fn setup_mounted_device(datastore: &DataStoreConfig, tmp_mount_path: &str) -> Re
 pub fn do_mount_device(datastore: DataStoreConfig) -> Result<(), Error> {
     if let Some(uuid) = datastore.backing_device.as_ref() {
         if pbs_datastore::get_datastore_mount_status(&datastore) == Some(true) {
-            bail!(
+            info!(
                 "device is already mounted at '{}'",
                 datastore.absolute_path()
             );
+            return Ok(());
         }
         let tmp_mount_path = format!(
             "{}/{:x}",
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 11:46:08 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 11:46:08 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] pull: properly skip missing
 snapshots
In-Reply-To: <1732698965.5ul13djmpt.astroid@yuna.none>
References: <20241127082604.89880-1-f.gruenbichler@proxmox.com>
 
 <1732698965.5ul13djmpt.astroid@yuna.none>
Message-ID: 

On 27.11.24 at 10:17, Fabian Grünbichler wrote:
> handling ENOENT might be nice as an additional safeguard, not sure if we
> properly bubble that up atm though...

Yes, would be nice(r), but it indeed seems like the underlying
BackupDir::load_blob from pbs_datastore hides the actual error.
We should switch that over to use anyhow's context and probably drop the
try_block; not really useful here.
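
A sketch of what that could look like, with the signature simplified and
names assumed (not the actual pbs_datastore code):

```
use std::path::Path;

use anyhow::{Context, Result};

// with_context() keeps the io::Error as source(), so callers can still
// reach it via err.chain()/downcast_ref() to special-case ENOENT, while
// the task log still gets a readable message
fn load_blob(path: &Path) -> Result<Vec<u8>> {
    std::fs::read(path)
        .with_context(|| format!("unable to load blob '{}'", path.display()))
}
```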



From t.lamprecht at proxmox.com  Wed Nov 27 11:49:41 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 11:49:41 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] api: admin: add
 Datastore.Modify permission for mount
In-Reply-To: <20241127101840.43370-1-h.laimer@proxmox.com>
References: <20241127101840.43370-1-h.laimer@proxmox.com>
Message-ID: <493bcd34-d5ad-4e53-a04e-71465c4acbc0@proxmox.com>

On 27.11.24 at 11:18, Hannes Laimer wrote:
> So the mount and unmount endpoints have matching permissions.
> 
> Signed-off-by: Hannes Laimer 
> ---
>  src/api2/admin/datastore.rs | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 11:50:38 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 11:50:38 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] ui: datastore content:
 change button text "Add NS" to "Add Namespace"
In-Reply-To: <20241127100245.86992-1-l.wagner@proxmox.com>
References: <20241127100245.86992-1-l.wagner@proxmox.com>
Message-ID: 

On 27.11.24 at 11:02, Lukas Wagner wrote:
> We don't use the abbreviation anywhere else in our UI or docs.
> To avoid any confusion about this (loaded) abbreviation, this
> commit replaces it with the full word "Namespace".
> There is more than enough space in the top bar for the larger button
> size, even on low resolution screens (checked on 1280x700).
> 
> Signed-off-by: Lukas Wagner 
> ---
>  www/datastore/Content.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, it improves UX, but I fail to see this being loaded in such a
context, thanks!



From a.zeidler at proxmox.com  Wed Nov 27 11:49:58 2024
From: a.zeidler at proxmox.com (Alexander Zeidler)
Date: Wed, 27 Nov 2024 11:49:58 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup] docs: installation: several
 small fixes/improvements
Message-ID: <20241127104958.3-1-a.zeidler@proxmox.com>

* consistently use "medium" (singular), as only one is needed for
  installation (installation-media.rst not renamed)
* add short introduction to recently added chapter "Installation Media"
* update minimum required flash drive storage space to 2 GB
* remove CD-ROM (too little storage space) but keep DVD
* mention explicitly that data get overwritten on installation media /
  installation target disks
* mention that using `dd` will require root privileges
* add accidentally cut off text when copying from PVE docs
* add reference labels to currently needed section titles
* reword some paragraphs for completeness and readability
* mention all installation methods in the intro of "Server Installation"
* add the boot order as possible boot issue
* remove recently added redundant product website hyperlinks (as earlier
  with commit 34407477e2)
* fix broken heading level of APT-based PBC repo

* slightly reorder sub-chapters of "Installation":

After adding the chapter "Installation Media" (d363818641), the chapter
order under "Installation" is:

1. System Requirements
2. Installation Media
3. Debian Package Repositories
4. Server Installation
5. Client Installation

But repos are more likely to be configured after installation, and for
other installation methods chapter links exist anyway. So to keep the
chapter order more logical, "Debian Package Repositories" is now moved
after "Client Installation".

Signed-off-by: Alexander Zeidler 
---
v2:
 * implement all suggestions from Shannon


 docs/installation-media.rst   | 57 +++++++++++++++++++++--------------
 docs/installation.rst         | 41 +++++++++++++++++--------
 docs/package-repositories.rst |  2 +-
 docs/using-the-installer.rst  | 25 +++++++--------
 4 files changed, 76 insertions(+), 49 deletions(-)

diff --git a/docs/installation-media.rst b/docs/installation-media.rst
index e109f2ba..dbcb2078 100644
--- a/docs/installation-media.rst
+++ b/docs/installation-media.rst
@@ -1,44 +1,54 @@
-.. _installation_media:
+.. _installation_medium:
 
-Installation Media
-------------------
+Installation Medium
+-------------------
 
-Prepare Installation Media
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+Proxmox Backup Server can be installed via
+:ref:`different methods `. The recommended method is the
+usage of an installation medium, to simply boot the interactive
+installer.
+
+
+Prepare Installation Medium
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Download the installer ISO image from |DOWNLOADS|.
 
-The `Proxmox Backup`_ Server installation media is a hybrid ISO image. It works
-in two ways:
+The Proxmox Backup Server installation medium is a hybrid ISO image.
+It works in two ways:
 
-- An ISO image file ready to burn to a CD or DVD.
+- An ISO image file ready to burn to a DVD.
 
 - A raw sector (IMG) image file ready to copy to a USB flash drive (USB stick).
 
-Using a USB flash drive to install `Proxmox Backup`_ Server is the recommended
-way since it is the faster option.
+Using a USB flash drive to install Proxmox Backup Server is the
+recommended way since it is the faster and more frequently available
+option these days.
 
 Prepare a USB Flash Drive as Installation Medium
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The flash drive needs to have at least 1 GB of storage available.
+The flash drive needs to have at least 2 GB of storage space.
 
 .. note::
 
-   Do not use *UNetbootin*. It does not work with the `Proxmox Backup`_ Server
-   installation image.
+   Do not use *UNetbootin*. It does not work with the Proxmox Backup
+   Server installation image.
 
 .. important::
 
-   Make sure that the USB flash drive is not mounted and does not
-   contain any important data.
+   Existing data on the USB flash drive will be overwritten.
+   Therefore, make sure that it does not contain any still needed data
+   and unmount it afterwards again before proceeding.
 
 Instructions for GNU/Linux
 ~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-On Unix-like operating system use the ``dd`` command to copy the ISO
+On Unix-like operating systems use the ``dd`` command to copy the ISO
 image to the USB flash drive. First find the correct device name of the
-USB flash drive (see below). Then run the ``dd`` command.
+USB flash drive (see below). Then run the ``dd`` command. Depending on
+your environment, you will need to have root privileges to execute
+``dd``.
 
 .. code-block:: console
 
@@ -132,16 +142,17 @@ Using Rufus
 
 Rufus is a more lightweight alternative, but you need to use the **DD
 mode** to make it work. Download Rufus from https://rufus.ie/. Either
-install it or use
+install it or use the portable version. Select the destination drive
+and the downloaded Proxmox ISO file.
 
 .. important::
 
    Once you click *Start*, you have to click *No* on the dialog asking to
    download a different version of Grub. In the next dialog select **DD mode**.
 
-Boot your Server from the USB Flash Drive
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Use the Installation Medium
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Connect the USB flash drive to your server and make sure that booting from USB
-is enabled (check your servers firmware settings). Then follow the steps of the
-:ref:`installation wizard `.
+Insert the created USB flash drive (or DVD) into your server. Continue
+by reading the :ref:`installer ` chapter, which
+also describes possible boot issues.
diff --git a/docs/installation.rst b/docs/installation.rst
index 79cba840..dd09a1e9 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -9,7 +9,7 @@ Debian_ from the provided package repository.
 
 .. include:: installation-media.rst
 
-.. include:: package-repositories.rst
+.. _install_pbs:
 
 Server Installation
 -------------------
@@ -20,24 +20,34 @@ for various management tasks such as disk management.
 .. note:: You always need a backup server. It is not possible to use
    Proxmox Backup without the server part.
 
-The disk image (ISO file) provided by Proxmox includes a complete Debian system
-as well as all necessary packages for the Proxmox Backup Server.
+Using our provided disk image (ISO file) is the recommended
+installation method, as it includes a convenient installer, a complete
+Debian system as well as all necessary packages for the Proxmox Backup
+Server.
+
+Once you have created an :ref:`installation_medium`, the booted
+:ref:`installer ` will guide you through the
+setup process. It will help you to partition your disks, apply basic
+settings such as the language, time zone and network configuration,
+and finally install all required packages within minutes.
 
-The installer will guide you through the setup process and allow
-you to partition the local disk(s), apply basic system configuration
-(for example timezone, language, network), and install all required packages.
-The provided ISO will get you started in just a few minutes, and is the
-recommended method for new and existing users.
+As an alternative to the interactive installer, advanced users may
+wish to install Proxmox Backup Server
+:ref:`unattended <install_pbs_unattended>`.
 
-Alternatively, Proxmox Backup Server can be installed on top of an
-existing Debian system. This option is only recommended for advanced users
-because detailed knowledge about Proxmox Backup Server is required.
+With sufficient Debian knowledge, you can also install Proxmox Backup
+Server :ref:`on top of Debian <install_pbs_on_debian>` yourself.
+
+While not recommended, Proxmox Backup Server could also be installed
+:ref:`on Proxmox VE <install_pbs_on_pve>`.
 
 .. include:: using-the-installer.rst
 
+.. _install_pbs_unattended:
+
 Install `Proxmox Backup`_ Server Unattended
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-It is possible to install `Proxmox Backup`_ Server automatically in an
+It is possible to install Proxmox Backup Server automatically in an
 unattended manner. This enables you to fully automate the setup process on
 bare-metal. Once the installation is complete and the host has booted up,
 automation tools like Ansible can be used to further configure the installation.
@@ -51,6 +61,7 @@ installation ISO.  For more details and information on the unattended
 installation see `our wiki
 `_.
 
+.. _install_pbs_on_debian:
 
 Install `Proxmox Backup`_ Server on Debian
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -88,6 +99,8 @@ support, and a set of common and useful packages.
    your web browser, using HTTPS on port 8007. For example at
    ``https://:8007``
 
+.. _install_pbs_on_pve:
+
 Install Proxmox Backup Server on `Proxmox VE`_
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
@@ -108,6 +121,8 @@ After configuring the
    your web browser, using HTTPS on port 8007. For example at
    ``https://:8007``
 
+.. _install_pbc:
+
 Client Installation
 -------------------
 
@@ -123,7 +138,7 @@ you need to run:
   # apt update
   # apt install proxmox-backup-client
 
-
 .. note:: The client-only repository should be usable by most recent Debian and
    Ubuntu derivatives.
 
+.. include:: package-repositories.rst
diff --git a/docs/package-repositories.rst b/docs/package-repositories.rst
index b429b4b4..aecd6c64 100644
--- a/docs/package-repositories.rst
+++ b/docs/package-repositories.rst
@@ -149,7 +149,7 @@ Currently there's only a client-repository for APT based systems.
 .. _package_repositories_client_only_apt:
 
 APT-based Proxmox Backup Client Repository
-++++++++++++++++++++++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 For modern Linux distributions using `apt` as package manager, like all Debian
 and Ubuntu derivatives do, you may be able to use the APT-based repository.
diff --git a/docs/using-the-installer.rst b/docs/using-the-installer.rst
index cec640c5..85d7c75b 100644
--- a/docs/using-the-installer.rst
+++ b/docs/using-the-installer.rst
@@ -17,26 +17,27 @@ It includes the following:
 
 * Web-based management interface
 
-.. note:: All existing data on the selected drives will be removed during the
-   installation process. The installer does not add boot menu entries for other
-   operating systems.
+.. note:: Any existing data on the selected drives will be overwritten
+   during the installation process. The installer does not add boot
+   menu entries for other operating systems.
 
-Please insert the :ref:`installation_media` (for example, USB flash drive or
-CD-ROM) and boot from it.
+Please insert the :ref:`installation_medium` (for example, USB flash
+drive or DVD) and boot from it.
 
-.. note:: Make sure that booting from the installation medium (for example, USB)
-   is enabled in your server's firmware settings. Secure boot needs to be
-   disabled when booting an installer prior to `Proxmox Backup`_ Server version
-   3.1.
+.. note:: You may need to go into your server's firmware settings, to
+   enable booting from your installation medium (for example, USB) and
+   set the desired boot order. When booting an installer prior to
+   `Proxmox Backup`_ Server version 3.1, Secure Boot needs to be
+   disabled.
 
 .. image:: images/screenshots/pbs-installer-grub-menu.png
   :target: _images/pbs-installer-grub-menu.png
   :align: right
   :alt: Proxmox Backup Server Installer GRUB Menu
 
-After choosing the correct entry (for example, *Boot from USB*) the `Proxmox
-Backup`_ Server menu will be displayed, and one of the following options can be
-selected:
+After choosing the correct entry (for example, *Boot from USB*) the
+Proxmox Backup Server menu will be displayed, and one of the following
+options can be selected:
 
 **Install Proxmox Backup Server (Graphical)**
 
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 11:56:06 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 11:56:06 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] ui: prune keep input:
 actually clear value on clear trigger click
In-Reply-To: <20241127084115.489112-1-d.csapak@proxmox.com>
References: <20241127084115.489112-1-d.csapak@proxmox.com>
Message-ID: <5586efac-e5b4-451f-9aaf-b885fc2971d5@proxmox.com>

On 27.11.24 at 09:41, Dominik Csapak wrote:
> instead of resetting to the originalValue. This makes it behave like
> other similar fields (e.g. the combogrid).
> 
> Reported-by: Fabian Grünbichler 
> Signed-off-by: Dominik Csapak 
> ---
>  www/datastore/Prune.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 12:34:33 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 12:34:33 +0100
Subject: [pbs-devel] applied: [PATCH v3
 proxmox-widget-toolkit/proxmox-backup 0/2] ui: slightly improve removable
 datastore dialog
In-Reply-To: <20241126162916.87958-1-f.ebner@proxmox.com>
References: <20241126162916.87958-1-f.ebner@proxmox.com>
Message-ID: 

On 26.11.24 at 17:29, Fiona Ebner wrote:
> Changes in v3:
> * fix typo in variable name
> 
> Changes in v2:
> * avoid tracking emptyText state twice
> 
> proxmox-widget-toolkit:
> 
> Fiona Ebner (1):
>   form: display-edit: support emptyText
> 
>  src/form/DisplayEdit.js | 14 ++++++++++++++
>  1 file changed, 14 insertions(+)
> 
> proxmox-backup:
> 
> Fiona Ebner (1):
>   ui: datastore edit: fix emptytext for path field
> 
>  www/window/DataStoreEdit.js | 2 ++
>  1 file changed, 2 insertions(+)
> 


applied series, with the widget-toolkit side moved over to save a reference
on the edit- and display-fields on initialization, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 12:39:03 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 12:39:03 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] sync: push: pass full
 error context when returning error to job
In-Reply-To: <20241127092653.92852-1-c.ebner@proxmox.com>
References: <20241127092653.92852-1-c.ebner@proxmox.com>
Message-ID: <0c8e2611-388f-4fa8-b27e-ee27eebfec09@proxmox.com>

On 27.11.24 at 10:26, Christian Ebner wrote:
> Show the full error context when fetching the remote target
> namespaces fails. As logging of the error is handled by the calling
> sync job, reformat the error to include the error context before
> returning.
> 
> Instead of the error
> ```
> TASK ERROR: Fetching remote namespaces failed, remote returned error
> ```
> 
> the user is now presented with an error like
> ```
> TASK ERROR: Fetching remote namespaces failed, remote returned error: datastore 'removable1' is not mounted
> ```
> 
> Signed-off-by: Christian Ebner 
> ---
>  src/server/push.rs | 6 ++++--
>  1 file changed, 4 insertions(+), 2 deletions(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 12:43:45 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 12:43:45 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] docs: update copyright
 years
In-Reply-To: <20240927155812.1754476-1-c.heiss@proxmox.com>
References: <20240927155812.1754476-1-c.heiss@proxmox.com>
Message-ID: 

On 27.09.24 at 17:58, Christoph Heiss wrote:
> It's already 2024 for quite some time now.
> 
> Signed-off-by: Christoph Heiss 
> ---
>  docs/conf.py | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied (before it really gets outdated again ^^), thanks!



From c.ebner at proxmox.com  Wed Nov 27 12:44:05 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 27 Nov 2024 12:44:05 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/3] api: sync: restrict edit
 permissions for push sync jobs
Message-ID: <20241127114407.269907-1-c.ebner@proxmox.com>

Users require `Datastore.Audit` on the source datastore to read sync
jobs. Therefore, also restrict the permissions to modify sync jobs in
push direction to include the `Datastore.Audit` permission on the
source, as otherwise a user is able to create or edit sync jobs in
push direction, but not able to see them.
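
As a minimal sketch of the bitmask pattern this adds (the privilege
names are taken from the diff below, the constant values are made up
for illustration and are not the real PBS definitions):

```rs
// Illustrative privilege bits; assumed values, not the PBS constants.
const PRIV_DATASTORE_AUDIT: u64 = 1 << 0;
const PRIV_DATASTORE_READ: u64 = 1 << 1;

// Mirrors the added check: only allow modifying sync jobs in push
// direction if the user may also read (audit) the source datastore.
fn may_modify(source_privs: u64) -> bool {
    if source_privs & PRIV_DATASTORE_AUDIT == 0 {
        return false;
    }
    // the existing read/ownership checks would follow here
    source_privs & PRIV_DATASTORE_READ != 0
}

fn main() {
    assert!(!may_modify(PRIV_DATASTORE_READ)); // readable but not auditable
    assert!(may_modify(PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ));
}
```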

Reported-by: Friedrich Weber 
Suggested-by: Fabian Grünbichler 
Signed-off-by: Christian Ebner 
---
 src/api2/config/sync.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index bc012744a..eb97ef940 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -129,6 +129,11 @@ pub fn check_sync_job_modify_access(
             }
 
             let source_privs = user_info.lookup_privs(auth_id, &job.acl_path());
+            // only allow to modify jobs the user is also allowed to read
+            if source_privs & PRIV_DATASTORE_AUDIT == 0 {
+                return false;
+            }
+
             // check user is allowed to read from (local) source datastore/namespace, independent
             // of job ownership
             if source_privs & PRIV_DATASTORE_READ != 0 {
-- 
2.39.5




From c.ebner at proxmox.com  Wed Nov 27 12:44:06 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 27 Nov 2024 12:44:06 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/3] api: sync: include required
 permissions for push direction
In-Reply-To: <20241127114407.269907-1-c.ebner@proxmox.com>
References: <20241127114407.269907-1-c.ebner@proxmox.com>
Message-ID: <20241127114407.269907-2-c.ebner@proxmox.com>

Sync jobs in push and pull direction require a different set of
privileges for the various api methods provided. Update the
descriptitons to include the push direction and list them
accordingly.

Signed-off-by: Christian Ebner 
---
 src/api2/config/sync.rs | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)

diff --git a/src/api2/config/sync.rs b/src/api2/config/sync.rs
index eb97ef940..a8ea93465 100644
--- a/src/api2/config/sync.rs
+++ b/src/api2/config/sync.rs
@@ -167,7 +167,10 @@ pub fn check_sync_job_modify_access(
         items: { type: SyncJobConfig },
     },
     access: {
-        description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
+        description: "Limited to sync job entries where user has Datastore.Audit on target \
+datastore, and Remote.Audit on source remote for sync jobs in pull direction.\n\
+For push direction the user requires RemoteDatastore.Audit on the remote datastore and \
+Datastore.Audit on the local datastore.",
         permission: &Permission::Anybody,
     },
 )]
@@ -208,7 +211,12 @@ pub fn list_sync_jobs(
         },
     },
     access: {
-        description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
+        description: "For sync jobs in pull direction user needs Datastore.Backup on target \
+datastore, and Remote.Read on source remote. Additionally, remove_vanished requires \
+Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify.\n\
+For sync jobs in push direction, user needs RemoteDatastore.Backup on remote datastore, and \
+Datastore.Audit, Datastore.Read and Datastore.Modify on the source datastore, the latter not \
+required if the sync job is owned by the user. Additionally, remove_vanished requires RemoteDatastore.Modify.",
         permission: &Permission::Anybody,
     },
 )]
@@ -269,7 +277,10 @@ pub fn create_sync_job(
     },
     returns: { type: SyncJobConfig },
     access: {
-        description: "Limited to sync job entries where user has Datastore.Audit on target datastore, and Remote.Audit on source remote.",
+        description: "Limited to sync job entries where user has Datastore.Audit on target \
+datastore, and Remote.Audit on source remote for sync jobs in pull direction.\n\
+For push direction the user requires RemoteDatastore.Audit on the remote datastore and \
+Datastore.Audit on the local datastore.",
         permission: &Permission::Anybody,
     },
 )]
@@ -355,7 +366,12 @@ pub enum DeletableProperty {
     },
     access: {
         permission: &Permission::Anybody,
-        description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
+        description: "For sync jobs in pull direction user needs Datastore.Backup on target \
+datastore, and Remote.Read on source remote. Additionally, remove_vanished requires \
+Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify.\n\
+For sync jobs in push direction, user needs RemoteDatastore.Backup on remote datastore, and \
+Datastore.Audit, Datastore.Read and Datastore.Modify on the source datastore, the latter not \
+required if the sync job is owned by the user. Additionally, remove_vanished requires RemoteDatastore.Modify.",
     },
 )]
 /// Update sync job config.
@@ -545,7 +561,12 @@ pub fn update_sync_job(
     },
     access: {
         permission: &Permission::Anybody,
-        description: "User needs Datastore.Backup on target datastore, and Remote.Read on source remote. Additionally, remove_vanished requires Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify",
+        description: "For sync jobs in pull direction user needs Datastore.Backup on target \
+datastore, and Remote.Read on source remote. Additionally, remove_vanished requires \
+Datastore.Prune, and any owner other than the user themselves requires Datastore.Modify.\n\
+For sync jobs in push direction, user needs RemoteDatastore.Backup on remote datastore, and \
+Datastore.Audit, Datastore.Read and Datastore.Modify on the source datastore, the latter not \
+required if the sync job is owned by the user. Additionally, remove_vanished requires RemoteDatastore.Modify.",
     },
 )]
 /// Remove a sync job configuration
-- 
2.39.5




From c.ebner at proxmox.com  Wed Nov 27 12:44:07 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 27 Nov 2024 12:44:07 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 3/3] docs: mention required
 source audit permission for push sync jobs
In-Reply-To: <20241127114407.269907-1-c.ebner@proxmox.com>
References: <20241127114407.269907-1-c.ebner@proxmox.com>
Message-ID: <20241127114407.269907-3-c.ebner@proxmox.com>

To be in line with the updated permission requirements, as
Datastore.Audit is now required to read and edit sync jobs in push
direction.

Signed-off-by: Christian Ebner 
---
 docs/managing-remotes.rst | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/docs/managing-remotes.rst b/docs/managing-remotes.rst
index 4a78a9310..f8012636e 100644
--- a/docs/managing-remotes.rst
+++ b/docs/managing-remotes.rst
@@ -256,9 +256,9 @@ The following permissions are required for a sync job in push direction:
 
 #. ``Remote.Audit`` on ``/remote/{remote}`` and ``Remote.DatastoreBackup`` on
    ``/remote/{remote}/{remote-store}/{remote-ns}`` path or subnamespace.
-#. At least ``Datastore.Read`` on the local source datastore namespace
-   (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if owner of the sync
-   job.
+#. At least ``Datastore.Read`` and ``Datastore.Audit`` on the local source
+   datastore namespace (``/datastore/{store}/{ns}``) or ``Datastore.Backup`` if
+   owner of the sync job.
 #. ``Remote.DatastorePrune`` on ``/remote/{remote}/{remote-store}/{remote-ns}``
    path to remove vanished snapshots and groups. Make sure to use a dedicated
    remote for each sync job in push direction as noted above.
-- 
2.39.5




From g.goller at proxmox.com  Wed Nov 27 12:46:50 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Wed, 27 Nov 2024 12:46:50 +0100
Subject: [pbs-devel] [PATCH] log: ignore to_stdout parameter
Message-ID: <20241127114650.229625-1-g.goller@proxmox.com>

This parameter causes the FileLogger to duplicate the log output to
stdout. This causes duplicate output on proxmox-backup-manager because
this is now handled by tracing. This should be removed completely in the
future.
In the worst case this will only result in missing log lines on stdout
(which is visible only on proxmox-backup-manager/client invocations
anyway).
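
As a hedged illustration of the duplication (assumed setup using the
tracing and tracing-subscriber crates, not the actual proxmox-log
wiring):

```rs
// With a tracing subscriber already printing to stdout, a FileLogger
// that additionally mirrors its messages to stdout emits every line
// twice.
fn main() {
    tracing_subscriber::fmt().init(); // subscriber writes to stdout
    tracing::info!("starting sync job"); // printed once by tracing
    // A FileLogger with to_stdout = true would print the same line
    // again - exactly the duplication this change avoids.
}
```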

Signed-off-by: Gabriel Goller 
---
 proxmox-log/src/file_logger.rs | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/proxmox-log/src/file_logger.rs b/proxmox-log/src/file_logger.rs
index c3648976b690..ab3072e4d477 100644
--- a/proxmox-log/src/file_logger.rs
+++ b/proxmox-log/src/file_logger.rs
@@ -103,11 +103,12 @@ impl FileLogger {
     pub fn log<S: AsRef<str>>(&mut self, msg: S) {
         let msg = msg.as_ref();
 
-        if self.options.to_stdout {
-            let mut stdout = std::io::stdout();
-            let _ = stdout.write_all(msg.as_bytes());
-            let _ = stdout.write_all(b"\n");
-        }
+        // TODO: remove whole to_stdout option
+        //if self.options.to_stdout {
+        //    let mut stdout = std::io::stdout();
+        //    let _ = stdout.write_all(msg.as_bytes());
+        //    let _ = stdout.write_all(b"\n");
+        //}
 
         let line = if self.options.prefix_time {
             let now = proxmox_time::epoch_i64();
@@ -128,16 +129,18 @@ impl FileLogger {
 
 impl std::io::Write for FileLogger {
     fn write(&mut self, buf: &[u8]) -> Result<usize, std::io::Error> {
-        if self.options.to_stdout {
-            let _ = std::io::stdout().write(buf);
-        }
+        // TODO: remove whole to_stdout option
+        //if self.options.to_stdout {
+        //    let _ = std::io::stdout().write(buf);
+        //}
         self.file.write(buf)
     }
 
     fn flush(&mut self) -> Result<(), std::io::Error> {
-        if self.options.to_stdout {
-            let _ = std::io::stdout().flush();
-        }
+        // TODO: remove whole to_stdout option
+        //if self.options.to_stdout {
+        //    let _ = std::io::stdout().flush();
+        //}
         self.file.flush()
     }
 }
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 12:50:48 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 12:50:48 +0100
Subject: [pbs-devel] applied: [PATCH] api: removable datastore: downgrade
 device already mounted error to info
In-Reply-To: <20241127104235.154875-1-a.lauterer@proxmox.com>
References: <20241127104235.154875-1-a.lauterer@proxmox.com>
Message-ID: <96119412-277a-4964-a722-3aadf4dd9c88@proxmox.com>

On 27.11.24 at 11:42, Aaron Lauterer wrote:
> pbs-datastore::datastore::is_datastore_mounted_at() verifies that the
> mounted file system has the expected UUID. Therefore we don't have to
> error out if we try to mount an already mounted removable datastore.
> 
> Signed-off-by: Aaron Lauterer 
> ---
>  src/api2/admin/datastore.rs | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
>

applied, thanks!

But IMO the info we get from is_datastore_mounted_at could be a bit more
telling, or well, I'd like to be able to bail here if the backing-device
is mounted somewhere else or if the wrong device is mounted on our path.
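
Something along these lines could capture that, as a rough sketch (all
names are hypothetical, this is not existing code):

```rs
// A more telling mount state than a plain bool, so callers can bail
// with a precise reason.
enum DatastoreMountState {
    /// expected device mounted on the expected path
    Mounted,
    /// nothing mounted on the datastore path
    NotMounted,
    /// a different device is mounted on our path
    WrongDevice { found_uuid: String },
    /// the backing device is mounted somewhere else
    MountedElsewhere { mount_point: String },
}

fn describe(state: &DatastoreMountState) -> String {
    match state {
        DatastoreMountState::Mounted => "ok".into(),
        DatastoreMountState::NotMounted => "not mounted".into(),
        DatastoreMountState::WrongDevice { found_uuid } => {
            format!("wrong device mounted (uuid {found_uuid})")
        }
        DatastoreMountState::MountedElsewhere { mount_point } => {
            format!("backing device already mounted at {mount_point}")
        }
    }
}
```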



From g.goller at proxmox.com  Wed Nov 27 12:50:58 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Wed, 27 Nov 2024 12:50:58 +0100
Subject: [pbs-devel] [PATCH] textareafield: add emptyText message to
 show markdown is possible
In-Reply-To: <123dbdc2-192c-4565-af00-927a86c1ffd9@proxmox.com>
References: <20241127102737.81854-1-g.goller@proxmox.com>
 <123dbdc2-192c-4565-af00-927a86c1ffd9@proxmox.com>
Message-ID: 

On 27.11.2024 11:29, Lukas Wagner wrote:
>On  2024-11-27 11:27, Gabriel Goller wrote:
>> Just like in our `Notes` fields, show an emptyText message that explains
>> that markdown can be used.
>>
>> Reported-by: Lukas Wagner 
>> Signed-off-by: Gabriel Goller 
>> ---
>>  src/form/TextAreaField.js | 1 +
>>  1 file changed, 1 insertion(+)
>>
>> diff --git a/src/form/TextAreaField.js b/src/form/TextAreaField.js
>> index 267b40c87d74..ed6015a9ec51 100644
>> --- a/src/form/TextAreaField.js
>> +++ b/src/form/TextAreaField.js
>> @@ -10,6 +10,7 @@ Ext.define('Proxmox.form.field.Base64TextArea', {
>>          width: 600,
>>          height: 400,
>>          scrollable: 'y',
>> +        emptyText: 'You can use Markdown for richt text formatting.',
>
>This should use gettext :)
>Also there is a typo in 'rich' :D

Ack, thanks for the review!
v2 is out!



From g.goller at proxmox.com  Wed Nov 27 12:52:36 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Wed, 27 Nov 2024 12:52:36 +0100
Subject: [pbs-devel] [PATCH v2 widget-toolkit] textareafield: add emptyText
 message to show markdown is possible
Message-ID: <20241127115236.237616-1-g.goller@proxmox.com>

Just like in our `Notes` fields, show an emptyText message that explains
that markdown can be used.

Reported-by: Lukas Wagner 
Signed-off-by: Gabriel Goller 
---

v2, thanks @Lukas:
 - use gettext
 - fix typo

 src/form/TextAreaField.js | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/form/TextAreaField.js b/src/form/TextAreaField.js
index 267b40c87d74..c9a6ff5e5ac4 100644
--- a/src/form/TextAreaField.js
+++ b/src/form/TextAreaField.js
@@ -10,6 +10,7 @@ Ext.define('Proxmox.form.field.Base64TextArea', {
         width: 600,
         height: 400,
         scrollable: 'y',
+        emptyText: gettext('You can use Markdown for rich text formatting.'),
     },
 
     setValue: function(value) {
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 12:57:11 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 12:57:11 +0100
Subject: [pbs-devel] applied: [PATCH v2 proxmox-backup] docs: installation:
 several small fixes/improvements
In-Reply-To: <20241127104958.3-1-a.zeidler@proxmox.com>
References: <20241127104958.3-1-a.zeidler@proxmox.com>
Message-ID: <4fadf03d-8f6a-40de-b230-d3fdc1e108c4@proxmox.com>

Am 27.11.24 um 11:49 schrieb Alexander Zeidler:
> * consistently use "medium" (singular), as only one is needed for
>   installation (installation-media.rst not renamed)
> * add short introduction to recently added chapter "Installation Media"
> * update minimum required flash drive storage space to 2 GB
> * remove CD-ROM (too little storage space) but keep DVD
> * mention explicitly that data get overwritten on installation media /
>   installation target disks
> * mention that using `dd` will require root privileges
> * add accidentally cut off text when copying from PVE docs
> * add reference labels to currently needed section titles
> * reword some paragraphs for completeness and readability
> * mention all installation methods in the intro of "Server Installation"
> * add the boot order as possible boot issue
> * remove recently added redundant product website hyperlinks (as earlier
>   with commit 34407477e2)
> * fix broken heading level of APT-based PBC repo
> 
> * slightly reorder sub-chapters of "Installation":
> 
> After adding the chapter "Installation Media" (d363818641), the chapter
> order under "Installation" is:
> 
> 1. System Requirements
> 2. Installation Media
> 3. Debian Package Repositories
> 4. Server Installation
> 5. Client Installation
> 
> But repos are more likely to be configured after installation, and for
> other installation methods chapter links exist anyway. So to keep the
> chapter order more logical, "Debian Package Repositories" is now moved
> after "Client Installation".
> 
> Signed-off-by: Alexander Zeidler 
> ---
> v2:
>  * implement all suggestions from Shannon
> 
> 
>  docs/installation-media.rst   | 57 +++++++++++++++++++++--------------
>  docs/installation.rst         | 41 +++++++++++++++++--------
>  docs/package-repositories.rst |  2 +-
>  docs/using-the-installer.rst  | 25 +++++++--------
>  4 files changed, 76 insertions(+), 49 deletions(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 13:25:09 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 13:25:09 +0100
Subject: [pbs-devel] applied-series: [PATCH proxmox-backup 1/3] api: sync:
 restrict edit permissions for push sync jobs
In-Reply-To: <20241127114407.269907-1-c.ebner@proxmox.com>
References: <20241127114407.269907-1-c.ebner@proxmox.com>
Message-ID: <8f71eff0-05f1-4e1c-8527-11b9368976b2@proxmox.com>

On 27.11.24 at 12:44, Christian Ebner wrote:
> Users require `Datastore.Audit` on the source datastore to read sync
> jobs. Therefore, also restrict the permissions to modify sync jobs in
> push direction to include the `Datastore.Audit` permission on the
> source, as otherwise a user is able to create or edit sync jobs in
> push direction, but not able to see them.
> 
> Reported-by: Friedrich Weber 
> Suggested-by: Fabian Grünbichler 
> Signed-off-by: Christian Ebner 
> ---
>  src/api2/config/sync.rs | 5 +++++
>  1 file changed, 5 insertions(+)
> 
>

applied-series, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 13:33:53 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 13:33:53 +0100
Subject: [pbs-devel] applied:  [PATCH] log: ignore to_stdout parameter
In-Reply-To: <20241127114650.229625-1-g.goller@proxmox.com>
References: <20241127114650.229625-1-g.goller@proxmox.com>
Message-ID: 

On 27.11.24 at 12:46, Gabriel Goller wrote:
> This parameter causes the FileLogger to duplicate the log output to
> stdout. This causes duplicate output on proxmox-backup-manager because
> this is now handled by tracing. This should be removed completely in the
> future.
> In the worst case this will only result in missing log lines on stdout
> (which is visible only on proxmox-backup-manager/client invocations
> anyway).
> 
> Signed-off-by: Gabriel Goller 
> ---
>  proxmox-log/src/file_logger.rs | 25 ++++++++++++++-----------
>  1 file changed, 14 insertions(+), 11 deletions(-)
> 
>

applied, with a doc-comment added to the respective struct member so that it's
less likely that this will get new users, thanks!



From c.ebner at proxmox.com  Wed Nov 27 14:04:17 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Wed, 27 Nov 2024 14:04:17 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] update Italian translation
Message-ID: <20241127130417.320887-1-c.ebner@proxmox.com>

Signed-off-by: Christian Ebner 
---
 it.po | 58 +++++++++++++++++++---------------------------------------
 1 file changed, 19 insertions(+), 39 deletions(-)

diff --git a/it.po b/it.po
index f6105c5..376d96f 100644
--- a/it.po
+++ b/it.po
@@ -2079,14 +2079,12 @@ msgid "Connection failure. Network error or Proxmox VE services not running?"
 msgstr "Errore di connessione. Errore rete o servizi Proxmox VE non attivi?"
 
 #: proxmox-widget-toolkit/src/window/ConsentModal.js:15
-#, fuzzy
 msgid "Consent"
-msgstr "Console"
+msgstr "Consenso"
 
 #: proxmox-backup/www/config/NodeOptionView.js:60
-#, fuzzy
 msgid "Consent Text"
-msgstr "Tipo Contenuto"
+msgstr "Testo di consenso"
 
 #: proxmox-widget-toolkit/src/Utils.js:708 pmg-gui/js/ServerStatus.js:59
 #: pve-manager/www/manager6/Utils.js:2045
@@ -2555,9 +2553,8 @@ msgid "Datastore is not available"
 msgstr "Datastore non disponibile"
 
 #: proxmox-backup/www/datastore/Summary.js:74
-#, fuzzy
 msgid "Datastore is not mounted"
-msgstr "Datastore non disponibile"
+msgstr "Datastore non ? montato"
 
 #: proxmox-backup/www/datastore/DataStoreList.js:196
 msgid "Datastores"
@@ -2924,7 +2921,6 @@ msgid "Device node"
 msgstr "Nodo Dispositivo"
 
 #: proxmox-backup/www/window/DataStoreEdit.js:75
-#, fuzzy
 msgid "Device path"
 msgstr "Path Dispositivo"
 
@@ -4321,9 +4317,8 @@ msgstr "Formattazione in corso"
 #: pve-manager/www/manager6/grid/FirewallOptions.js:155
 #: pve-manager/www/manager6/grid/FirewallOptions.js:160
 #: pve-manager/www/manager6/grid/FirewallOptions.js:165
-#, fuzzy
 msgid "Forward Policy"
-msgstr "Policy"
+msgstr "Policy Forward"
 
 #: pve-manager/www/manager6/grid/FirewallRules.js:243
 msgid ""
@@ -5261,9 +5256,8 @@ msgid "Is this token already registered?"
 msgstr ""
 
 #: pve-manager/www/manager6/sdn/VnetEdit.js:81
-#, fuzzy
 msgid "Isolate Ports"
-msgstr "Porta del Relay"
+msgstr "Isolare Porte"
 
 #: proxmox-widget-toolkit/src/panel/Certificates.js:18
 #: proxmox-widget-toolkit/src/window/Certificates.js:26
@@ -5769,9 +5763,8 @@ msgstr "Store locale"
 
 #: proxmox-backup/www/config/SyncView.js:36
 #: proxmox-backup/www/window/SyncJobEdit.js:39
-#, fuzzy
 msgid "Local User"
-msgstr "Proprietario locale"
+msgstr "User Locale"
 
 #: proxmox-backup/www/Utils.js:713
 msgid "Locating"
@@ -6434,9 +6427,8 @@ msgid "Mount"
 msgstr "Monta"
 
 #: proxmox-backup/www/Utils.js:420
-#, fuzzy
 msgid "Mount Device"
-msgstr "Dal Dispositivo"
+msgstr "Monta Dispositivo"
 
 #: pve-manager/www/manager6/lxc/MPEdit.js:370
 #: pve-manager/www/manager6/lxc/MPEdit.js:372
@@ -7002,9 +6994,8 @@ msgstr "Nessuna VM selezionata"
 
 #: pve-manager/www/manager6/sdn/FirewallVnetView.js:10
 #: pve-manager/www/manager6/sdn/VnetView.js:6
-#, fuzzy
 msgid "No VNet configured."
-msgstr "Non ancora configurata"
+msgstr "Nessun VNet configurato."
 
 #: pve-manager/www/manager6/ceph/Status.js:130
 msgid "No Warnings/Errors"
@@ -7050,9 +7041,8 @@ msgid "No default available"
 msgstr "Nessun predefinito disponibile"
 
 #: pve-manager/www/manager6/grid/FirewallRules.js:617
-#, fuzzy
 msgid "No firewall rule configured here."
-msgstr "Nessun target configurato"
+msgstr "Nessuna regola di firewall configurata qui."
 
 #: pmg-gui/js/QuarantineList.js:265
 msgid "No match found"
@@ -7131,9 +7121,8 @@ msgid "No valid subscription"
 msgstr "Nessun abbonamento valido"
 
 #: pve-manager/www/manager6/sdn/ZoneView.js:6
-#, fuzzy
 msgid "No zone configured."
-msgstr "Nessun {0} configurato."
+msgstr "Nessun zone configurato."
 
 #: pve-manager/www/manager6/dc/RealmSyncJob.js:8
 msgid "No {0} configured"
@@ -7524,9 +7513,8 @@ msgid "On"
 msgstr ""
 
 #: proxmox-backup/www/window/DataStoreEdit.js:116
-#, fuzzy
 msgid "On device path"
-msgstr "Path Dispositivo"
+msgstr "Path sul dispositivo"
 
 #: pve-manager/www/manager6/dc/BackupJobDetail.js:215
 #: pve-manager/www/manager6/form/NotificationPolicySelector.js:6
@@ -8599,9 +8587,8 @@ msgid "Re-Verify After"
 msgstr "Re-Verifica dopo"
 
 #: proxmox-backup/www/window/SyncJobEdit.js:389
-#, fuzzy
 msgid "Re-sync corrupt snapshots"
-msgstr "Ripristina snapshot"
+msgstr "Re-sync snapshots danneggiati"
 
 #: proxmox-backup/www/window/SyncJobEdit.js:394
 msgid "Re-sync snapshots, whose verification failed."
@@ -8923,14 +8910,12 @@ msgstr ""
 
 #: proxmox-backup/www/window/CreateDirectory.js:59
 #: proxmox-backup/www/window/DataStoreEdit.js:104
-#, fuzzy
 msgid "Removable datastore"
-msgstr "Rimuovi Datastore"
+msgstr "Datastore Rimovibile"
 
 #: proxmox-backup/www/NavigationTree.js:279
-#, fuzzy
 msgid "Removable datastore not mounted"
-msgstr "Rimuovi Datastore"
+msgstr "Datastore rimovibile non montato"
 
 #: pve-manager/www/manager6/grid/Replication.js:339
 #: pve-manager/www/manager6/grid/Replication.js:340
@@ -10317,9 +10302,8 @@ msgid "Source Slot"
 msgstr "Slot sorgente"
 
 #: pve-manager/www/manager6/window/GuestImport.js:570
-#, fuzzy
 msgid "Source Storage"
-msgstr "Cambia Storage"
+msgstr "Storage Sorgente"
 
 #: pve-manager/www/manager6/window/Migrate.js:337
 msgid "Source node"
@@ -11163,9 +11147,8 @@ msgid "Target Ratio"
 msgstr ""
 
 #: proxmox-backup/www/window/SyncJobEdit.js:36
-#, fuzzy
 msgid "Target Remote"
-msgstr "Nome Destinazione"
+msgstr "Remote di destinazione"
 
 #: proxmox-backup/www/config/MetricServerView.js:73
 msgid "Target Server"
@@ -11897,14 +11880,12 @@ msgid "Unmount"
 msgstr "Smonta"
 
 #: proxmox-backup/www/Utils.js:431
-#, fuzzy
 msgid "Unmount Device"
-msgstr "Dispositivo USB"
+msgstr "Smonta Dispositivo"
 
 #: proxmox-backup/www/Utils.js:805
-#, fuzzy
 msgid "Unmounting"
-msgstr "Smonta"
+msgstr "Smontando"
 
 #: pve-manager/www/manager6/form/USBSelector.js:173
 msgid "Unplugged"
@@ -12442,9 +12423,8 @@ msgid "VNet"
 msgstr ""
 
 #: pve-manager/www/manager6/dc/Config.js:228
-#, fuzzy
 msgid "VNet Firewall"
-msgstr "Firewall"
+msgstr "Firewall VNet"
 
 #: pve-manager/www/manager6/sdn/zones/EvpnEdit.js:38
 msgid "VNet MAC Address"
-- 
2.39.5




From f.gruenbichler at proxmox.com  Wed Nov 27 14:05:21 2024
From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=)
Date: Wed, 27 Nov 2024 14:05:21 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] api: create_datastore: fix
 nesting checks
Message-ID: <20241127130521.1019765-1-f.gruenbichler@proxmox.com>

there are two kinds of overlap we need to check here (see the sketch below):
- two removable datastores backed by the same device must not have nested
  relative paths on the device
- any two datastores must not have nested absolute paths
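
The nesting test itself boils down to a mutual prefix check; a
self-contained sketch (not the patch code itself):

```rs
use std::path::Path;

// Two paths are nested if either is a component-wise prefix of the
// other; Path::starts_with compares whole components, so
// "/mnt/store-a" does not count as nested under "/mnt/store".
fn paths_nested(a: &str, b: &str) -> bool {
    let (a, b) = (Path::new(a), Path::new(b));
    a.starts_with(b) || b.starts_with(a)
}

fn main() {
    assert!(paths_nested("/mnt/store", "/mnt/store/sub"));
    assert!(!paths_nested("/mnt/store-a", "/mnt/store-b"));
}
```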

Signed-off-by: Fabian Grünbichler 
---
 src/api2/config/datastore.rs | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 24d3f6303..c72eb5a72 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -82,21 +82,30 @@ pub(crate) fn do_create_datastore(
         bail!("cannot create datastore in root path");
     }
 
-    let new_store_path = Path::new(&datastore.path);
+    let new_store_path = PathBuf::from(&datastore.absolute_path());
+    let removable = datastore.backing_device.is_some();
     for store in config.convert_to_typed_array::<DataStoreConfig>("datastore")? {
-        if store.backing_device != datastore.backing_device {
-            continue;
+        // Relative paths must not be nested on the backing device of removable datastores
+        if removable && store.backing_device == datastore.backing_device {
+            let new_path = Path::new(&datastore.path);
+            let path = Path::new(&store.path);
+            if new_path.starts_with(path) || path.starts_with(new_path) {
+                param_bail!(
+                    "path",
+                    "paths on backing device must not be nested - {path:?} already used by '{store}'!",
+                    store = store.name
+                );
+            }
         }
 
-        // Since we check for that on creation, we assume all removable datastore
-        // paths are relative, so don't have a leading `/`.
-        let store_path = Path::new(&store.path);
+        // No two datastores should have a nested absolute path
+        let store_path = PathBuf::from(store.absolute_path());
         if store_path.starts_with(&new_store_path) || new_store_path.starts_with(&store_path) {
             param_bail!(
                 "path",
-                "nested datastores not allowed: '{}' already in '{}'",
+                "nested datastores not allowed: '{}' already in {:?}",
                 store.name,
-                store.path
+                store_path,
             );
         }
     }
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 14:14:48 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 14:14:48 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] update Italian
 translation
In-Reply-To: <20241127130417.320887-1-c.ebner@proxmox.com>
References: <20241127130417.320887-1-c.ebner@proxmox.com>
Message-ID: <4d8893d5-808a-497f-bfb3-93735149ea10@proxmox.com>

On 27.11.24 at 14:04, Christian Ebner wrote:
> Signed-off-by: Christian Ebner 
> ---
>  it.po | 58 +++++++++++++++++++---------------------------------------
>  1 file changed, 19 insertions(+), 39 deletions(-)
> 
>

applied, thanks!

I also committed a `make update` now with some new stuff, now that the PBS UI
should not move much anymore for the next days.



From t.lamprecht at proxmox.com  Wed Nov 27 14:23:59 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 14:23:59 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] api: create_datastore:
 fix nesting checks
In-Reply-To: <20241127130521.1019765-1-f.gruenbichler@proxmox.com>
References: <20241127130521.1019765-1-f.gruenbichler@proxmox.com>
Message-ID: <22373833-a743-47cc-bf24-c82847ab3e87@proxmox.com>

On 27.11.24 at 14:05, Fabian Grünbichler wrote:
> there are two kinds of overlap we need to check here:
> - two removable datastores backed by the same device must not have nested
>   relative paths on the device
> - any two datastores must not have nested absolute paths
> 
> Signed-off-by: Fabian Grünbichler 
> ---
>  src/api2/config/datastore.rs | 25 +++++++++++++++++--------
>  1 file changed, 17 insertions(+), 8 deletions(-)
> 
>

applied, thanks!

FWIW, switching the "store" loop variable to something like "existing_store"
and "datastore" to "new_store" would help on reading here.



From m.sandoval at proxmox.com  Wed Nov 27 14:54:29 2024
From: m.sandoval at proxmox.com (Maximiliano Sandoval)
Date: Wed, 27 Nov 2024 14:54:29 +0100
Subject: [pbs-devel] [PATCH backup 1/2] ui: tree: make Tape Backup string
 translatable
Message-ID: <20241127135430.357036-1-m.sandoval@proxmox.com>

Signed-off-by: Maximiliano Sandoval 
---
 www/NavigationTree.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/NavigationTree.js b/www/NavigationTree.js
index 024a4799..f10b0cd6 100644
--- a/www/NavigationTree.js
+++ b/www/NavigationTree.js
@@ -104,7 +104,7 @@ Ext.define('PBS.store.NavigationStore', {
 		],
 	    },
 	    {
-		text: "Tape Backup",
+		text: gettext('Tape Backup'),
 		iconCls: 'pbs-icon-tape',
 		id: 'tape_management',
 		path: 'pbsTapeManagement',
-- 
2.39.5




From m.sandoval at proxmox.com  Wed Nov 27 14:54:30 2024
From: m.sandoval at proxmox.com (Maximiliano Sandoval)
Date: Wed, 27 Nov 2024 14:54:30 +0100
Subject: [pbs-devel] [PATCH backup 2/2] dashboard: make Subscription
 translatable
In-Reply-To: <20241127135430.357036-1-m.sandoval@proxmox.com>
References: <20241127135430.357036-1-m.sandoval@proxmox.com>
Message-ID: <20241127135430.357036-2-m.sandoval@proxmox.com>

Signed-off-by: Maximiliano Sandoval 
---
 www/Dashboard.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/Dashboard.js b/www/Dashboard.js
index 57fdcdda..52b4a6ec 100644
--- a/www/Dashboard.js
+++ b/www/Dashboard.js
@@ -269,7 +269,7 @@ Ext.define('PBS.Dashboard', {
 	},
 	{
 	    iconCls: 'fa fa-ticket',
-	    title: 'Subscription',
+	    title: gettext('Subscription'),
 	    height: 250,
 	    reference: 'subscription',
 	    xtype: 'pbsSubscriptionInfo',
-- 
2.39.5




From f.gruenbichler at proxmox.com  Wed Nov 27 15:11:28 2024
From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=)
Date: Wed, 27 Nov 2024 15:11:28 +0100
Subject: [pbs-devel] [RFC proxmox-backup 2/2] GC: add check for nested
 datastore
In-Reply-To: <20241127141128.1123925-1-f.gruenbichler@proxmox.com>
References: <20241127141128.1123925-1-f.gruenbichler@proxmox.com>
Message-ID: <20241127141128.1123925-2-f.gruenbichler@proxmox.com>

these are particularly problematic since GC will walk the whole datastore tree
on the file system, and will thus pick up indices (but not chunks!) from nested
directories that are ignored in other code paths that use our regular
iterators..
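
Roughly what makes the plain file system walk dangerous, as a sketch
(illustrative only, not the actual GC code):

```rs
use std::path::{Path, PathBuf};

// A recursive walk over the datastore tree also descends into a nested
// datastore and picks up its .fidx/.didx index files, while the chunks
// those indices reference live in the nested store's own .chunks
// directory and are not part of the parent's chunk store.
fn walk_indices(dir: &Path, found: &mut Vec<PathBuf>) -> std::io::Result<()> {
    for entry in std::fs::read_dir(dir)? {
        let path = entry?.path();
        if path.is_dir() {
            walk_indices(&path, found)?; // does not stop at nested stores
        } else if matches!(
            path.extension().and_then(|e| e.to_str()),
            Some("fidx" | "didx")
        ) {
            found.push(path);
        }
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut found = Vec::new();
    walk_indices(Path::new("/path/to/datastore"), &mut found)?; // hypothetical path
    println!("{} index files found", found.len());
    Ok(())
}
```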

Signed-off-by: Fabian Grünbichler 
---

Notes:
    a similar check might also be sensible for mounting and should now be fairly
    easy to implement there as well..

 pbs-datastore/src/datastore.rs | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 33bc1f72e..4c062e244 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -1150,6 +1150,17 @@ impl DataStore {
             // writer" information and thus no safe atime cutoff
             let _exclusive_lock = self.inner.chunk_store.try_exclusive_lock()?;
 
+            let (config, _digest) = pbs_config::datastore::config()?;
+            let gc_store_config: DataStoreConfig = config.lookup("datastore", &self.name())?;
+            let all_stores = config.convert_to_typed_array("datastore")?;
+            if let Err(err) = gc_store_config.ensure_not_nested(&all_stores) {
+                info!(
+                    "Current datastore path: {path}",
+                    path = gc_store_config.absolute_path()
+                );
+                bail!("Aborting GC for safety reasons: {err}");
+            }
+
             let phase1_start_time = proxmox_time::epoch_i64();
             let oldest_writer = self
                 .inner
-- 
2.39.5




From f.gruenbichler at proxmox.com  Wed Nov 27 15:11:27 2024
From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=)
Date: Wed, 27 Nov 2024 15:11:27 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] datastore: extract nesting
 check into helper
Message-ID: <20241127141128.1123925-1-f.gruenbichler@proxmox.com>

and improve the variable naming while we are at it. This allows the check to be
re-used in other code paths, like when starting a garbage collection.

Signed-off-by: Fabian Grünbichler 
---

Notes:
    no semantic changes intended, for *after* the release

 pbs-api-types/src/datastore.rs | 39 ++++++++++++++++++++++++++++++++++
 src/api2/config/datastore.rs   | 29 +++----------------------
 2 files changed, 42 insertions(+), 26 deletions(-)

diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
index d3876838b..ddd8d3c6b 100644
--- a/pbs-api-types/src/datastore.rs
+++ b/pbs-api-types/src/datastore.rs
@@ -441,6 +441,45 @@ impl DataStoreConfig {
 
         Ok(())
     }
+
+    pub fn ensure_not_nested(&self, stores: &[DataStoreConfig]) -> Result<(), Error> {
+        let our_absolute_path = PathBuf::from(self.absolute_path());
+        let removable = self.backing_device.is_some();
+        for other_store in stores {
+            if self == other_store {
+                continue;
+            };
+
+            // Relative paths must not be nested on the backing device of removable datastores
+            if removable && other_store.backing_device == self.backing_device {
+                let our_relative_path = Path::new(&self.path);
+                let other_relative_path = Path::new(&other_store.path);
+                if our_relative_path.starts_with(other_relative_path)
+                    || other_relative_path.starts_with(our_relative_path)
+                {
+                    bail!(
+                        "paths on backing device must not be nested - {path:?} already used by '{store}'!",
+                        path = other_relative_path,
+                        store = other_store.name,
+                    );
+                }
+            }
+
+            // No two datastores should have a nested absolute path
+            let other_absolute_path = PathBuf::from(other_store.absolute_path());
+            if other_absolute_path.starts_with(&our_absolute_path)
+                || our_absolute_path.starts_with(&other_absolute_path)
+            {
+                bail!(
+                    "nested datastores not allowed: '{}' already in {:?}",
+                    other_store.name,
+                    other_absolute_path,
+                );
+            }
+        }
+
+        Ok(())
+    }
 }
 
 #[api(
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index 7c087d9fc..d8bae2078 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -82,32 +82,9 @@ pub(crate) fn do_create_datastore(
         bail!("cannot create datastore in root path");
     }
 
-    let new_store_path = PathBuf::from(&datastore.absolute_path());
-    let removable = datastore.backing_device.is_some();
-    for store in config.convert_to_typed_array::<DataStoreConfig>("datastore")? {
-        // Relative paths must not be nested on the backing device of removable datastores
-        if removable && store.backing_device == datastore.backing_device {
-            let new_path = Path::new(&datastore.path);
-            let path = Path::new(&store.path);
-            if new_path.starts_with(path) || path.starts_with(new_path) {
-                param_bail!(
-                    "path",
-                    "paths on backing device must not be nested - {path:?} already used by '{store}'!",
-                    store = store.name
-                );
-            }
-        }
-
-        // No two datastores should have a nested absolute path
-        let store_path = PathBuf::from(store.absolute_path());
-        if store_path.starts_with(&new_store_path) || new_store_path.starts_with(&store_path) {
-            param_bail!(
-                "path",
-                "nested datastores not allowed: '{}' already in {:?}",
-                store.name,
-                store_path,
-            );
-        }
+    let existing_stores = config.convert_to_typed_array("datastore")?;
+    if let Err(err) = datastore.ensure_not_nested(&existing_stores) {
+        param_bail!("path", err);
     }
 
     let need_unmount = datastore.backing_device.is_some();
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 15:27:45 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 15:27:45 +0100
Subject: [pbs-devel] applied: [PATCH backup 1/2] ui: tree: make Tape Backup
 string translatable
In-Reply-To: <20241127135430.357036-1-m.sandoval@proxmox.com>
References: <20241127135430.357036-1-m.sandoval@proxmox.com>
Message-ID: 

On 27.11.24 at 14:54, Maximiliano Sandoval wrote:
> Signed-off-by: Maximiliano Sandoval 
> ---
>  www/NavigationTree.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied both patches, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 15:27:50 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 15:27:50 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup 1/2] datastore: extract
 nesting check into helper
In-Reply-To: <20241127141128.1123925-1-f.gruenbichler@proxmox.com>
References: <20241127141128.1123925-1-f.gruenbichler@proxmox.com>
Message-ID: 

On 27.11.24 at 15:11, Fabian Grünbichler wrote:
> and improve the variable naming while we are at it. This allows the check to be
> re-used in other code paths, like when starting a garbage collection.
> 
> Signed-off-by: Fabian Grünbichler 
> ---
> 
> Notes:
>     no semantic changes intended, for *after* the release
> 
>  pbs-api-types/src/datastore.rs | 39 ++++++++++++++++++++++++++++++++++
>  src/api2/config/datastore.rs   | 29 +++----------------------
>  2 files changed, 42 insertions(+), 26 deletions(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 15:31:31 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 15:31:31 +0100
Subject: [pbs-devel] applied: [RFC proxmox-backup 2/2] GC: add check for
 nested datastore
In-Reply-To: <20241127141128.1123925-2-f.gruenbichler@proxmox.com>
References: <20241127141128.1123925-1-f.gruenbichler@proxmox.com>
 <20241127141128.1123925-2-f.gruenbichler@proxmox.com>
Message-ID: 

On 27.11.24 at 15:11, Fabian Grünbichler wrote:
> these are particularly problematic since GC will walk the whole datastore tree
> on the file system, and will thus pick up indices (but not chunks!) from nested
> directories that are ignored in other code paths that use our regular
> iterators..
> 
> Signed-off-by: Fabian Grünbichler 
> ---
> 
> Notes:
>     a similar check might also be sensible for mounting and should now be fairly
>     easy to implement there as well..
> 
>  pbs-datastore/src/datastore.rs | 11 +++++++++++
>  1 file changed, 11 insertions(+)
> 
>

applied, thanks!

It's a sensible improvement and the cost is low, especially compared to what
GC does in general. That said, it's naturally not a silver bullet: through a
symlink or bind mount one could still construct a datastore that would not be
detected. That is probably obvious to you already, so just mentioning it for
the sake of completeness.
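
For completeness, a sketch of how the symlink case could be caught by
resolving paths first (an assumption about a possible follow-up, not
existing code; bind mounts would additionally need a device/inode
comparison):

```rs
use std::path::PathBuf;

// Canonicalizing resolves symlinks (for paths that exist), so nesting
// hidden behind a symlink becomes visible to the prefix check again.
fn nested_after_resolving(a: &str, b: &str) -> std::io::Result<bool> {
    let a: PathBuf = std::fs::canonicalize(a)?;
    let b: PathBuf = std::fs::canonicalize(b)?;
    Ok(a.starts_with(&b) || b.starts_with(&a))
}
```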



From t.lamprecht at proxmox.com  Wed Nov 27 15:32:44 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 15:32:44 +0100
Subject: [pbs-devel] applied: [PATCH v2 widget-toolkit] textareafield: add
 emptyText message to show markdown is possible
In-Reply-To: <20241127115236.237616-1-g.goller@proxmox.com>
References: <20241127115236.237616-1-g.goller@proxmox.com>
Message-ID: 

On 27.11.24 at 12:52, Gabriel Goller wrote:
> Just like in our `Notes` fields, show an emptyText message that explains
> that markdown can be used.
> 
> Reported-by: Lukas Wagner 
> Signed-off-by: Gabriel Goller 
> ---
> 
> v2, thanks @Lukas:
>  - use gettext
>  - fix typo
> 
>  src/form/TextAreaField.js | 1 +
>  1 file changed, 1 insertion(+)
> 
>

applied, thanks!



From f.ebner at proxmox.com  Wed Nov 27 16:06:57 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Wed, 27 Nov 2024 16:06:57 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api: disks: directory:
 factor out helper for mount unit path
In-Reply-To: <20241127150658.107034-1-f.ebner@proxmox.com>
References: <20241127150658.107034-1-f.ebner@proxmox.com>
Message-ID: <20241127150658.107034-2-f.ebner@proxmox.com>

In preparation to check for a pre-existing mount unit.

Signed-off-by: Fiona Ebner 
---
 src/api2/node/disks/directory.rs | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 6a76dd5a..7bdf0111 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -324,16 +324,23 @@ pub const ROUTER: Router = Router::new()
     .post(&API_METHOD_CREATE_DATASTORE_DISK)
     .match_all("name", &ITEM_ROUTER);
 
+fn datastore_mount_unit_path_info(mount_point: &str) -> (String, String) {
+    let mut mount_unit_name = proxmox_systemd::escape_unit(mount_point, true);
+    mount_unit_name.push_str(".mount");
+
+    (
+        format!("/etc/systemd/system/{}", mount_unit_name),
+        mount_unit_name,
+    )
+}
+
 fn create_datastore_mount_unit(
     datastore_name: &str,
     mount_point: &str,
     fs_type: FileSystemType,
     what: &str,
 ) -> Result<String, Error> {
-    let mut mount_unit_name = proxmox_systemd::escape_unit(mount_point, true);
-    mount_unit_name.push_str(".mount");
-
-    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
+    let (mount_unit_path, mount_unit_name) = datastore_mount_unit_path_info(mount_point);
 
     let unit = SystemdUnitSection {
         Description: format!(
-- 
2.39.5




From f.ebner at proxmox.com  Wed Nov 27 16:06:56 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Wed, 27 Nov 2024 16:06:56 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 0/2] api: disks: directory: fail
 if mount unit already exists
Message-ID: <20241127150658.107034-1-f.ebner@proxmox.com>

Protect against overwriting a pre-existing mount unit during creation
of a directory (datastore). The unit might belong to an existing
datastore.

Fiona Ebner (2):
  api: disks: directory: factor out helper for mount unit path
  api: disks: directory: fail if mount unit already exists

 src/api2/node/disks/directory.rs | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

-- 
2.39.5




From f.ebner at proxmox.com  Wed Nov 27 16:06:58 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Wed, 27 Nov 2024 16:06:58 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/2] api: disks: directory: fail
 if mount unit already exists
In-Reply-To: <20241127150658.107034-1-f.ebner@proxmox.com>
References: <20241127150658.107034-1-f.ebner@proxmox.com>
Message-ID: <20241127150658.107034-3-f.ebner@proxmox.com>

Without this check, if a mount unit is present, but the file system is
not mounted, it will just get overwritten. The unit might belong to an
existing datastore.

There already is a check against a duplicate datastore, but only after
the mount unit is already overwritten and having the add-datastore
flag present is not a precondition to trigger the issue.

The check is done even if the newly created directory datastore is
removable. While in that case, the mount unit is not overwritten, the
conflict for the mount point is still present, so it is nice to fail
early.

Signed-off-by: Fiona Ebner 
---
 src/api2/node/disks/directory.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 7bdf0111..d4690d45 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -204,6 +204,11 @@ pub fn create_datastore_disk(
         }
     }
 
+    let (mount_unit_path, _) = datastore_mount_unit_path_info(&mount_point);
+    if std::path::PathBuf::from(&mount_unit_path).exists() {
+        bail!("systemd mount unit {mount_unit_path:?} already exists");
+    }
+
     let upid_str = WorkerTask::new_thread(
         "dircreate",
         Some(name.clone()),
-- 
2.39.5




From h.laimer at proxmox.com  Wed Nov 27 16:17:30 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 27 Nov 2024 16:17:30 +0100
Subject: [pbs-devel] [PATCH] ui: filter partitions without proper UUID in
 partition selector
Message-ID: <20241127151730.139984-1-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer 
---
 www/form/PartitionSelector.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/form/PartitionSelector.js b/www/form/PartitionSelector.js
index 162dbe418..7ef542ffb 100644
--- a/www/form/PartitionSelector.js
+++ b/www/form/PartitionSelector.js
@@ -8,7 +8,7 @@ Ext.define('pbs-partition-list', {
 	    transform: (rawData) => rawData.data
 		.flatMap(disk => (disk.partitions
 			.map(part => ({ ...part, model: disk.model })) ?? [])
-			.filter(partition => partition.used === 'filesystem')),
+			.filter(partition => partition.used === 'filesystem' && !!partition.uuid)),
	},
     },
     idProperty: 'devpath',
-- 
2.39.5




From s.sterz at proxmox.com  Wed Nov 27 16:21:39 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Wed, 27 Nov 2024 16:21:39 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api: disks: directory:
 factor out helper for mount unit path
In-Reply-To: <20241127150658.107034-2-f.ebner@proxmox.com>
References: <20241127150658.107034-1-f.ebner@proxmox.com>
 <20241127150658.107034-2-f.ebner@proxmox.com>
Message-ID: 

On Wed Nov 27, 2024 at 4:06 PM CET, Fiona Ebner wrote:
> In preparation to check for a pre-existing mount unit.
>
> Signed-off-by: Fiona Ebner 
> ---
>  src/api2/node/disks/directory.rs | 15 +++++++++++----
>  1 file changed, 11 insertions(+), 4 deletions(-)
>
> diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
> index 6a76dd5a..7bdf0111 100644
> --- a/src/api2/node/disks/directory.rs
> +++ b/src/api2/node/disks/directory.rs
> @@ -324,16 +324,23 @@ pub const ROUTER: Router = Router::new()
>      .post(&API_METHOD_CREATE_DATASTORE_DISK)
>      .match_all("name", &ITEM_ROUTER);
>
> +fn datastore_mount_unit_path_info(mount_point: &str) -> (String, String) {
> +    let mut mount_unit_name = proxmox_systemd::escape_unit(mount_point, true);
> +    mount_unit_name.push_str(".mount");
> +
> +    (
> +        format!("/etc/systemd/system/{}", mount_unit_name),

nit, this could be:

```rs
format!("/etc/systemd/system/{mount_unit_name}"),
```

> +        mount_unit_name,
> +    )
> +}
> +
>  fn create_datastore_mount_unit(
>      datastore_name: &str,
>      mount_point: &str,
>      fs_type: FileSystemType,
>      what: &str,
>  ) -> Result<String, Error> {
> -    let mut mount_unit_name = proxmox_systemd::escape_unit(mount_point, true);
> -    mount_unit_name.push_str(".mount");
> -
> -    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
> +    let (mount_unit_path, mount_unit_name) = datastore_mount_unit_path_info(mount_point);
>
>      let unit = SystemdUnitSection {
>          Description: format!(

other than that consider this:

Reviewed-by: Shannon Sterz 





From s.sterz at proxmox.com  Wed Nov 27 16:23:04 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Wed, 27 Nov 2024 16:23:04 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/2] api: disks: directory:
 fail if mount unit already exists
In-Reply-To: <20241127150658.107034-3-f.ebner@proxmox.com>
References: <20241127150658.107034-1-f.ebner@proxmox.com>
 <20241127150658.107034-3-f.ebner@proxmox.com>
Message-ID: 

On Wed Nov 27, 2024 at 4:06 PM CET, Fiona Ebner wrote:
> Without this check, if a mount unit is present, but the file system is
> not mounted, it will just get overwritten. The unit might belong to an
> existing datastore.
>
> There already is a check against a duplicate datastore, but only after
> the mount unit is already overwritten and having the add-datastore
> flag present is not a precondition to trigger the issue.
>
> The check is done even if the newly created directory datastore is
> removable. While in that case, the mount unit is not overwritten, the
> conflict for the mount point is still present, so it is nice to fail
> early.
>
> Signed-off-by: Fiona Ebner 
> ---
>  src/api2/node/disks/directory.rs | 5 +++++
>  1 file changed, 5 insertions(+)
>
> diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
> index 7bdf0111..d4690d45 100644
> --- a/src/api2/node/disks/directory.rs
> +++ b/src/api2/node/disks/directory.rs
> @@ -204,6 +204,11 @@ pub fn create_datastore_disk(
>          }
>      }
>
> +    let (mount_unit_path, _) = datastore_mount_unit_path_info(&mount_point);
> +    if std::path::PathBuf::from(&mount_unit_path).exists() {
> +        bail!("systemd mount unit {mount_unit_path:?} already exists");

nit: relying on the `Debug` trait here to quote the string feels a bit..
hacky to me? maybe just:

```rs
bail!("systemd mount unit \"{mount_unit_path}\" already exists");
```

or use single quotes as we do elsewhere.

> +    }
> +
>      let upid_str = WorkerTask::new_thread(
>          "dircreate",
>          Some(name.clone()),

Otherwise:

Reviewed-by: Shannon Sterz 



From t.lamprecht at proxmox.com  Wed Nov 27 16:26:51 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 16:26:51 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/2] api: disks: directory:
 fail if mount unit already exists
In-Reply-To: 
References: <20241127150658.107034-1-f.ebner@proxmox.com>
 <20241127150658.107034-3-f.ebner@proxmox.com>
 
Message-ID: <8e912dbf-8afe-4018-972f-18d6cc037833@proxmox.com>

Am 27.11.24 um 16:23 schrieb Shannon Sterz:
> On Wed Nov 27, 2024 at 4:06 PM CET, Fiona Ebner wrote:
>> +    if std::path::PathBuf::from(&mount_unit_path).exists() {
>> +        bail!("systemd mount unit {mount_unit_path:?} already exists");
> 
> nit: relying on the `Debug` trait here to quote the string feels a bit..
> hacky to me? maybe just:
> 
> ```rs
> bail!("systemd mount unit \"{mount_unit_path}\" already exists");
> ```
> 
> or use single quotes as we do elsewhere.
> 

FWIW, we (mis)use this semi-frequently already; it might not be the cleanest
thing but works out OK. I'm fine either way, just wanted to mention that if
this really has some issue, or is considered non-idiomatic, then a tree-wide
clean-up would be warranted; otherwise its usage will probably grow due to
copying existing code as base for new dev work.
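
For illustration, a quick standalone sketch of the difference (plain std
Rust, not PBS code; the escaped unit name is just an example):

```rs
fn main() {
    // systemd-escaped unit names can contain backslashes, e.g. for a dash:
    let mount_unit_path = String::from(r"/etc/systemd/system/mnt-datastore\x2dfoo.mount");
    // Debug formatting adds quotes, but also escapes the backslash:
    println!("{mount_unit_path:?}"); // "/etc/systemd/system/mnt-datastore\\x2dfoo.mount"
    // Display with explicit quotes prints the value verbatim:
    println!("'{mount_unit_path}'"); // '/etc/systemd/system/mnt-datastore\x2dfoo.mount'
}
```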



From h.laimer at proxmox.com  Wed Nov 27 16:34:02 2024
From: h.laimer at proxmox.com (Hannes Laimer)
Date: Wed, 27 Nov 2024 16:34:02 +0100
Subject: [pbs-devel] [PATCH] docs: add note for why FAT is not supported for
 removable datastores
Message-ID: <20241127153402.147997-1-h.laimer@proxmox.com>

Signed-off-by: Hannes Laimer 
---
 docs/storage.rst | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/docs/storage.rst b/docs/storage.rst
index bf97f57ac..69b4200cf 100644
--- a/docs/storage.rst
+++ b/docs/storage.rst
@@ -172,9 +172,14 @@ mounted and unmounted. Other than that they behave the same way a normal datasto
 would.
 
 They can be created on already correctly formatted partitions, which, as with normal
-datastores, should be either ``ext4`` or ``xfs``.  It is also possible to create them
-on completely unused disks through "Administration" > "Disks / Storage" > "Directory",
-using this method the disk will be partitioned and formatted automatically for the datastore.
+datastores, should be either ``ext4`` or ``xfs``.
+
+.. note:: FAT filesystems don't support the concept of POSIX file ownership. Since
+   datastores rely on it, removable datastores can't be created on those filesystems.
+
+It is also possible to create them on completely unused disks through
+"Administration" > "Disks / Storage" > "Directory", using this method the disk will
+be partitioned and formatted automatically for the datastore.
 
 Devices with only one datastore on them will be mounted automatically. Unmounting has
 to be done through the UI by clicking "Unmount" on the summary page or using the CLI.
-- 
2.39.5




From t.lamprecht at proxmox.com  Wed Nov 27 16:50:57 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 16:50:57 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v13 03/26] pbs-api-types: add
 backing-device to DataStoreConfig
In-Reply-To: <2ca6ce0e-ecdb-4c46-ac1d-eb289dd7076a@proxmox.com>
References: <20241113150102.164820-1-h.laimer@proxmox.com>
 <20241113150102.164820-4-h.laimer@proxmox.com>
 <2ca6ce0e-ecdb-4c46-ac1d-eb289dd7076a@proxmox.com>
Message-ID: <49d6e2e4-25de-4d49-8403-d557c50ba21b@proxmox.com>

Am 17.11.24 um 20:27 schrieb Thomas Lamprecht:
> note: this pen drive is brand new, got just unwrapped and passed through to my dev
> VM, and as such it's still coming with the factory formatting.
> 
> Now, I first did not even expect that it shows up in the selector, but it did, so I'm
> wondering if it either should not be available or if it should work to use this disk
> too.

For the record, the main blockers here are:

1. that exfat/vfat does not support arbitrary UID/GID for files, so datastore creation
   fails when PBS tries to assign ownership of the directories and files to the backup
   user and group. While not really nice, that can be worked around by using the
   `-ouid=34,gid=34` mount options.

2. more importantly, a too low max dentry per directory limit, causing ENOSPC errors
   on chunkstore creation. While this could be worked around by creating deeper chunkstore
   levels with fewer directories per level, e.g. a three-character hex prefix per level,
   which comes out at 3 * 4 bits = 12 -> 4096 directories per level, and that for three
   or four levels (see the fanout sketch below). But that's rather a bigger change, and
   having different layouts per filesystem type sounds like it could cause quite a few
   issues, not what one wants for a stable backup solution.

3. Maybe more? We did not even get around to creating a datastore, so actual usage and
   support for locks and all that might surface other blockers.

So it's simply not feasible to support vfat, or other limited FS.
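
For reference, a quick standalone sketch of the fanout math from point 2
(illustration only; the single 4-hex-char level for the current layout is an
assumption based on the default of 65536 chunk directories):

```rs
// Directory fanout per hex-prefix length: each hex character encodes 4 bits.
fn dirs_per_level(hex_chars: u32) -> u64 {
    1u64 << (hex_chars * 4)
}

fn main() {
    assert_eq!(dirs_per_level(4), 65536); // one 4-character level
    assert_eq!(dirs_per_level(3), 4096); // a 3-character prefix per level
    // three such levels would already address 4096^3 = 2^36 prefixes:
    println!("{}", dirs_per_level(3).pow(3)); // 68719476736
}
```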



From f.ebner at proxmox.com  Wed Nov 27 17:06:37 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Wed, 27 Nov 2024 17:06:37 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/2] api: disks: directory:
 fail if mount unit already exists
In-Reply-To: <20241127160637.131088-1-f.ebner@proxmox.com>
References: <20241127160637.131088-1-f.ebner@proxmox.com>
Message-ID: <20241127160637.131088-3-f.ebner@proxmox.com>

Without this check, if a mount unit is present, but the file system is
not mounted, it will just get overwritten. The unit might belong to an
existing datastore.

There already is a check against a duplicate datastore, but only after
the mount unit is already overwritten and having the add-datastore
flag present is not a precondition to trigger the issue.

The check is done even if the newly created directory datastore is
removable. While in that case, the mount unit is not overwritten, the
conflict for the mount point is still present, so it is nice to fail
early.

Signed-off-by: Fiona Ebner 
---

Changes in v2:
* don't rely on debug trait for printing error message

 src/api2/node/disks/directory.rs | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 57add02b..62f46343 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -204,6 +204,11 @@ pub fn create_datastore_disk(
         }
     }
 
+    let (mount_unit_path, _) = datastore_mount_unit_path_info(&mount_point);
+    if std::path::PathBuf::from(&mount_unit_path).exists() {
+        bail!("systemd mount unit '{mount_unit_path}' already exists");
+    }
+
     let upid_str = WorkerTask::new_thread(
         "dircreate",
         Some(name.clone()),
-- 
2.39.5




From f.ebner at proxmox.com  Wed Nov 27 17:06:35 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Wed, 27 Nov 2024 17:06:35 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 0/2] api: disks: directory:
 fail if mount unit already exists
Message-ID: <20241127160637.131088-1-f.ebner@proxmox.com>

Changes in v2:
* style changes

Protect against overriding a pre-existing mount unit during creation
of a directory (datastore). The unit might belong to an existing
datastore.

Fiona Ebner (2):
  api: disks: directory: factor out helper for mount unit path
  api: disks: directory: fail if mount unit already exists

 src/api2/node/disks/directory.rs | 20 ++++++++++++++++----
 1 file changed, 16 insertions(+), 4 deletions(-)

-- 
2.39.5




From f.ebner at proxmox.com  Wed Nov 27 17:06:36 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Wed, 27 Nov 2024 17:06:36 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 1/2] api: disks: directory:
 factor out helper for mount unit path
In-Reply-To: <20241127160637.131088-1-f.ebner@proxmox.com>
References: <20241127160637.131088-1-f.ebner@proxmox.com>
Message-ID: <20241127160637.131088-2-f.ebner@proxmox.com>

In preparation to check for a pre-existing mount unit.

Signed-off-by: Fiona Ebner 
Reviewed-by: Shannon Sterz 
---

Changes in v2:
* inline variable name in format!()

 src/api2/node/disks/directory.rs | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 6a76dd5a..57add02b 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -324,16 +324,23 @@ pub const ROUTER: Router = Router::new()
     .post(&API_METHOD_CREATE_DATASTORE_DISK)
     .match_all("name", &ITEM_ROUTER);
 
+fn datastore_mount_unit_path_info(mount_point: &str) -> (String, String) {
+    let mut mount_unit_name = proxmox_systemd::escape_unit(mount_point, true);
+    mount_unit_name.push_str(".mount");
+
+    (
+        format!("/etc/systemd/system/{mount_unit_name}"),
+        mount_unit_name,
+    )
+}
+
 fn create_datastore_mount_unit(
     datastore_name: &str,
     mount_point: &str,
     fs_type: FileSystemType,
     what: &str,
 ) -> Result<String, Error> {
-    let mut mount_unit_name = proxmox_systemd::escape_unit(mount_point, true);
-    mount_unit_name.push_str(".mount");
-
-    let mount_unit_path = format!("/etc/systemd/system/{}", mount_unit_name);
+    let (mount_unit_path, mount_unit_name) = datastore_mount_unit_path_info(mount_point);
 
     let unit = SystemdUnitSection {
         Description: format!(
-- 
2.39.5




From f.ebner at proxmox.com  Wed Nov 27 17:08:12 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Wed, 27 Nov 2024 17:08:12 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/2] api: disks: directory:
 fail if mount unit already exists
In-Reply-To: <8e912dbf-8afe-4018-972f-18d6cc037833@proxmox.com>
References: <20241127150658.107034-1-f.ebner@proxmox.com>
 <20241127150658.107034-3-f.ebner@proxmox.com>
 
 <8e912dbf-8afe-4018-972f-18d6cc037833@proxmox.com>
Message-ID: 

Am 27.11.24 um 16:26 schrieb Thomas Lamprecht:
> Am 27.11.24 um 16:23 schrieb Shannon Sterz:
>> On Wed Nov 27, 2024 at 4:06 PM CET, Fiona Ebner wrote:
>>> +    if std::path::PathBuf::from(&mount_unit_path).exists() {
>>> +        bail!("systemd mount unit {mount_unit_path:?} already exists");
>>
>> nit: relying on the `Debug` trait here to quote the string feels a bit..
>> hacky to me? maybe just:
>>
>> ```rs
>> bail!("systemd mount unit \"{mount_unit_path}\" already exists");
>> ```
>>
>> or use single quotes as we do elsewhere.
>>
> 
> FWIW, we (mis)use this semi-frequently already, it might not be the cleanest
> thing but works out OK; I'm fine either way, just wanted to mention that if
> this really has some issue, or is considered non-idiomatic, then a tree-wide
> clean-up would be warranted, otherwise its usage will probably grow due to
> copying existing code as base for new dev work.
> 

I did copy the pattern from surrounding code 0:)

Sent a v2 addressing Shannon's comments, thanks!
https://lore.proxmox.com/pbs-devel/20241127160637.131088-1-f.ebner at proxmox.com/T/



From t.lamprecht at proxmox.com  Wed Nov 27 19:58:30 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 19:58:30 +0100
Subject: [pbs-devel] applied: [PATCH] docs: add note for why FAT is not
 supported for removable datastores
In-Reply-To: <20241127153402.147997-1-h.laimer@proxmox.com>
References: <20241127153402.147997-1-h.laimer@proxmox.com>
Message-ID: <1b3d120b-98af-401e-a7c6-de423a51e0ed@proxmox.com>

Am 27.11.24 um 16:34 schrieb Hannes Laimer:
> Signed-off-by: Hannes Laimer 
> ---
>  docs/storage.rst | 11 ++++++++---
>  1 file changed, 8 insertions(+), 3 deletions(-)
> 
>

applied, thanks!



From t.lamprecht at proxmox.com  Wed Nov 27 20:03:25 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 20:03:25 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 0/2] api: disks:
 directory: fail if mount unit already exists
In-Reply-To: <20241127160637.131088-1-f.ebner@proxmox.com>
References: <20241127160637.131088-1-f.ebner@proxmox.com>
Message-ID: <494cf422-bf41-4886-97b3-25ab5ba3b246@proxmox.com>

Am 27.11.24 um 17:06 schrieb Fiona Ebner:
> Changes in v2:
> * style changes
> 
> Protect against overriding a pre-existing mount unit during creation
> of a directory (datastore). The unit might belong to an existing
> datastore.
> 
> Fiona Ebner (2):
>   api: disks: directory: factor out helper for mount unit path
>   api: disks: directory: fail if mount unit already exists
> 
>  src/api2/node/disks/directory.rs | 20 ++++++++++++++++----
>  1 file changed, 16 insertions(+), 4 deletions(-)
> 

applied series with a detour by first applying and pushing out v1, which
I should have questioned, and then merging in the actual v2 to show what
happened for real.



From t.lamprecht at proxmox.com  Wed Nov 27 20:19:00 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Wed, 27 Nov 2024 20:19:00 +0100
Subject: [pbs-devel] applied: [PATCH] ui: filter partitions without proper
 UUID in partition selector
In-Reply-To: <20241127151730.139984-1-h.laimer@proxmox.com>
References: <20241127151730.139984-1-h.laimer@proxmox.com>
Message-ID: 

Am 27.11.24 um 16:17 schrieb Hannes Laimer:
> Signed-off-by: Hannes Laimer 
> ---
>  www/form/PartitionSelector.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
> diff --git a/www/form/PartitionSelector.js b/www/form/PartitionSelector.js
> index 162dbe418..7ef542ffb 100644
> --- a/www/form/PartitionSelector.js
> +++ b/www/form/PartitionSelector.js
> @@ -8,7 +8,7 @@ Ext.define('pbs-partition-list', {
>  	    transform: (rawData) => rawData.data
>  		.flatMap(disk => (disk.partitions
>  			.map(part => ({ ...part, model: disk.model })) ?? [])
> -			.filter(partition => partition.used === 'filesystem')),
> +			.filter(partition => partition.used === 'filesystem' && !!partition.uuid)),
>  	},jj

^-- git am spotted this mishap as:

error: patch failed: www/form/PartitionSelector.js:8      
error: www/form/PartitionSelector.js: patch does not apply
error: Did you hand edit your patch?                      

I fixed that up and applied the patch. One needs to take care when editing
a patch directly; ideally it's avoided.

>      },
>      idProperty: 'devpath',




From c.ebner at proxmox.com  Thu Nov 28 11:01:10 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 11:01:10 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: fix remove vanished tooltip
 to be valid for both sync directions
Message-ID: <20241128100110.105693-1-c.ebner@proxmox.com>

The tooltip text shown for the remove vanished flag when hovering
is incorrect for push direction. By using `sync target` over `local`,
make the text agnostic to the actual sync direction.

Signed-off-by: Christian Ebner 
---
 www/window/SyncJobEdit.js | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/www/window/SyncJobEdit.js b/www/window/SyncJobEdit.js
index 1f47e016e..bcd2f2fb2 100644
--- a/www/window/SyncJobEdit.js
+++ b/www/window/SyncJobEdit.js
@@ -343,7 +343,7 @@ Ext.define('PBS.window.SyncJobEdit', {
 			name: 'remove-vanished',
 			autoEl: {
 			    tag: 'div',
-			    'data-qtip': gettext('Remove snapshots from local datastore if they vanished from source datastore?'),
+			    'data-qtip': gettext('Remove snapshots from sync target datastore if they vanished from source datastore?'),
 			},
 			uncheckedValue: false,
 			value: false,
-- 
2.39.5




From t.lamprecht at proxmox.com  Thu Nov 28 11:29:29 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 28 Nov 2024 11:29:29 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] ui: fix remove vanished
 tooltip to be valid for both sync directions
In-Reply-To: <20241128100110.105693-1-c.ebner@proxmox.com>
References: <20241128100110.105693-1-c.ebner@proxmox.com>
Message-ID: <06ccd8af-08f5-4669-a973-0ae9507a028c@proxmox.com>

Am 28.11.24 um 11:01 schrieb Christian Ebner:
> The tooltip text shown for the remove vanished flag when hovering
> is incorrect for push direction. By using `sync target` over `local`,
> make the text agnostic to the actual sync direction.
> 
> Signed-off-by: Christian Ebner 
> ---
>  www/window/SyncJobEdit.js | 2 +-
>  1 file changed, 1 insertion(+), 1 deletion(-)
> 
>

applied, thanks!



From s.hanreich at proxmox.com  Thu Nov 28 11:40:32 2024
From: s.hanreich at proxmox.com (Stefan Hanreich)
Date: Thu, 28 Nov 2024 11:40:32 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] ui: mask unmounted
 datastores in datastore overview
Message-ID: <20241128104032.66011-1-s.hanreich@proxmox.com>

Currently, showing the Datastore summary page leads to errors since
the status returned by the API does not contain any fields that are
checked by the component rendering the datastore summary. We solve
this by checking first if the datastore is currently mounted and masking
the element if it is unmounted.

Signed-off-by: Stefan Hanreich 
---
 www/datastore/DataStoreListSummary.js | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/www/datastore/DataStoreListSummary.js b/www/datastore/DataStoreListSummary.js
index f7ea83e7..b43f1ab2 100644
--- a/www/datastore/DataStoreListSummary.js
+++ b/www/datastore/DataStoreListSummary.js
@@ -35,6 +35,13 @@ Ext.define('PBS.datastore.DataStoreListSummary', {
 	let me = this;
 	let vm = me.getViewModel();
 
+	if (statusData['mount-status'] === 'notmounted') {
+	    let maskMessage = gettext('Datastore is not mounted');
+	    let maskIcon = 'fa pbs-unplugged-mask';
+	    me.mask(maskMessage, maskIcon);
+	    return;
+	}
+
 	if (statusData.error !== undefined) {
 	    Proxmox.Utils.API2Request({
 		url: `/config/datastore/${statusData.store}`,
-- 
2.39.5



From s.sterz at proxmox.com  Thu Nov 28 11:48:41 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Thu, 28 Nov 2024 11:48:41 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] ui: mask unmounted
 datastores in datastore overview
In-Reply-To: <20241128104032.66011-1-s.hanreich@proxmox.com>
References: <20241128104032.66011-1-s.hanreich@proxmox.com>
Message-ID: 

On Thu Nov 28, 2024 at 11:40 AM CET, Stefan Hanreich wrote:
> Currently, showing the Datastore summary page leads to errors since
> the status returned by the API does not contain any fields that are
> checked by the component rendering the datastore summary. We solve
> this by checking first if the datastore is currently mounted and masking
> the element if it is unmounted.
>
> Signed-off-by: Stefan Hanreich 
> ---
>  www/datastore/DataStoreListSummary.js | 7 +++++++
>  1 file changed, 7 insertions(+)
>
> diff --git a/www/datastore/DataStoreListSummary.js b/www/datastore/DataStoreListSummary.js
> index f7ea83e7..b43f1ab2 100644
> --- a/www/datastore/DataStoreListSummary.js
> +++ b/www/datastore/DataStoreListSummary.js
> @@ -35,6 +35,13 @@ Ext.define('PBS.datastore.DataStoreListSummary', {
>  	let me = this;
>  	let vm = me.getViewModel();
>
> +	if (statusData['mount-status'] === 'notmounted') {
> +	    let maskMessage = gettext('Datastore is not mounted');
> +	    let maskIcon = 'fa pbs-unplugged-mask';
> +	    me.mask(maskMessage, maskIcon);
> +	    return;
> +	}
> +
>  	if (statusData.error !== undefined) {
>  	    Proxmox.Utils.API2Request({
>  		url: `/config/datastore/${statusData.store}`,

tested this and it seems to work as intended (no more error in the
console, datastore is properly masked), so consider this:

Tested-by: Shannon Sterz 

code also looks fine to me



From s.hanreich at proxmox.com  Thu Nov 28 11:50:39 2024
From: s.hanreich at proxmox.com (Stefan Hanreich)
Date: Thu, 28 Nov 2024 11:50:39 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/1] ui: mask unmounted
 datastores in datastore overview
In-Reply-To: <20241128104032.66011-1-s.hanreich@proxmox.com>
References: <20241128104032.66011-1-s.hanreich@proxmox.com>
Message-ID: 

just found something - the element doesn't get unmasked if the store gets
mounted while the summary page is open. will send a v2.



From s.hanreich at proxmox.com  Thu Nov 28 12:00:14 2024
From: s.hanreich at proxmox.com (Stefan Hanreich)
Date: Thu, 28 Nov 2024 12:00:14 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2 1/1] ui: mask unmounted
 datastores in datastore overview
Message-ID: <20241128110014.78209-1-s.hanreich@proxmox.com>

Currently, showing the Datastore summary page leads to errors since
the status returned by the API does not contain any fields that are
checked by the component rendering the datastore summary. We solve
this by checking first if the datastore is currently mounted and masking
the element if it is unmounted.

Signed-off-by: Stefan Hanreich 
---
Changes from v1 to v2:
* unmask element if state changes

 www/datastore/DataStoreListSummary.js | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/www/datastore/DataStoreListSummary.js b/www/datastore/DataStoreListSummary.js
index f7ea83e7..f61e88ec 100644
--- a/www/datastore/DataStoreListSummary.js
+++ b/www/datastore/DataStoreListSummary.js
@@ -35,6 +35,15 @@ Ext.define('PBS.datastore.DataStoreListSummary', {
 	let me = this;
 	let vm = me.getViewModel();
 
+	if (statusData['mount-status'] === 'notmounted') {
+	    let maskMessage = gettext('Datastore is not mounted');
+	    let maskIcon = 'fa pbs-unplugged-mask';
+	    me.mask(maskMessage, maskIcon);
+	    return;
+	} else if (me.isMasked()) {
+	    me.unmask();
+	}
+
 	if (statusData.error !== undefined) {
 	    Proxmox.Utils.API2Request({
 		url: `/config/datastore/${statusData.store}`,
-- 
2.39.5



From s.sterz at proxmox.com  Thu Nov 28 12:05:44 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Thu, 28 Nov 2024 12:05:44 +0100
Subject: [pbs-devel] [PATCH proxmox-backup v2 1/1] ui: mask unmounted
 datastores in datastore overview
In-Reply-To: <20241128110014.78209-1-s.hanreich@proxmox.com>
References: <20241128110014.78209-1-s.hanreich@proxmox.com>
Message-ID: 

On Thu Nov 28, 2024 at 12:00 PM CET, Stefan Hanreich wrote:
> Currently, showing the Datastore summary page leads to errors since
> the status returned by the API does not contain any fields that are
> checked by the component rendering the datastore summary. We solve
> this by checking first if the datastore is currently mounted and masking
> the element if it is unmounted.
>
> Signed-off-by: Stefan Hanreich 
> ---
> Changes from v1 to v2:
> * unmask element if state changes
>
>  www/datastore/DataStoreListSummary.js | 9 +++++++++
>  1 file changed, 9 insertions(+)
>
> diff --git a/www/datastore/DataStoreListSummary.js b/www/datastore/DataStoreListSummary.js
> index f7ea83e7..f61e88ec 100644
> --- a/www/datastore/DataStoreListSummary.js
> +++ b/www/datastore/DataStoreListSummary.js
> @@ -35,6 +35,15 @@ Ext.define('PBS.datastore.DataStoreListSummary', {
>  	let me = this;
>  	let vm = me.getViewModel();
>
> +	if (statusData['mount-status'] === 'notmounted') {
> +	    let maskMessage = gettext('Datastore is not mounted');
> +	    let maskIcon = 'fa pbs-unplugged-mask';
> +	    me.mask(maskMessage, maskIcon);
> +	    return;
> +	} else if (me.isMasked()) {
> +	    me.unmask();
> +	}
> +
>  	if (statusData.error !== undefined) {
>  	    Proxmox.Utils.API2Request({
>  		url: `/config/datastore/${statusData.store}`,

this now masks and properly unmasks the datastore, sorry i didn't notice
that before. tested it again, no more errors in the console either, so:

Tested-by: Shannon Sterz 



From t.lamprecht at proxmox.com  Thu Nov 28 12:07:48 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 28 Nov 2024 12:07:48 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup v2 1/1] ui: mask
 unmounted datastores in datastore overview
In-Reply-To: <20241128110014.78209-1-s.hanreich@proxmox.com>
References: <20241128110014.78209-1-s.hanreich@proxmox.com>
Message-ID: 

Am 28.11.24 um 12:00 schrieb Stefan Hanreich:
> Currently, showing the Datastore summary page leads to errors since
> the status returned by the API does not contain any fields that are
> checked by the component rendering the datastore summary. We solve
> this by checking first if the datastore is currently mounted and masking
> the element if it is unmounted.
> 
> Signed-off-by: Stefan Hanreich 
> ---
> Changes from v1 to v2:
> * unmask element if state changes
> 
>  www/datastore/DataStoreListSummary.js | 9 +++++++++
>  1 file changed, 9 insertions(+)
> 
>

applied, with Shannon's T-b, thanks!



From c.ebner at proxmox.com  Thu Nov 28 12:56:57 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 12:56:57 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] server: push: fix supported api
 version check
Message-ID: <20241128115657.239291-1-c.ebner@proxmox.com>

The current version check does not cover cases where the minor
version is 3, but the release version is below 11. Fix this by
extending the check accordingly.

Signed-off-by: Christian Ebner 
---
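For illustration, a standalone sketch of the failing case (hypothetical
helper functions, not the patched code):

```rs
fn old_check(major: u64, minor: u64, release: u64) -> bool {
    major > 3 || (major == 3 && minor >= 2 && release >= 11)
}

fn new_check(major: u64, minor: u64, release: u64) -> bool {
    major > 3
        || (major == 3 && minor == 2 && release >= 11)
        || (major == 3 && minor >= 3)
}

fn main() {
    // v3.3.0 does support prune delete stats, but the old check rejects it,
    // because the `release >= 11` clause fails for release 0:
    assert!(!old_check(3, 3, 0));
    assert!(new_check(3, 3, 0));
}
```
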
 src/server/push.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/server/push.rs b/src/server/push.rs
index 53098d43b..afc4ec815 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -134,7 +134,8 @@ impl PushParameters {
         }
 
         let supports_prune_delete_stats = api_version.major > 3
-            || (api_version.major == 3 && api_version.minor >= 2 && api_version.release >= 11);
+            || (api_version.major == 3 && api_version.minor == 2 && api_version.release >= 11)
+            || (api_version.major == 3 && api_version.minor >= 3);
 
         let target = PushTarget {
             remote,
-- 
2.39.5




From t.lamprecht at proxmox.com  Thu Nov 28 12:59:35 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 28 Nov 2024 12:59:35 +0100
Subject: [pbs-devel] applied: [PATCH proxmox-backup] server: push: fix
 supported api version check
In-Reply-To: <20241128115657.239291-1-c.ebner@proxmox.com>
References: <20241128115657.239291-1-c.ebner@proxmox.com>
Message-ID: 

Am 28.11.24 um 12:56 schrieb Christian Ebner:
> The current version check does not cover cases where the minor
> version is 3, but the release version is below 11. Fix this by
> extending the check accordingly.
> 
> Signed-off-by: Christian Ebner 
> ---
>  src/server/push.rs | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
> 
>

applied, but re-ordered the lines to go from bigger to smaller, thanks!



From c.ebner at proxmox.com  Thu Nov 28 13:12:14 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 13:12:14 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] sync: push: also log archive
 names and upload size
Message-ID: <20241128121214.261257-1-c.ebner@proxmox.com>

Include the archive name and upload size/rate as well; the pull job
already logs them, so their absence might cause confusion.

Reported-by: Max Carrara 
Signed-off-by: Christian Ebner 
---
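For illustration, a minimal standalone sketch of the added log line (it
assumes the same proxmox_human_byte calls the diff below uses; the helper
itself is hypothetical):

```rs
use std::time::Duration;

use proxmox_human_byte::HumanByte;

fn format_upload(size: u64, elapsed: Duration) -> String {
    format!(
        "Uploaded {} ({}/s)",
        HumanByte::from(size),
        HumanByte::new_binary(size as f64 / elapsed.as_secs_f64()),
    )
}

fn main() {
    // e.g. 512 MiB uploaded in 4 seconds comes out roughly as
    // "Uploaded 512 MiB (128 MiB/s)"
    println!("{}", format_upload(512 * 1024 * 1024, Duration::from_secs(4)));
}
```
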
 src/server/push.rs | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/src/server/push.rs b/src/server/push.rs
index 8f654d4b4..95c7c6bff 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -9,6 +9,8 @@ use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
 use tracing::{info, warn};
 
+use proxmox_human_byte::HumanByte;
+
 use pbs_api_types::{
     print_store_and_ns, ApiVersion, ApiVersionInfo, ArchiveType, Authid, BackupArchiveName,
     BackupDir, BackupGroup, BackupGroupDeleteStats, BackupNamespace, GroupFilter, GroupListItem,
@@ -848,12 +850,20 @@ pub(crate) async fn push_snapshot(
         path.push(&entry.filename);
         if path.try_exists()? {
             let archive_name = BackupArchiveName::from_path(&entry.filename)?;
+            log::info!("Sync archive {archive_name}");
             match archive_name.archive_type() {
                 ArchiveType::Blob => {
                     let file = std::fs::File::open(path.clone())?;
                     let backup_stats = backup_writer
                         .upload_blob(file, archive_name.as_ref())
                         .await?;
+                    log::info!(
+                        "Uploaded {} ({}/s)",
+                        HumanByte::from(backup_stats.size),
+                        HumanByte::new_binary(
+                            backup_stats.size as f64 / backup_stats.duration.as_secs_f64()
+                        ),
+                    );
                     stats.add(SyncStats {
                         chunk_count: backup_stats.chunk_count as usize,
                         bytes: backup_stats.size as usize,
@@ -883,6 +893,13 @@ pub(crate) async fn push_snapshot(
                         known_chunks.clone(),
                     )
                     .await?;
+                    log::info!(
+                        "Uploaded {} ({}/s)",
+                        HumanByte::from(sync_stats.bytes),
+                        HumanByte::new_binary(
+                            sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+                        ),
+                    );
                     stats.add(sync_stats);
                 }
                 ArchiveType::FixedIndex => {
@@ -908,6 +925,13 @@ pub(crate) async fn push_snapshot(
                         known_chunks.clone(),
                     )
                     .await?;
+                    log::info!(
+                        "Uploaded {} ({}/s)",
+                        HumanByte::from(sync_stats.bytes),
+                        HumanByte::new_binary(
+                            sync_stats.bytes as f64 / sync_stats.elapsed.as_secs_f64()
+                        ),
+                    );
                     stats.add(sync_stats);
                 }
             }
-- 
2.39.5




From c.ebner at proxmox.com  Thu Nov 28 13:49:25 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 13:49:25 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 2/2] sync: push: use min version
 helper for api compatibility checks
In-Reply-To: <20241128124925.318298-1-c.ebner@proxmox.com>
References: <20241128124925.318298-1-c.ebner@proxmox.com>
Message-ID: <20241128124925.318298-2-c.ebner@proxmox.com>

Use the compatibility check helper to reduce possible errors when
comparing api versions.

No functional change intended.

Signed-off-by: Christian Ebner 
---
 src/server/push.rs | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/server/push.rs b/src/server/push.rs
index 95c7c6bff..957eb1ab2 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -129,13 +129,12 @@ impl PushParameters {
         let api_version = ApiVersion::try_from(version_info)?;
 
         // push assumes namespace support on the remote side, fail early if missing
-        if api_version.major < 2 || (api_version.major == 2 && api_version.minor < 2) {
+        if !api_version.is_min_required(ApiVersion::new(2, 2, 0, String::new())) {
             bail!("Unsupported remote api version, minimum v2.2 required");
         }
 
-        let supports_prune_delete_stats = api_version.major > 3
-            || (api_version.major == 3 && api_version.minor >= 3)
-            || (api_version.major == 3 && api_version.minor == 2 && api_version.release >= 11);
+        let supports_prune_delete_stats =
+            api_version.is_min_required(ApiVersion::new(3, 2, 11, String::new()));
 
         let target = PushTarget {
             remote,
-- 
2.39.5




From c.ebner at proxmox.com  Thu Nov 28 13:49:24 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 13:49:24 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api types: version: add
 helper for min version checks
Message-ID: <20241128124925.318298-1-c.ebner@proxmox.com>

Add a helper method to the ApiVersion type to reduce possible errors
when comparing api versions for feature compatibility checks.

Signed-off-by: Christian Ebner 
---
 pbs-api-types/src/version.rs | 33 +++++++++++++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs
index 80f87e372..5f8efb663 100644
--- a/pbs-api-types/src/version.rs
+++ b/pbs-api-types/src/version.rs
@@ -1,4 +1,5 @@
 //! Defines the types for the api version info endpoint
+use std::cmp::Ordering;
 use std::convert::TryFrom;
 
 use anyhow::{format_err, Context};
@@ -68,3 +69,35 @@ impl TryFrom<ApiVersionInfo> for ApiVersion {
         })
     }
 }
+
+impl ApiVersion {
+    pub fn new(
+        major: ApiVersionMajor,
+        minor: ApiVersionMinor,
+        release: ApiVersionRelease,
+        repoid: String,
+    ) -> Self {
+        Self {
+            major,
+            minor,
+            release,
+            repoid,
+        }
+    }
+
+    pub fn is_min_required(&self, version: ApiVersion) -> bool {
+        match (
+            version.major.cmp(&self.major),
+            version.minor.cmp(&self.minor),
+            version.release.cmp(&self.release),
+        ) {
+            (Ordering::Less, _, _) => true,
+            (Ordering::Greater, _, _) => false,
+            (Ordering::Equal, Ordering::Less, _) => true,
+            (Ordering::Equal, Ordering::Greater, _) => false,
+            (Ordering::Equal, Ordering::Equal, Ordering::Less) => true,
+            (Ordering::Equal, Ordering::Equal, Ordering::Equal) => true,
+            (Ordering::Equal, Ordering::Equal, Ordering::Greater) => false,
+        }
+    }
+}
-- 
2.39.5




From t.lamprecht at proxmox.com  Thu Nov 28 14:04:18 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 28 Nov 2024 14:04:18 +0100
Subject: [pbs-devel] Proxmox Backup Server 3.3 released!
Message-ID: 

Hello all!

We're excited to share the newest release of Proxmox Backup Server 3.3, packed
with updates and improvements inspired by your valuable input!

This version is based on Debian 12.8 ("Bookworm") but uses the newer Linux
kernel 6.8.12-4 as stable default and kernel 6.11 as opt-in, and ZFS 2.2.6
(with compatibility patches for Kernel 6.11).

Here are some of the highlights

- New push direction for remote synchronization jobs
- Support for removable datastores
- New webhook notification target
- New change detection modes for speeding up file-based host and container
  backups
- Countless improvements for general client and backend usability

As always, we'd love to hear your thoughts. Check out the full release notes
and share your feedback below!

Release notes
https://pbs.proxmox.com/wiki/index.php/Roadmap

Press release
https://www.proxmox.com/en/news/press-releases

Watch the talk from SFSCON24:
"Proxmox Backup Server: Backup for your Datacenter" - Christian Ebner
https://www.sfscon.it/talks/proxmox-backup-server-backup-for-your-datacenter/

Download
Alternate ISO download:
https://enterprise.proxmox.com/iso
https://www.proxmox.com/en/downloads

Documentation
https://pbs.proxmox.com/docs

Community Forum
https://forum.proxmox.com

Bugtracker
https://bugzilla.proxmox.com

Source code
https://git.proxmox.com

This release is a testament to the power of collaboration within our
community. A big THANK YOU for your contributions, ideas, and support. We
couldn't have done this without you!

FAQ
Q: Can I upgrade the latest Proxmox Backup Server 2.x to 3.3 with apt?
A: Yes, please follow the upgrade instructions on
   https://pbs.proxmox.com/wiki/index.php/Upgrade_from_2_to_3

Q: How does this integrate into Proxmox Virtual Environment?
A: Just add a Proxmox Backup Server datastore as a new storage target in your
   Proxmox VE. Make sure that you run the latest Proxmox VE 8.2.

Q: Is Proxmox Backup Server still compatible with older clients or Proxmox VE
   releases?
A: We are actively testing compatibility between all currently supported major
   versions, including the previous version. Full compatibility with even
   older (major) client versions is supported only on a best effort basis.

Q: How do I install the proxmox-backup-client on my Debian or Ubuntu server?
A: We provide a "Proxmox Backup Client-only Repository", see
   https://pbs.proxmox.com/docs/installation.html#client-installation

Q: What will happen with the existing backup tool (vzdump) in Proxmox Virtual
   Environment?
A: You can still use vzdump. The new backup is an additional, but very
   powerful way to backup and restore your VMs and containers.

Q: Is there any recommended server hardware for the Proxmox Backup Server?
A: We recommend enterprise-grade server hardware components, with fast local
   SSD/NVMe storage. Access and response times from rotating drives will slow
   down all backup server operations. See
   https://pbs.proxmox.com/docs/installation.html#recommended-server-system-requirements

Q: Can I install Proxmox Backup Server on Debian, in a VM, as LXC or alongside
   with Proxmox VE?
A: Yes, but all this is not the recommended setup (expert use only).

Q: Where can I get more information about upcoming features?
A: Follow the announcement forum and pbs-devel mailing list
   https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel, and subscribe
   to our newsletter https://www.proxmox.com/news and see
   https://pbs.proxmox.com/wiki/index.php/Roadmap.

Best regards
Thomas Lamprecht



From t.lamprecht at proxmox.com  Thu Nov 28 14:10:16 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 28 Nov 2024 14:10:16 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api types: version: add
 helper for min version checks
In-Reply-To: <20241128124925.318298-1-c.ebner@proxmox.com>
References: <20241128124925.318298-1-c.ebner@proxmox.com>
Message-ID: <8fb89fb5-0def-4b39-a30e-00193958bc16@proxmox.com>

Am 28.11.24 um 13:49 schrieb Christian Ebner:
> Add a helper method to the ApiVersion type to reduce possible errors
> when comparing api versions for feature compatibility checks.
> 
> Signed-off-by: Christian Ebner 
> ---
>  pbs-api-types/src/version.rs | 33 +++++++++++++++++++++++++++++++++
>  1 file changed, 33 insertions(+)
> 
> diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs
> index 80f87e372..5f8efb663 100644
> --- a/pbs-api-types/src/version.rs
> +++ b/pbs-api-types/src/version.rs
> @@ -1,4 +1,5 @@
>  //! Defines the types for the api version info endpoint
> +use std::cmp::Ordering;
>  use std::convert::TryFrom;
>  
>  use anyhow::{format_err, Context};
> @@ -68,3 +69,35 @@ impl TryFrom<ApiVersionInfo> for ApiVersion {
>          })
>      }
>  }
> +
> +impl ApiVersion {
> +    pub fn new(
> +        major: ApiVersionMajor,
> +        minor: ApiVersionMinor,
> +        release: ApiVersionRelease,
> +        repoid: String,
> +    ) -> Self {
> +        Self {
> +            major,
> +            minor,
> +            release,
> +            repoid,
> +        }
> +    }
> +
> +    pub fn is_min_required(&self, version: ApiVersion) -> bool {
> +        match (
> +            version.major.cmp(&self.major),
> +            version.minor.cmp(&self.minor),
> +            version.release.cmp(&self.release),
> +        ) {
> +            (Ordering::Less, _, _) => true,
> +            (Ordering::Greater, _, _) => false,
> +            (Ordering::Equal, Ordering::Less, _) => true,
> +            (Ordering::Equal, Ordering::Greater, _) => false,
> +            (Ordering::Equal, Ordering::Equal, Ordering::Less) => true,
> +            (Ordering::Equal, Ordering::Equal, Ordering::Equal) => true,
> +            (Ordering::Equal, Ordering::Equal, Ordering::Greater) => false,
> +        }
> +    }
> +}


Why not impl the Ord trait here instead?

Then the call-site could be

let supports_prune_delete_stats = api_version >= ApiVersion::new(3, 2, 11, String::new());

And maybe a separate type for the triple without the commit hash on which you
also impl the same and then avoid that slightly confusing String::new() hack.
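
For illustration, a rough sketch of the derive-based ordering (the type and
field types here are assumptions, not the final patch):

```rs
// Derived Ord compares fields lexicographically in declaration order,
// which matches semantic version ordering for (major, minor, release).
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct VersionTriple {
    major: u64,
    minor: u64,
    release: u64,
}

fn main() {
    let remote = VersionTriple { major: 3, minor: 3, release: 0 };
    let required = VersionTriple { major: 3, minor: 2, release: 11 };
    assert!(remote >= required); // reads like the call site above
}
```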



From c.ebner at proxmox.com  Thu Nov 28 14:14:27 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 14:14:27 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api types: version: add
 helper for min version checks
In-Reply-To: <8fb89fb5-0def-4b39-a30e-00193958bc16@proxmox.com>
References: <20241128124925.318298-1-c.ebner@proxmox.com>
 <8fb89fb5-0def-4b39-a30e-00193958bc16@proxmox.com>
Message-ID: 

On 11/28/24 14:10, Thomas Lamprecht wrote:
> Why not impl the Ord trait here instead?
> 
> Then the call-site could be
> 
> let supports_prune_delete_stats = api_version >= ApiVersion::new(3, 2, 11, String::new());

Ah, yes that's way nicer and allows also for exact version matching.

> And maybe a separate type for the triple without the commit hash on which you
> also impl the same and then avoid that slightly confusing String::new() hack.

Acked, will send a new version incorporating your feedback, thanks!




From t.lamprecht at proxmox.com  Thu Nov 28 14:18:55 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 28 Nov 2024 14:18:55 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api types: version: add
 helper for min version checks
In-Reply-To: 
References: <20241128124925.318298-1-c.ebner@proxmox.com>
 <8fb89fb5-0def-4b39-a30e-00193958bc16@proxmox.com>
 
Message-ID: <0f9f1b28-f83f-459e-a2b8-4c5fa60d3562@proxmox.com>

Am 28.11.24 um 14:14 schrieb Christian Ebner:
> On 11/28/24 14:10, Thomas Lamprecht wrote:
>> Why not impl the Ord trait here instead?
>>
>> Then the call-site could be
>>
>> let supports_prune_delete_stats = api_version >= ApiVersion::new(3, 2, 11, String::new());
> 
> Ah, yes that's way nicer and allows also for exact version matching.
> 
>> And maybe a separate type for the triple without the commit hash on which you
>> also impl the same and then avoid that slightly confusing String::new() hack.
> 
> Acked, will send a new version incorporating your feedback, thanks!
> 

Note that while I'm quite sure about the first thing, the last one was just an
idea off the top of my head; not sure how much it improves things, but maybe
having a separate VersionTriple or SemanticVersion type might make a few things
nicer to use.



From c.ebner at proxmox.com  Thu Nov 28 14:49:14 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 14:49:14 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api types: version: add
 helper for min version checks
In-Reply-To: <0f9f1b28-f83f-459e-a2b8-4c5fa60d3562@proxmox.com>
References: <20241128124925.318298-1-c.ebner@proxmox.com>
 <8fb89fb5-0def-4b39-a30e-00193958bc16@proxmox.com>
 
 <0f9f1b28-f83f-459e-a2b8-4c5fa60d3562@proxmox.com>
Message-ID: <95f86bb4-96c9-4bf4-8343-b6996ce72479@proxmox.com>

On 11/28/24 14:18, Thomas Lamprecht wrote:
> Am 28.11.24 um 14:14 schrieb Christian Ebner:
>> On 11/28/24 14:10, Thomas Lamprecht wrote:
>>> Why not impl the Ord trait here instead?
>>>
>>> Then the call-site could be
>>>
>>> let supports_prune_delete_stats = api_version >= ApiVersion::new(3, 2, 11, String::new());
>>
>> Ah, yes that's way nicer and allows also for exact version matching.
>>
>>> And maybe a separate type for the triple without the commit hash on which you
>>> also impl the same and then avoid that slightly confusing String::new() hack.
>>
>> Acked, will send a new version incorporating your feedback, thanks!
>>
> 
> Note that while I'm quite sure of the first thing the last thing was just a
> idea from top of my head, not sure how much it improves,  but maybe having
> a separate VersionTriple or SemanticVersion type might make a few things nicer
> to use.

Okay, well I was rather opting for dropping the `repoid` from
`ApiVersion` instead of introducing another type, as it is currently
not used and was just included for completeness. The `repoid` can
already be obtained directly from the `ApiVersionInfo` returned by the
version api endpoint, so this information is redundant anyway.



From s.sterz at proxmox.com  Thu Nov 28 14:49:25 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Thu, 28 Nov 2024 14:49:25 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: check that store is set
 before trying to select in GCJobView
Message-ID: <20241128134925.196856-1-s.sterz@proxmox.com>

otherwise users will get a `b.store is null` error in the console and
a loading spinner is shown for a while.

the issue in question seems to stem from the event handler that gets
attached when the "Prune & GC Jobs" tab is opened for a specific
datastore. however, that event handler should *not* be attached for
the "Datastore" -> "Prune & GC Jobs" panel. it seems that the event
handler does still get attached, and will fire in the "Datastore"
view if it hasn't fired while opened in a specific datastore
(it should only trigger a single time).

that scenario seems to occur when a different tab was previously
selected in a specific datastore and navigation is triggered via the
side bar from the "Datastore" -> "Prune GC Jobs" to a specific
datastore. that leads to the "Prune & GC Jobs" view for that specific
datastore being opened very briefly in which the event handler gets
attached; navigation then automatically moves to the previously
selected tab. this will stop the store from updating, ensuring that
the event is never triggered. when we then move to
the "Datastore" -> "Prune & GC Jobs" tab again, the event handler will
be triggered, but the store of the view is null, leading to the error.

Signed-off-by: Shannon Sterz 
---
 www/config/GCView.js | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/www/config/GCView.js b/www/config/GCView.js
index a6e79fb3..51ce1cb6 100644
--- a/www/config/GCView.js
+++ b/www/config/GCView.js
@@ -33,7 +33,11 @@ Ext.define('PBS.config.GCJobView', {
 		// after the store is loaded, select the row to enable the Edit,.. buttons
 		store.rstore.proxy.on({
 		    'afterload': {
-			fn: () => view.getSelectionModel().select(0),
+			fn: () => {
+			    if (view.store) {
+				view.getSelectionModel().select(0);
+			    }
+			},
 			single: true,
 		    },
 		});
-- 
2.39.5




From t.lamprecht at proxmox.com  Thu Nov 28 14:50:20 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Thu, 28 Nov 2024 14:50:20 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api types: version: add
 helper for min version checks
In-Reply-To: <95f86bb4-96c9-4bf4-8343-b6996ce72479@proxmox.com>
References: <20241128124925.318298-1-c.ebner@proxmox.com>
 <8fb89fb5-0def-4b39-a30e-00193958bc16@proxmox.com>
 
 <0f9f1b28-f83f-459e-a2b8-4c5fa60d3562@proxmox.com>
 <95f86bb4-96c9-4bf4-8343-b6996ce72479@proxmox.com>
Message-ID: <487f125e-88d3-46e6-827d-5e665298b91d@proxmox.com>

Am 28.11.24 um 14:49 schrieb Christian Ebner:
> On 11/28/24 14:18, Thomas Lamprecht wrote:
>> Am 28.11.24 um 14:14 schrieb Christian Ebner:
>>> On 11/28/24 14:10, Thomas Lamprecht wrote:
>>>> Why not impl the Ord trait here instead?
>>>>
>>>> Then the call-site could be
>>>>
>>>> let supports_prune_delete_stats = api_version >= ApiVersion::new(3, 2, 11, String::new());
>>>
>>> Ah, yes that's way nicer and allows also for exact version matching.
>>>
>>>> And maybe a separate type for the triple without the commit hash on which you
>>>> also impl the same and then avoid that slightly confusing String::new() hack.
>>>
>>> Acked, will send a new version incorporating your feedback, thanks!
>>>
>>
>> Note that while I'm quite sure of the first thing the last thing was just a
>> idea from top of my head, not sure how much it improves,  but maybe having
>> a separate VersionTriple or SemanticVersion type might make a few things nicer
>> to use.
> 
> Okay, well I was rather opting for dropping the `repoid` from 
> `ApiVersion` instead of introducing another type, as that is currently 
> not used and was just included for completeness. The `repoid` can 
> already be obtained directly from the `ApiVersionInfo`, returned by the 
> version api endpoint so this information is redundant anyways.

It sounds like you checked out the surrounding code more closely than I did,
so it's probably better to go with your idea for now.



From f.ebner at proxmox.com  Thu Nov 28 15:15:14 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Thu, 28 Nov 2024 15:15:14 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: check that store is set
 before trying to select in GCJobView
In-Reply-To: <20241128134925.196856-1-s.sterz@proxmox.com>
References: <20241128134925.196856-1-s.sterz@proxmox.com>
Message-ID: <2a4b6b22-09a3-473a-ba37-fe30ae865e8a@proxmox.com>

Am 28.11.24 um 14:49 schrieb Shannon Sterz:
> otherwise users will get a `b.store is null` error in the console and
> a loading spinner is shown for a while.
> 
> the issue in question seems to stem from the event handler that gets
> attached when the "Prune & GC Jobs" tab is opened for a specific
> datastore. however, that event handler should *not* be attached for
> the "Datastore" -> "Prune & GC Jobs" panel. it seems that the event
> handler does still get attached, and will fire in the "Datastore"
> view if it hasn't fired while opened in a specific datastore
> (it should only trigger a single time).
> 
> that scenario seems to occur when a different tab was previously
> selected in a specific datastore and navigation is triggered via the
> side bar from the "Datastore" -> "Prune GC Jobs" to a specific
> datastore. that leads to the "Prune & GC Jobs" view for that specific
> datastore being opened very briefly in which the event handler gets
> attached; navigation then automatically moves to the previously
> selected tab. this will stop the store from updating, ensuring that
> the event is never triggered. when we then move to
> the "Datastore" -> "Prune & GC Jobs" tab again, the event handler will
> be triggered, but the store of the view is null, leading to the error.
> 
> Signed-off-by: Shannon Sterz 
> ---
>  www/config/GCView.js | 6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
> 
> diff --git a/www/config/GCView.js b/www/config/GCView.js
> index a6e79fb3..51ce1cb6 100644
> --- a/www/config/GCView.js
> +++ b/www/config/GCView.js
> @@ -33,7 +33,11 @@ Ext.define('PBS.config.GCJobView', {
>  		// after the store is loaded, select the row to enable the Edit,.. buttons
>  		store.rstore.proxy.on({
>  		    'afterload': {
> -			fn: () => view.getSelectionModel().select(0),
> +			fn: () => {
> +			    if (view.store) {
> +				view.getSelectionModel().select(0);

In my testing, view.store is set if I was previously at a datastore's
"Prune & GC Jobs" but not if I was on a different tab from a datastore.
In both cases, the row does not seem to be selected and the "Edit" and
"Run Now" buttons are still grayed out. So your patch is certainly an
improvement, because there is no more error and no loading spinner :) But it still
doesn't seem to do what was intended according to the code comment.

> +			    }
> +			},
>  			single: true,
>  		    },
>  		});




From f.ebner at proxmox.com  Thu Nov 28 15:37:14 2024
From: f.ebner at proxmox.com (Fiona Ebner)
Date: Thu, 28 Nov 2024 15:37:14 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: check that store is set
 before trying to select in GCJobView
In-Reply-To: <2a4b6b22-09a3-473a-ba37-fe30ae865e8a@proxmox.com>
References: <20241128134925.196856-1-s.sterz@proxmox.com>
 <2a4b6b22-09a3-473a-ba37-fe30ae865e8a@proxmox.com>
Message-ID: <0ad59651-69cf-4f4b-8346-0ede2e6b5706@proxmox.com>



Am 28.11.24 um 15:15 schrieb Fiona Ebner:
> Am 28.11.24 um 14:49 schrieb Shannon Sterz:
>> otherwise users will get a `b.store is null` error in the console and
>> a loading spinner is shown for a while.
>>
>> the issue in question seems to stem from the event handler that gets
>> attached when the "Prune & GC Jobs" tab is opened for a specific
>> datastore. however, that event handler should *not* be attached for
>> the "Datastore" -> "Prune & GC Jobs" panel. it seems that the event
>> handler does still get attached, and will fire in the "Datastore"
>> view if it hasn't fired while opened in a specific datastore
>> (it should only trigger a single time).
>>
>> that scenario seems to occur when a different tab was previously
>> selected in a specific datastore and navigation is triggered via the
>> side bar from the "Datastore" -> "Prune GC Jobs" to a specific
>> datastore. that leads to the "Prune & GC Jobs" view for that specific
>> datastore being opened very briefly in which the event handler gets
>> attached; navigation then automatically moves to the previously
>> selected tab. this will stop the store from updating, ensuring that
>> the event is never triggered. when we then move to
>> the "Datastore" -> "Prune & GC Jobs" tab again, the event handler will
>> be triggered, but the store of the view is null, leading to the error.
>>
>> Signed-off-by: Shannon Sterz 
>> ---
>>  www/config/GCView.js | 6 +++++-
>>  1 file changed, 5 insertions(+), 1 deletion(-)
>>
>> diff --git a/www/config/GCView.js b/www/config/GCView.js
>> index a6e79fb3..51ce1cb6 100644
>> --- a/www/config/GCView.js
>> +++ b/www/config/GCView.js
>> @@ -33,7 +33,11 @@ Ext.define('PBS.config.GCJobView', {
>>  		// after the store is loaded, select the row to enable the Edit,.. buttons
>>  		store.rstore.proxy.on({
>>  		    'afterload': {
>> -			fn: () => view.getSelectionModel().select(0),
>> +			fn: () => {
>> +			    if (view.store) {
>> +				view.getSelectionModel().select(0);
> 
> In my testing, view.store is set if I was previously at a datastore's
> "Prune & GC Jobs" but not if I was on a different tab from a datastore.
> In both cases, the row does not seem to be selected and the "Edit" and
> "Run Now" buttons are still grayed out. So your patch is certainly an
> improvement, because there is no more error and no loading spinner :) But it still
> doesn't seem to do what was intended according to the code comment.
> 

From a quick off-list discussion, Shannon pointed out that this code is
intended for the "Prune & GC Jobs" tab for a single datastore, not the
overview, where it still works after the change :)

So consider this:

Tested-by: Fiona Ebner 



From s.sterz at proxmox.com  Thu Nov 28 15:41:09 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Thu, 28 Nov 2024 15:41:09 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: check that store is set
 before trying to select in GCJobView
In-Reply-To: <2a4b6b22-09a3-473a-ba37-fe30ae865e8a@proxmox.com>
References: <20241128134925.196856-1-s.sterz@proxmox.com>
 <2a4b6b22-09a3-473a-ba37-fe30ae865e8a@proxmox.com>
Message-ID: 

On Thu Nov 28, 2024 at 3:15 PM CET, Fiona Ebner wrote:
> On 28.11.24 at 14:49, Shannon Sterz wrote:
> > otherwise users will get a `b.store is null` error in the console and
> > a loading spinner is shown for a while.
> >
> > the issue in question seems to stem from the event handler that gets
> > attached when the "Prune & GC Jobs" tab is opened for a specific
> > datastore. however, that event handler should *not* be attached for
> > the "Datastore" -> "Prune & GC Jobs" panel. it seems that the event
> > handler does still get attached, and will fire in the "Datastore"
> > view if it hasn't fired while opened in a specific datastore
> > (it should only trigger a single time).
> >
> > that scenario seems to occur when a different tab was previously
> > selected in a specific datastore and navigation is triggered via the
> > side bar from the "Datastore" -> "Prune & GC Jobs" to a specific
> > datastore. that leads to the "Prune & GC Jobs" view for that specific
> > datastore being opened very briefly in which the event handler gets
> > attached, navigation then automatically moves to the previously
> > selected tab. this will stop the store from updating ensuring that
> > the event is never triggered. when we then move to
> > the "Datastore" -> "Prune & GC Jobs" tab again the event handler will
> > be triggered but the store of the view is null leading to the error.
> >
> > Signed-off-by: Shannon Sterz 
> > ---
> >  www/config/GCView.js | 6 +++++-
> >  1 file changed, 5 insertions(+), 1 deletion(-)
> >
> > diff --git a/www/config/GCView.js b/www/config/GCView.js
> > index a6e79fb3..51ce1cb6 100644
> > --- a/www/config/GCView.js
> > +++ b/www/config/GCView.js
> > @@ -33,7 +33,11 @@ Ext.define('PBS.config.GCJobView', {
> >  		// after the store is loaded, select the row to enable the Edit,.. buttons
> >  		store.rstore.proxy.on({
> >  		    'afterload': {
> > -			fn: () => view.getSelectionModel().select(0),
> > +			fn: () => {
> > +			    if (view.store) {
> > +				view.getSelectionModel().select(0);
>

we've already talked about this off-list, but i wanted to document that
a bit better, so here goes nothing:

> In my testing, view.store is set if I was previously at a datastore's
> "Prune & GC Jobs" but not if I was on a different tab from a datastore.

yeah, this is super confusing and requires setting the break points just
right to get to. datastore is set if you navigated to a specific
datastore's "Prune & GC Jobs" tab first, as you pointed out, and isn't
reset when you are in the top-level "Datastore" panel.

in testing i tried to explicitly reset it to `undefined` in the
"Datastore" -> "Prune & GC Jobs" panel, but that would only avoid this
bug in certain situations. it was still possible to trigger it with the
navigation pattern described above.

> In both cases, the row does not seem to be selected and the "Edit" and
> "Run Now" buttons are still grayed out. So your patch is certainly an
> improvement, because there is no error and loading :) But it still
> doesn't seem to do what was intended according to the code comment.

it does select the row logically only in the specific datastore's "Prune
& GC Jobs" panel, as there should only ever be one GC job there. this
enables the "Edit" and "Run now" buttons without the user having to
select the single line in the table. (note that the highlighting is
removed, so visually nothing *looks* selected)

however, for the overview of all GC Jobs in the "Datastore" -> "Prune &
GC Jobs" panel, nothing should be auto-selected. hence, the `if
(view.datastore)` check before the event handler is attached. as
described in my commit message, it is still possible under certain
circumstance to have the event handler be attached and be triggered
here.

> > +			    }
> > +			},
> >  			single: true,
> >  		    },
> >  		});




From g.goller at proxmox.com  Thu Nov 28 15:44:57 2024
From: g.goller at proxmox.com (Gabriel Goller)
Date: Thu, 28 Nov 2024 15:44:57 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] ui: check that store is set
 before trying to select in GCJobView
In-Reply-To: <20241128134925.196856-1-s.sterz@proxmox.com>
References: <20241128134925.196856-1-s.sterz@proxmox.com>
Message-ID: <6gcpqxynckaekgkzh76jnuwy7i4c5zgoudybvfpkfveveybsuy@llsxichmz7zl>

Thanks for fixing this!

Consider:
Tested-by: Gabriel Goller 




From d.csapak at proxmox.com  Thu Nov 28 15:54:40 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Thu, 28 Nov 2024 15:54:40 +0100
Subject: [pbs-devel] [PATCH proxmox] sys: fs: set FD_CLOEXEC when creating
 temp files
Message-ID: <20241128145440.4119007-1-d.csapak@proxmox.com>

In general we want all open files to have set CLOEXEC since our
reloading mechanism can basically fork at any moment and we don't want
newer daemons to carry around old file descriptors, especially lock
files.

Since `make_tmp_file` is called by many things (e.g. open_file_locked,
logrotate, rrd), set FD_CLOEXEC after getting the filehandle.

This fixes an issue with e.g. tape backups not working because of such
lingering lock files after a reload.

Signed-off-by: Dominik Csapak 
---
there are other code parts where we open file without CLOEXEC, but
wanted to send this for now.

 proxmox-sys/src/fs/file.rs | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/proxmox-sys/src/fs/file.rs b/proxmox-sys/src/fs/file.rs
index fbfc0b58..05d0aff0 100644
--- a/proxmox-sys/src/fs/file.rs
+++ b/proxmox-sys/src/fs/file.rs
@@ -7,7 +7,7 @@ use std::time::Duration;
 
 use anyhow::{bail, format_err, Context as _, Error};
 use nix::errno::Errno;
-use nix::fcntl::OFlag;
+use nix::fcntl::{FcntlArg, FdFlag, OFlag};
 use nix::sys::stat;
 use nix::unistd;
 use nix::NixPath;
@@ -128,7 +128,10 @@ pub fn make_tmp_file<P: AsRef<Path>>(
     let mut template = path.to_owned();
     template.set_extension("tmp_XXXXXX");
     let (mut file, tmp_path) = match unistd::mkstemp(&template) {
-        Ok((fd, path)) => (unsafe { File::from_raw_fd(fd) }, path),
+        Ok((fd, path)) => {
+            nix::fcntl::fcntl(fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))?;
+            (unsafe { File::from_raw_fd(fd) }, path)
+        }
         Err(err) => bail!("mkstemp {:?} failed: {}", template, err),
     };
 
-- 
2.39.5




From c.ebner at proxmox.com  Thu Nov 28 17:07:19 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 17:07:19 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 1/3] api types: version: drop
 unused `repoid` field
Message-ID: <20241128160721.583578-1-c.ebner@proxmox.com>

The `ApiVersion` type was introduced in commit a926803b
("api/api-types: refactor api endpoint version, add api types")
including the `repoid`, added for completeness when converting from
a pre-existing `ApiVersionInfo` instance, as returned by the
`version` api endpoint.

Drop the additional `repoid` field: it is currently unused, can be
obtained from the `ApiVersionInfo` as well, and only hinders the
implementation of easy api version comparison.

Signed-off-by: Christian Ebner 
---
changes since version 1:
- not present in previous version

 pbs-api-types/src/version.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs
index 80f87e372..bd4c517da 100644
--- a/pbs-api-types/src/version.rs
+++ b/pbs-api-types/src/version.rs
@@ -37,7 +37,6 @@ pub struct ApiVersion {
     pub major: ApiVersionMajor,
     pub minor: ApiVersionMinor,
     pub release: ApiVersionRelease,
-    pub repoid: String,
 }
 
 impl TryFrom<ApiVersionInfo> for ApiVersion {
@@ -64,7 +63,6 @@ impl TryFrom<ApiVersionInfo> for ApiVersion {
             major,
             minor,
             release,
-            repoid: value.repoid.clone(),
         })
     }
 }
-- 
2.39.5




From c.ebner at proxmox.com  Thu Nov 28 17:07:21 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 17:07:21 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 3/3] sync: push: use direct
 api version comparison in compatibility checks
In-Reply-To: <20241128160721.583578-1-c.ebner@proxmox.com>
References: <20241128160721.583578-1-c.ebner@proxmox.com>
Message-ID: <20241128160721.583578-3-c.ebner@proxmox.com>

Use the trait implementations of `ApiVersion` to perform operator-based
version comparisons. This makes the comparison more readable and
reduces the risk of errors.

No functional change intended.

Signed-off-by: Christian Ebner 
---
changes since version 1:
- use operator based version comparison, improving code readability

 src/server/push.rs | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/src/server/push.rs b/src/server/push.rs
index 95c7c6bff..fdd259aff 100644
--- a/src/server/push.rs
+++ b/src/server/push.rs
@@ -129,13 +129,11 @@ impl PushParameters {
         let api_version = ApiVersion::try_from(version_info)?;
 
         // push assumes namespace support on the remote side, fail early if missing
-        if api_version.major < 2 || (api_version.major == 2 && api_version.minor < 2) {
+        if api_version < ApiVersion::new(2, 2, 0) {
             bail!("Unsupported remote api version, minimum v2.2 required");
         }
 
-        let supports_prune_delete_stats = api_version.major > 3
-            || (api_version.major == 3 && api_version.minor >= 3)
-            || (api_version.major == 3 && api_version.minor == 2 && api_version.release >= 11);
+        let supports_prune_delete_stats = api_version >= ApiVersion::new(3, 2, 11);
 
         let target = PushTarget {
             remote,
-- 
2.39.5




From c.ebner at proxmox.com  Thu Nov 28 17:07:20 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 17:07:20 +0100
Subject: [pbs-devel] [PATCH v2 proxmox-backup 2/3] api types: version:
 implement traits to allow for version comparison
In-Reply-To: <20241128160721.583578-1-c.ebner@proxmox.com>
References: <20241128160721.583578-1-c.ebner@proxmox.com>
Message-ID: <20241128160721.583578-2-c.ebner@proxmox.com>

Derive and implement the traits to allow comparison of two
`ApiVersion` instances for more direct and easy api version
comparisons. Further, add some basic test cases to reduce risk of
regressions.

This is useful for e.g. feature compatibility checks by comparing api
versions of remote instances.

Example comparison:
```
api_version >= ApiVersion::new(3, 3, 0)
```
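
For illustration only (not part of the patch): since Rust compares
tuples lexicographically, the ordering implemented below matches plain
tuple comparison of `(major, minor, release)`:

```
// sketch: tuple ordering is lexicographic, same as the partial_cmp below
let lhs = (3u64, 2u64, 11u64); // v3.2.11
let rhs = (3u64, 3u64, 0u64);  // v3.3.0
assert!(lhs < rhs);
```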

Signed-off-by: Christian Ebner 
---
changes since version 1:
- implement traits for operator based version comparison
- add basic regression tests

 pbs-api-types/src/version.rs | 122 +++++++++++++++++++++++++++++++++++
 1 file changed, 122 insertions(+)

diff --git a/pbs-api-types/src/version.rs b/pbs-api-types/src/version.rs
index bd4c517da..09e725eb6 100644
--- a/pbs-api-types/src/version.rs
+++ b/pbs-api-types/src/version.rs
@@ -1,4 +1,5 @@
 //! Defines the types for the api version info endpoint
+use std::cmp::Ordering;
 use std::convert::TryFrom;
 
 use anyhow::{format_err, Context};
@@ -33,6 +34,7 @@ pub type ApiVersionMajor = u64;
 pub type ApiVersionMinor = u64;
 pub type ApiVersionRelease = u64;
 
+#[derive(PartialEq, Eq)]
 pub struct ApiVersion {
     pub major: ApiVersionMajor,
     pub minor: ApiVersionMinor,
@@ -66,3 +68,123 @@ impl TryFrom<ApiVersionInfo> for ApiVersion {
         })
     }
 }
+
+impl PartialOrd for ApiVersion {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        let ordering = match (
+            self.major.cmp(&other.major),
+            self.minor.cmp(&other.minor),
+            self.release.cmp(&other.release),
+        ) {
+            (Ordering::Equal, Ordering::Equal, ordering) => ordering,
+            (Ordering::Equal, ordering, _) => ordering,
+            (ordering, _, _) => ordering,
+        };
+
+        Some(ordering)
+    }
+}
+
+impl ApiVersion {
+    pub fn new(major: ApiVersionMajor, minor: ApiVersionMinor, release: ApiVersionRelease) -> Self {
+        Self {
+            major,
+            minor,
+            release,
+        }
+    }
+}
+
+#[test]
+fn same_level_version_comparison() {
+    let major_base = ApiVersion::new(2, 0, 0);
+    let major_less = ApiVersion::new(1, 0, 0);
+    let major_greater = ApiVersion::new(3, 0, 0);
+
+    let minor_base = ApiVersion::new(2, 2, 0);
+    let minor_less = ApiVersion::new(2, 1, 0);
+    let minor_greater = ApiVersion::new(2, 3, 0);
+
+    let release_base = ApiVersion::new(2, 2, 2);
+    let release_less = ApiVersion::new(2, 2, 1);
+    let release_greater = ApiVersion::new(2, 2, 3);
+
+    assert!(major_base == major_base);
+    assert!(minor_base == minor_base);
+    assert!(release_base == release_base);
+
+    assert!(major_base > major_less);
+    assert!(major_base >= major_less);
+    assert!(major_base != major_less);
+
+    assert!(major_base < major_greater);
+    assert!(major_base <= major_greater);
+    assert!(major_base != major_greater);
+
+    assert!(minor_base > minor_less);
+    assert!(minor_base >= minor_less);
+    assert!(minor_base != minor_less);
+
+    assert!(minor_base < minor_greater);
+    assert!(minor_base <= minor_greater);
+    assert!(minor_base != minor_greater);
+
+    assert!(release_base > release_less);
+    assert!(release_base >= release_less);
+    assert!(release_base != release_less);
+
+    assert!(release_base < release_greater);
+    assert!(release_base <= release_greater);
+    assert!(release_base != release_greater);
+}
+
+#[test]
+fn mixed_level_version_comparison() {
+    let major_base = ApiVersion::new(2, 0, 0);
+    let major_less = ApiVersion::new(1, 0, 0);
+    let major_greater = ApiVersion::new(3, 0, 0);
+
+    let minor_base = ApiVersion::new(2, 2, 0);
+    let minor_less = ApiVersion::new(2, 1, 0);
+    let minor_greater = ApiVersion::new(2, 3, 0);
+
+    let release_base = ApiVersion::new(2, 2, 2);
+    let release_less = ApiVersion::new(2, 2, 1);
+    let release_greater = ApiVersion::new(2, 2, 3);
+
+    assert!(major_base < minor_base);
+    assert!(major_base < minor_less);
+    assert!(major_base < minor_greater);
+
+    assert!(major_base < release_base);
+    assert!(major_base < release_less);
+    assert!(major_base < release_greater);
+
+    assert!(major_less < minor_base);
+    assert!(major_less < minor_less);
+    assert!(major_less < minor_greater);
+
+    assert!(major_less < release_base);
+    assert!(major_less < release_less);
+    assert!(major_less < release_greater);
+
+    assert!(major_greater > minor_base);
+    assert!(major_greater > minor_less);
+    assert!(major_greater > minor_greater);
+
+    assert!(major_greater > release_base);
+    assert!(major_greater > release_less);
+    assert!(major_greater > release_greater);
+
+    assert!(minor_base < release_base);
+    assert!(minor_base < release_less);
+    assert!(minor_base < release_greater);
+
+    assert!(minor_greater > release_base);
+    assert!(minor_greater > release_less);
+    assert!(minor_greater > release_greater);
+
+    assert!(minor_less < release_base);
+    assert!(minor_less < release_less);
+    assert!(minor_less < release_greater);
+}
-- 
2.39.5




From c.ebner at proxmox.com  Thu Nov 28 17:09:01 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Thu, 28 Nov 2024 17:09:01 +0100
Subject: [pbs-devel] [PATCH proxmox-backup 1/2] api types: version: add
 helper for min version checks
In-Reply-To: <20241128124925.318298-1-c.ebner@proxmox.com>
References: <20241128124925.318298-1-c.ebner@proxmox.com>
Message-ID: <29f14502-5496-4680-8a04-21e0a5657cec@proxmox.com>

superseded-by version 2:
https://lore.proxmox.com/pbs-devel/20241128160721.583578-1-c.ebner@proxmox.com/T/



From f.gruenbichler at proxmox.com  Fri Nov 29 08:57:16 2024
From: f.gruenbichler at proxmox.com (Fabian Grünbichler)
Date: Fri, 29 Nov 2024 08:57:16 +0100 (CET)
Subject: [pbs-devel] [PATCH proxmox] sys: fs: set FD_CLOEXEC when
 creating temp files
In-Reply-To: <20241128145440.4119007-1-d.csapak@proxmox.com>
References: <20241128145440.4119007-1-d.csapak@proxmox.com>
Message-ID: <771443181.1837.1732867036375@webmail.proxmox.com>

> Dominik Csapak wrote on 28.11.2024 at 15:54 CET:
> In general we want all open files to have set CLOEXEC since our
> reloading mechanism can basically fork at any moment and we don't want
> newer daemons to carry around old file descriptors, especially lock
> files.
> 
> Since `make_tmp_file` is called by many things (e.g. open_file_locked,
> logrotate, rrd), set FD_CLOEXEC after getting the filehandle.
> 
> This fixes an issue with e.g. tape backups not working because of such
> lingering lock files after a reload.

and also one that "leaked" an additional FD for every proxmox-backup-proxy reload via the RRD journal files - so this fixes a bug where PBS will eventually run into the open file limits if you keep reloading that service without ever stopping or restarting it.

might be a good addition to the commit message :)

> Signed-off-by: Dominik Csapak 
> ---
> there are other code parts where we open file without CLOEXEC, but
> wanted to send this for now.
> 
>  proxmox-sys/src/fs/file.rs | 7 +++++--
>  1 file changed, 5 insertions(+), 2 deletions(-)
> 
> diff --git a/proxmox-sys/src/fs/file.rs b/proxmox-sys/src/fs/file.rs
> index fbfc0b58..05d0aff0 100644
> --- a/proxmox-sys/src/fs/file.rs
> +++ b/proxmox-sys/src/fs/file.rs
> @@ -7,7 +7,7 @@ use std::time::Duration;
>  
>  use anyhow::{bail, format_err, Context as _, Error};
>  use nix::errno::Errno;
> -use nix::fcntl::OFlag;
> +use nix::fcntl::{FcntlArg, FdFlag, OFlag};
>  use nix::sys::stat;
>  use nix::unistd;
>  use nix::NixPath;
> @@ -128,7 +128,10 @@ pub fn make_tmp_file<P: AsRef<Path>>(
>      let mut template = path.to_owned();
>      template.set_extension("tmp_XXXXXX");
>      let (mut file, tmp_path) = match unistd::mkstemp(&template) {
> -        Ok((fd, path)) => (unsafe { File::from_raw_fd(fd) }, path),
> +        Ok((fd, path)) => {
> +            nix::fcntl::fcntl(fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))?;
> +            (unsafe { File::from_raw_fd(fd) }, path)
> +        }

unfortunately, this is still racy since the FD is open without O_CLOEXEC between the unistd::mkstemp and the fcntl - see the man page of fcntl which explicitly calls this out:

"In  multithreaded programs, using fcntl() F_SETFD to set the close-on-exec flag at the same time as another thread performs a fork(2) plus execve(2) is vulnerable to a race condition that may unintentionally leak the file descriptor to the program executed in the child process."

we could use libc::mkostemp (unsafe, path/template+flags -> raw fd or error as c_int) instead? and/or we could write a wrapper around it and propose it upstream for nix inclusion? ;) but since this seems to be the only place where we call mkstemp..
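
a rough sketch of such a wrapper (untested, the helper name and error
handling are made up for illustration) could look like this:

```
// sketch: create the temp file atomically with O_CLOEXEC via mkostemp(3),
// so there is no window between getting the fd and setting the flag in
// which a concurrent fork+exec could inherit it
use std::ffi::CString;
use std::fs::File;
use std::os::unix::io::FromRawFd;

use anyhow::{bail, Error};

fn mkostemp_cloexec(template: &str) -> Result<(File, String), Error> {
    // mkostemp replaces the trailing XXXXXX in place, so hand it a
    // mutable, NUL-terminated copy of the template
    let mut buf = CString::new(template)?.into_bytes_with_nul();
    let fd = unsafe { libc::mkostemp(buf.as_mut_ptr().cast(), libc::O_CLOEXEC) };
    if fd < 0 {
        bail!("mkostemp failed: {}", std::io::Error::last_os_error());
    }
    buf.pop(); // drop the trailing NUL before recovering the generated path
    Ok((unsafe { File::from_raw_fd(fd) }, String::from_utf8(buf)?))
}
```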

>          Err(err) => bail!("mkstemp {:?} failed: {}", template, err),
>      };
>  
> -- 
> 2.39.5
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel



From d.csapak at proxmox.com  Fri Nov 29 09:02:39 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 09:02:39 +0100
Subject: [pbs-devel] [PATCH proxmox] sys: fs: set FD_CLOEXEC when
 creating temp files
In-Reply-To: <771443181.1837.1732867036375@webmail.proxmox.com>
References: <20241128145440.4119007-1-d.csapak@proxmox.com>
 <771443181.1837.1732867036375@webmail.proxmox.com>
Message-ID: 

On 11/29/24 08:57, Fabian Grünbichler wrote:
>> Dominik Csapak wrote on 28.11.2024 at 15:54 CET:
>> In general we want all open files to have set CLOEXEC since our
>> reloading mechanism can basically fork at any moment and we don't want
>> newer daemons to carry around old file descriptors, especially lock
>> files.
>>
>> Since `make_tmp_file` is called by many things (e.g. open_file_locked,
>> logrotate, rrd), set FD_CLOEXEC after getting the filehandle.
>>
>> This fixes an issue with e.g. tape backups not working because of such
>> lingering lock files after a reload.
> 
> and also one that "leaked" an additional FD for every proxmox-backup-proxy reload via the RRD journal files - so this fixes a bug where PBS will eventually run into the open file limits if you keep reloading that service without ever stopping or restarting it.

nice! did not have too much time yesterday to look into other benefits ;)

> 
> might be a good addition to the commit message :)
> 
>> Signed-off-by: Dominik Csapak 
>> ---
>> there are other code parts where we open file without CLOEXEC, but
>> wanted to send this for now.
>>
>>   proxmox-sys/src/fs/file.rs | 7 +++++--
>>   1 file changed, 5 insertions(+), 2 deletions(-)
>>
>> diff --git a/proxmox-sys/src/fs/file.rs b/proxmox-sys/src/fs/file.rs
>> index fbfc0b58..05d0aff0 100644
>> --- a/proxmox-sys/src/fs/file.rs
>> +++ b/proxmox-sys/src/fs/file.rs
>> @@ -7,7 +7,7 @@ use std::time::Duration;
>>   
>>   use anyhow::{bail, format_err, Context as _, Error};
>>   use nix::errno::Errno;
>> -use nix::fcntl::OFlag;
>> +use nix::fcntl::{FcntlArg, FdFlag, OFlag};
>>   use nix::sys::stat;
>>   use nix::unistd;
>>   use nix::NixPath;
>> @@ -128,7 +128,10 @@ pub fn make_tmp_file<P: AsRef<Path>>(
>>       let mut template = path.to_owned();
>>       template.set_extension("tmp_XXXXXX");
>>       let (mut file, tmp_path) = match unistd::mkstemp(&template) {
>> -        Ok((fd, path)) => (unsafe { File::from_raw_fd(fd) }, path),
>> +        Ok((fd, path)) => {
>> +            nix::fcntl::fcntl(fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))?;
>> +            (unsafe { File::from_raw_fd(fd) }, path)
>> +        }
> 
> unfortunately, this is still racy since the FD is open without O_CLOEXEC between the unistd::mkstemp and the fcntl - see the man page of fcntl which explicitly calls this out:
> 
> "In  multithreaded programs, using fcntl() F_SETFD to set the close-on-exec flag at the same time as another thread performs a fork(2) plus execve(2) is vulnerable to a race condition that may unintentionally leak the file descriptor to the program executed in the child process."
> 
> we could use libc::mkostemp (unsafe, path/template+flags -> raw fd or error as c_int) instead? and/or we could write a wrapper around it and propose it upstream for nix inclusion? ;) but since this seems to be the only place where we call mkstemp..
> 

yeah, had the same thought, and have a version here where i basically copied nix's
mkstemp but with oflags + a mkostemp call to libc, so i can send that if wanted

the question for me is whether it's ok to use, since mkostemp is only a glibc
extension (since 2.7) and we may use that in proxmox-backup-client, which we
want to build statically? (not sure how static compilation works with such a
thing though...)

>>           Err(err) => bail!("mkstemp {:?} failed: {}", template, err),
>>       };
>>   
>> -- 
>> 2.39.5
>>
>>
>>
>> _______________________________________________
>> pbs-devel mailing list
>> pbs-devel at lists.proxmox.com
>> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel




From f.gruenbichler at proxmox.com  Fri Nov 29 09:14:18 2024
From: f.gruenbichler at proxmox.com (Fabian Grünbichler)
Date: Fri, 29 Nov 2024 09:14:18 +0100 (CET)
Subject: [pbs-devel] [PATCH proxmox] sys: fs: set FD_CLOEXEC when
 creating temp files
In-Reply-To: 
References: <20241128145440.4119007-1-d.csapak@proxmox.com>
 <771443181.1837.1732867036375@webmail.proxmox.com>
 
Message-ID: <708352934.1879.1732868058806@webmail.proxmox.com>

> Dominik Csapak wrote on 29.11.2024 at 09:02 CET:
> On 11/29/24 08:57, Fabian Grünbichler wrote:
> >> Dominik Csapak wrote on 28.11.2024 at 15:54 CET:
> >> @@ -128,7 +128,10 @@ pub fn make_tmp_file<P: AsRef<Path>>(
> >>       let mut template = path.to_owned();
> >>       template.set_extension("tmp_XXXXXX");
> >>       let (mut file, tmp_path) = match unistd::mkstemp(&template) {
> >> -        Ok((fd, path)) => (unsafe { File::from_raw_fd(fd) }, path),
> >> +        Ok((fd, path)) => {
> >> +            nix::fcntl::fcntl(fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC))?;
> >> +            (unsafe { File::from_raw_fd(fd) }, path)
> >> +        }
> > 
> > unfortunately, this is still racy since the FD is open without O_CLOEXEC between the unistd::mkstemp and the fcntl - see the man page of fcntl which explicitly calls this out:
> > 
> > "In  multithreaded programs, using fcntl() F_SETFD to set the close-on-exec flag at the same time as another thread performs a fork(2) plus execve(2) is vulnerable to a race condition that may unintentionally leak the file descriptor to the program executed in the child process."
> > 
> > we could use libc::mkostemp (unsafe, path/template+flags -> raw fd or error as c_int) instead? and/or we could write a wrapper around it and propose it upstream for nix inclusion? ;) but since this seems to be the only place where we call mkstemp..
> > 
> 
> yeah had the same though, and have a version here where i basically copied nix's mkstemp but with 
> oflags + mkostemp call to libc, so i can send that if wanted
> 
> the question for me is if it's ok to use since mkostemp is only a glibc extension (since 2.7) and we
> may use that in proxmox-backup-client which we want to statically build ?
> (not sure how that static compilation works with such a thing though...)

that's a fair argument; if we want that, it looks like we'd have to reimplement mkostemp ourselves I guess.. or we abandon this approach and evaluate allow-listing the FDs we want to keep and closing all others on fork.. but then we have to ensure no forking ever happens outside of our immediate control, which seems kinda annoying as well..
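
the allow-list variant could look roughly like this (sketch only, the
helper name is made up; walking /proc/self/fd before the re-exec):

```
// sketch: mark every open fd except an explicit allow-list as
// close-on-exec, so stray descriptors cannot leak into the child
use std::collections::HashSet;
use std::os::unix::io::RawFd;

use anyhow::Error;
use nix::fcntl::{fcntl, FcntlArg, FdFlag};

fn cloexec_all_except(keep: &HashSet<RawFd>) -> Result<(), Error> {
    for entry in std::fs::read_dir("/proc/self/fd")? {
        let fd: RawFd = match entry?.file_name().to_string_lossy().parse() {
            Ok(fd) => fd,
            Err(_) => continue, // skip anything that isn't a plain fd number
        };
        if keep.contains(&fd) {
            continue;
        }
        // best effort: this also hits the fd backing read_dir itself,
        // which is fine since it is closed again right after the loop
        let _ = fcntl(fd, FcntlArg::F_SETFD(FdFlag::FD_CLOEXEC));
    }
    Ok(())
}
```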



From t.lamprecht at proxmox.com  Fri Nov 29 11:21:39 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Fri, 29 Nov 2024 11:21:39 +0100
Subject: [pbs-devel] [PATCH proxmox] sys: fs: set FD_CLOEXEC when
 creating temp files
In-Reply-To: 
References: <20241128145440.4119007-1-d.csapak@proxmox.com>
 <771443181.1837.1732867036375@webmail.proxmox.com>
 
Message-ID: 

On 29.11.24 at 09:02, Dominik Csapak wrote:
> the question for me is if it's ok to use since mkostemp is only a glibc extension (since 2.7) and we
> may use that in proxmox-backup-client which we want to statically build ?
> (not sure how that static compilation works with such a thing though...)

FWIW, it's supported by MUSL:

http://git.musl-libc.org/cgit/musl/tree/src/temp/mkostemp.c



From c.ebner at proxmox.com  Fri Nov 29 11:41:33 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Fri, 29 Nov 2024 11:41:33 +0100
Subject: [pbs-devel] [PATCH proxmox-widget-toolkit 1/2] panel: disk list:
 return consistent value for unknown smart status
Message-ID: <20241129104134.127763-1-c.ebner@proxmox.com>

Until now, the reported smart value is returned unconditionally, even
if the drive reports an `UNKNOWN` status.
To allow for better handling of the unknown smart state, return the
utils helper text in that case instead. This makes it possible to,
e.g., only conditionally show the smart values window.

Signed-off-by: Christian Ebner 
---
 src/panel/DiskList.js | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/panel/DiskList.js b/src/panel/DiskList.js
index dc10ac5..dfd8c8e 100644
--- a/src/panel/DiskList.js
+++ b/src/panel/DiskList.js
@@ -7,7 +7,12 @@ Ext.define('pmx-disk-list', {
 	{
 	    name: 'status',
 	    convert: function(value, rec) {
-		if (value) return value;
+		if (value) {
+		    if (value.toLowerCase() === 'unknown') {
+			return Proxmox.Utils.unknownText;
+		    }
+		    return value;
+		}
 		if (rec.data.health) {
 		    return rec.data.health;
 		}
-- 
2.39.5




From c.ebner at proxmox.com  Fri Nov 29 11:41:34 2024
From: c.ebner at proxmox.com (Christian Ebner)
Date: Fri, 29 Nov 2024 11:41:34 +0100
Subject: [pbs-devel] [PATCH proxmox-widget-toolkit 2/2] disk list: disable
 show smart values button if status unknown
In-Reply-To: <20241129104134.127763-1-c.ebner@proxmox.com>
References: <20241129104134.127763-1-c.ebner@proxmox.com>
Message-ID: <20241129104134.127763-2-c.ebner@proxmox.com>

Do not allow opening the smart values window, either by double clicking
the record or by clicking the show button, if the selected drive's
status is unknown.

Fetching the smart values for such devices might fail. Devices which
do not support this include, e.g., USB pen drives used as removable
datastores in Proxmox Backup Server.

Reported in the community forum:
https://forum.proxmox.com/threads/158217/

Signed-off-by: Christian Ebner 
---
 src/panel/DiskList.js | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/panel/DiskList.js b/src/panel/DiskList.js
index dfd8c8e..3a1632c 100644
--- a/src/panel/DiskList.js
+++ b/src/panel/DiskList.js
@@ -86,6 +86,9 @@ Ext.define('Proxmox.DiskList', {
 	    if (!selection || selection.length < 1) return;
 
 	    let rec = selection[0];
+	    if (!rec.data.status || rec.data.status === Proxmox.Utils.unknownText) {
+		return;
+	    }
 	    Ext.create('Proxmox.window.DiskSmart', {
 		baseurl: view.baseurl,
 		dev: rec.data.name,
@@ -369,7 +372,8 @@ Ext.define('Proxmox.DiskList', {
 		parentXType: 'treepanel',
 		disabled: true,
 		enableFn: function(rec) {
-		    if (!rec || rec.data.parent) {
+		    if (!rec || rec.data.parent || !rec.data.status ||
+			rec.data.status === Proxmox.Utils.unknownText) {
 			return false;
 		    } else {
 			return true;
-- 
2.39.5




From s.sterz at proxmox.com  Fri Nov 29 11:53:21 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Fri, 29 Nov 2024 11:53:21 +0100
Subject: [pbs-devel] [PATCH proxmox 4/4] notify: use proxmox-sendmail
 forward implementation
In-Reply-To: <20241129105321.143877-1-s.sterz@proxmox.com>
References: <20241129105321.143877-1-s.sterz@proxmox.com>
Message-ID: <20241129105321.143877-4-s.sterz@proxmox.com>

moves to depending on `proxmox-sendmail` for forwarding mails via
`sendmail` too.

Signed-off-by: Shannon Sterz 
---
 proxmox-notify/Cargo.toml                |  2 +-
 proxmox-notify/src/endpoints/sendmail.rs | 54 +-----------------------
 2 files changed, 3 insertions(+), 53 deletions(-)

diff --git a/proxmox-notify/Cargo.toml b/proxmox-notify/Cargo.toml
index 6e94930a..725bd210 100644
--- a/proxmox-notify/Cargo.toml
+++ b/proxmox-notify/Cargo.toml
@@ -39,7 +39,7 @@ proxmox-uuid = { workspace = true, features = ["serde"] }
 
 [features]
 default = ["sendmail", "gotify", "smtp", "webhook"]
-mail-forwarder = ["dep:mail-parser", "dep:proxmox-sys"]
+mail-forwarder = ["dep:mail-parser", "dep:proxmox-sys", "proxmox-sendmail/mail-forwarder"]
 sendmail = ["dep:proxmox-sys", "dep:base64", "dep:proxmox-sendmail"]
 gotify = ["dep:proxmox-http"]
 pve-context = ["dep:proxmox-sys"]
diff --git a/proxmox-notify/src/endpoints/sendmail.rs b/proxmox-notify/src/endpoints/sendmail.rs
index 1268d372..a2a1ed3a 100644
--- a/proxmox-notify/src/endpoints/sendmail.rs
+++ b/proxmox-notify/src/endpoints/sendmail.rs
@@ -1,6 +1,4 @@
-use std::io::Write;
-use std::process::{Command, Stdio};
-
+use proxmox_sendmail::Mail;
 use serde::{Deserialize, Serialize};
 
 use proxmox_schema::api_types::COMMENT_SCHEMA;
@@ -147,7 +145,7 @@ impl Endpoint for SendmailEndpoint {
             }
             #[cfg(feature = "mail-forwarder")]
             Content::ForwardedMail { raw, uid, .. } => {
-                forward(&recipients_str, &mailfrom, raw, *uid)
+                Mail::forward(&recipients_str, &mailfrom, raw, *uid)
                     .map_err(|err| Error::NotifyFailed(self.config.name.clone(), err.into()))
             }
         }
@@ -162,51 +160,3 @@ impl Endpoint for SendmailEndpoint {
         self.config.disable.unwrap_or_default()
     }
 }
-
-/// Forwards an email message to a given list of recipients.
-///
-/// ``sendmail`` is used for sending the mail, thus `message` must be
-/// compatible with that (the message is piped into stdin unmodified).
-#[cfg(feature = "mail-forwarder")]
-fn forward(mailto: &[&str], mailfrom: &str, message: &[u8], uid: Option<u32>) -> Result<(), Error> {
-    use std::os::unix::process::CommandExt;
-
-    if mailto.is_empty() {
-        return Err(Error::Generic(
-            "At least one recipient has to be specified!".into(),
-        ));
-    }
-
-    let mut builder = Command::new("/usr/sbin/sendmail");
-
-    builder
-        .args([
-            "-N", "never", // never send DSN (avoid mail loops)
-            "-f", mailfrom, "--",
-        ])
-        .args(mailto)
-        .stdin(Stdio::piped())
-        .stdout(Stdio::null())
-        .stderr(Stdio::null());
-
-    if let Some(uid) = uid {
-        builder.uid(uid);
-    }
-
-    let mut process = builder
-        .spawn()
-        .map_err(|err| Error::Generic(format!("could not spawn sendmail process: {err}")))?;
-
-    process
-        .stdin
-        .take()
-        .unwrap()
-        .write_all(message)
-        .map_err(|err| Error::Generic(format!("couldn't write to sendmail stdin: {err}")))?;
-
-    process
-        .wait()
-        .map_err(|err| Error::Generic(format!("sendmail did not exit successfully: {err}")))?;
-
-    Ok(())
-}
-- 
2.39.5




From s.sterz at proxmox.com  Fri Nov 29 11:53:20 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Fri, 29 Nov 2024 11:53:20 +0100
Subject: [pbs-devel] [PATCH proxmox 3/4] sendmail: add mail-forwarder feature
In-Reply-To: <20241129105321.143877-1-s.sterz@proxmox.com>
References: <20241129105321.143877-1-s.sterz@proxmox.com>
Message-ID: <20241129105321.143877-3-s.sterz@proxmox.com>

this moves the mail forwarding implementation from `proxmox-notify` into
`proxmox-sendmail` to cover more `sendmail` related use-cases in one
place.
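
For reference, a hypothetical call site of the moved function (addresses
and uid are placeholders, not taken from the actual code):

```
use proxmox_sendmail::Mail;

fn forward_raw(raw: &[u8]) -> Result<(), anyhow::Error> {
    // forward the raw rfc822 message unmodified to two recipients,
    // running the sendmail child as the given uid
    Mail::forward(
        &["admin@example.com", "backup@example.com"],
        "pbs@example.com",
        raw,
        Some(65534), // e.g. nobody; placeholder for illustration
    )
}
```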

Signed-off-by: Shannon Sterz 
---
 proxmox-sendmail/Cargo.toml |  4 +++
 proxmox-sendmail/src/lib.rs | 57 +++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)

diff --git a/proxmox-sendmail/Cargo.toml b/proxmox-sendmail/Cargo.toml
index 790b324b..e04e2595 100644
--- a/proxmox-sendmail/Cargo.toml
+++ b/proxmox-sendmail/Cargo.toml
@@ -14,3 +14,7 @@ anyhow = { workspace = true }
 base64 = { workspace = true }
 percent-encoding = { workspace = true }
 proxmox-time = { workspace = true }
+
+[features]
+default = []
+mail-forwarder = []
diff --git a/proxmox-sendmail/src/lib.rs b/proxmox-sendmail/src/lib.rs
index c97c3186..d41b266c 100644
--- a/proxmox-sendmail/src/lib.rs
+++ b/proxmox-sendmail/src/lib.rs
@@ -210,6 +210,56 @@ impl<'a> Mail<'a> {
         Ok(())
     }
 
+    /// Forwards an email message to a given list of recipients.
+    ///
+    /// `message` must be compatible with ``sendmail`` (the message is piped into stdin unmodified).
+    #[cfg(feature = "mail-forwarder")]
+    pub fn forward(
+        mailto: &[&str],
+        mailfrom: &str,
+        message: &[u8],
+        uid: Option<u32>,
+    ) -> Result<(), Error> {
+        use std::os::unix::process::CommandExt;
+
+        if mailto.is_empty() {
+            bail!("At least one recipient has to be specified!");
+        }
+
+        let mut builder = Command::new("/usr/sbin/sendmail");
+
+        builder
+            .args([
+                "-N", "never", // never send DSN (avoid mail loops)
+                "-f", mailfrom, "--",
+            ])
+            .args(mailto)
+            .stdin(Stdio::piped())
+            .stdout(Stdio::null())
+            .stderr(Stdio::null());
+
+        if let Some(uid) = uid {
+            builder.uid(uid);
+        }
+
+        let mut sendmail_process = builder
+            .spawn()
+            .with_context(|| "could not spawn sendmail process")?;
+
+        sendmail_process
+            .stdin
+            .take()
+            .unwrap()
+            .write_all(message)
+            .with_context(|| "couldn't write to sendmail stdin")?;
+
+        sendmail_process
+            .wait()
+            .with_context(|| "sendmail did not exit successfully")?;
+
+        Ok(())
+    }
+
     fn format_mail(&self, now: i64) -> Result<String, Error> {
         let mut body = String::new();
         let file_boundary = format!("----_=_NextPart_001_{}", now);
@@ -360,6 +410,13 @@ mod test {
         assert!(result.is_err());
     }
 
+    #[test]
+    #[cfg(feature = "mail-forwarder")]
+    fn forwarding_without_recipients_fails() {
+        let result = Mail::forward(&[], "me@example.com", String::from("text").as_bytes(), None);
+        assert!(result.is_err());
+    }
+
     #[test]
     fn simple_ascii_text_mail() {
         let mail = Mail::new(
-- 
2.39.5




From s.sterz at proxmox.com  Fri Nov 29 11:53:19 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Fri, 29 Nov 2024 11:53:19 +0100
Subject: [pbs-devel] [PATCH proxmox 2/4] notify: switch sendmail endpoint
 over to new crate
In-Reply-To: <20241129105321.143877-1-s.sterz@proxmox.com>
References: <20241129105321.143877-1-s.sterz@proxmox.com>
Message-ID: <20241129105321.143877-2-s.sterz@proxmox.com>

use the new `proxmox-sendmail` crate instead of the bespoke
implementation in `proxmox-notify`.

Signed-off-by: Shannon Sterz 
---
 proxmox-notify/Cargo.toml                |   3 +-
 proxmox-notify/src/endpoints/sendmail.rs | 171 +----------------------
 2 files changed, 9 insertions(+), 165 deletions(-)

diff --git a/proxmox-notify/Cargo.toml b/proxmox-notify/Cargo.toml
index b5b3719e..6e94930a 100644
--- a/proxmox-notify/Cargo.toml
+++ b/proxmox-notify/Cargo.toml
@@ -32,6 +32,7 @@ proxmox-human-byte.workspace = true
 proxmox-schema = { workspace = true, features = ["api-macro", "api-types"] }
 proxmox-section-config = { workspace = true }
 proxmox-serde.workspace = true
+proxmox-sendmail = { workspace = true, optional = true }
 proxmox-sys = { workspace = true, optional = true }
 proxmox-time.workspace = true
 proxmox-uuid = { workspace = true, features = ["serde"] }
@@ -39,7 +40,7 @@ proxmox-uuid = { workspace = true, features = ["serde"] }
 [features]
 default = ["sendmail", "gotify", "smtp", "webhook"]
 mail-forwarder = ["dep:mail-parser", "dep:proxmox-sys"]
-sendmail = ["dep:proxmox-sys", "dep:base64"]
+sendmail = ["dep:proxmox-sys", "dep:base64", "dep:proxmox-sendmail"]
 gotify = ["dep:proxmox-http"]
 pve-context = ["dep:proxmox-sys"]
 pbs-context = ["dep:proxmox-sys"]
diff --git a/proxmox-notify/src/endpoints/sendmail.rs b/proxmox-notify/src/endpoints/sendmail.rs
index d31b9672..1268d372 100644
--- a/proxmox-notify/src/endpoints/sendmail.rs
+++ b/proxmox-notify/src/endpoints/sendmail.rs
@@ -137,15 +137,13 @@ impl Endpoint for SendmailEndpoint {
                     .clone()
                     .unwrap_or_else(|| context().default_sendmail_author());
 
-                sendmail(
-                    &recipients_str,
-                    &subject,
-                    &text_part,
-                    &html_part,
-                    &mailfrom,
-                    &author,
-                )
-                .map_err(|err| Error::NotifyFailed(self.config.name.clone(), err.into()))
+                let mut mail =
+                    Mail::new(&author, &mailfrom, &subject, &text_part).with_html_alt(&html_part);
+
+                recipients_str.iter().for_each(|r| mail.add_recipient(r));
+
+                mail.send()
+                    .map_err(|err| Error::NotifyFailed(self.config.name.clone(), err.into()))
             }
             #[cfg(feature = "mail-forwarder")]
             Content::ForwardedMail { raw, uid, .. } => {
@@ -165,107 +163,6 @@ impl Endpoint for SendmailEndpoint {
     }
 }
 
-/// Sends multi-part mail with text and/or html to a list of recipients
-///
-/// Includes the header `Auto-Submitted: auto-generated`, so that auto-replies
-/// (i.e. OOO replies) won't trigger.
-/// ``sendmail`` is used for sending the mail.
-fn sendmail(
-    mailto: &[&str],
-    subject: &str,
-    text: &str,
-    html: &str,
-    mailfrom: &str,
-    author: &str,
-) -> Result<(), Error> {
-    if mailto.is_empty() {
-        return Err(Error::Generic(
-            "At least one recipient has to be specified!".into(),
-        ));
-    }
-    let now = proxmox_time::epoch_i64();
-    let body = format_mail(mailto, mailfrom, author, subject, text, html, now)?;
-
-    let mut sendmail_process = Command::new("/usr/sbin/sendmail")
-        .arg("-B")
-        .arg("8BITMIME")
-        .arg("-f")
-        .arg(mailfrom)
-        .arg("--")
-        .args(mailto)
-        .stdin(Stdio::piped())
-        .spawn()
-        .map_err(|err| Error::Generic(format!("could not spawn sendmail process: {err}")))?;
-
-    sendmail_process
-        .stdin
-        .take()
-        .expect("stdin already taken")
-        .write_all(body.as_bytes())
-        .map_err(|err| Error::Generic(format!("couldn't write to sendmail stdin: {err}")))?;
-
-    sendmail_process
-        .wait()
-        .map_err(|err| Error::Generic(format!("sendmail did not exit successfully: {err}")))?;
-
-    Ok(())
-}
-
-fn format_mail(
-    mailto: &[&str],
-    mailfrom: &str,
-    author: &str,
-    subject: &str,
-    text: &str,
-    html: &str,
-    timestamp: i64,
-) -> Result<String, Error> {
-    use std::fmt::Write as _;
-
-    let recipients = mailto.join(",");
-    let boundary = format!("----_=_NextPart_001_{timestamp}");
-
-    let mut body = String::new();
-
-    // Format email header
-    body.push_str("Content-Type: multipart/alternative;\n");
-    let _ = writeln!(body, "\tboundary=\"{boundary}\"");
-    body.push_str("MIME-Version: 1.0\n");
-
-    if !subject.is_ascii() {
-        let _ = writeln!(body, "Subject: =?utf-8?B?{}?=", base64::encode(subject));
-    } else {
-        let _ = writeln!(body, "Subject: {subject}");
-    }
-    let _ = writeln!(body, "From: {author} <{mailfrom}>");
-    let _ = writeln!(body, "To: {recipients}");
-    let rfc2822_date = proxmox_time::epoch_to_rfc2822(timestamp)
-        .map_err(|err| Error::Generic(format!("failed to format time: {err}")))?;
-    let _ = writeln!(body, "Date: {rfc2822_date}");
-    body.push_str("Auto-Submitted: auto-generated;\n");
-    body.push('\n');
-
-    // Format email body
-    body.push_str("This is a multi-part message in MIME format.\n");
-    let _ = write!(body, "\n--{boundary}\n");
-
-    body.push_str("Content-Type: text/plain;\n");
-    body.push_str("\tcharset=\"UTF-8\"\n");
-    body.push_str("Content-Transfer-Encoding: 8bit\n");
-    body.push('\n');
-    body.push_str(text);
-    let _ = write!(body, "\n--{boundary}\n");
-
-    body.push_str("Content-Type: text/html;\n");
-    body.push_str("\tcharset=\"UTF-8\"\n");
-    body.push_str("Content-Transfer-Encoding: 8bit\n");
-    body.push('\n');
-    body.push_str(html);
-    let _ = write!(body, "\n--{boundary}--");
-
-    Ok(body)
-}
-
 /// Forwards an email message to a given list of recipients.
 ///
 /// ``sendmail`` is used for sending the mail, thus `message` must be
@@ -313,57 +210,3 @@ fn forward(mailto: &[&str], mailfrom: &str, message: &[u8], uid: Option<u32>) ->
 
     Ok(())
 }
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[test]
-    fn email_without_recipients() {
-        let result = sendmail(&[], "Subject2", "", "HTML", "root", "Proxmox");
-        assert!(result.is_err());
-    }
-
-    #[test]
-    fn test_format_mail_multipart() {
-        let message = format_mail(
-            &["Tony Est "],
-            "foobar at example.com",
-            "Fred Oobar",
-            "This is the subject",
-            "This is the plain body",
-            "This is the HTML body",
-            1718977850,
-        )
-        .expect("format_message failed");
-
-        assert_eq!(
-            message,
-            r#"Content-Type: multipart/alternative;
-	boundary="----_=_NextPart_001_1718977850"
-MIME-Version: 1.0
-Subject: This is the subject
-From: Fred Oobar 
-To: Tony Est 
-Date: Fri, 21 Jun 2024 15:50:50 +0200
-Auto-Submitted: auto-generated;
-
-This is a multi-part message in MIME format.
-
-------_=_NextPart_001_1718977850
-Content-Type: text/plain;
-	charset="UTF-8"
-Content-Transfer-Encoding: 8bit
-
-This is the plain body
-------_=_NextPart_001_1718977850
-Content-Type: text/html;
-	charset="UTF-8"
-Content-Transfer-Encoding: 8bit
-
-This is the HTML body
-------_=_NextPart_001_1718977850--"#
-                .to_owned()
-        );
-    }
-}
-- 
2.39.5




From s.sterz at proxmox.com  Fri Nov 29 11:53:18 2024
From: s.sterz at proxmox.com (Shannon Sterz)
Date: Fri, 29 Nov 2024 11:53:18 +0100
Subject: [pbs-devel] [PATCH proxmox 1/4] sendmail: add sendmail crate
Message-ID: <20241129105321.143877-1-s.sterz@proxmox.com>

add the `proxmox-sendmail` crate that makes it easier to send mails via
the `sendmail` utility. features include:

- multipart/alternative support for html+plain text mails
- multipart/mixed support for mails with attachments
- automatic nesting of multipart/alternative and multipart/mixed parts
- masking recipients to avoid disclosing them to everyone
- encoding Subject, To, From, and attachment file names correctly
- adding an `Auto-Submitted` header to avoid triggering automated mails

also includes several tests to ensure that mails are formatted
correctly.
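
for reference, a minimal usage sketch of the builder API (addresses are
placeholders):

```
use proxmox_sendmail::Mail;

fn send_report() -> Result<(), anyhow::Error> {
    // multipart/mixed mail with an html alternative and one attachment,
    // piped to /usr/sbin/sendmail by send()
    Mail::new("Proxmox Backup Server", "pbs@example.com", "Job finished", "plain text body")
        .with_recipient_and_name("Admin", "admin@example.com")
        .with_html_alt("<p>plain text body, but fancy</p>")
        .with_attachment("report.txt", "text/plain", b"report contents")
        .send()
}
```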

Signed-off-by: Shannon Sterz 
---
 Cargo.toml                  |   2 +
 proxmox-sendmail/Cargo.toml |  16 +
 proxmox-sendmail/src/lib.rs | 664 ++++++++++++++++++++++++++++++++++++
 3 files changed, 682 insertions(+)
 create mode 100644 proxmox-sendmail/Cargo.toml
 create mode 100644 proxmox-sendmail/src/lib.rs

diff --git a/Cargo.toml b/Cargo.toml
index 84fbe979..b62fcd50 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -33,6 +33,7 @@ members = [
     "proxmox-rrd-api-types",
     "proxmox-schema",
     "proxmox-section-config",
+    "proxmox-sendmail",
     "proxmox-serde",
     "proxmox-shared-cache",
     "proxmox-shared-memory",
@@ -138,6 +139,7 @@ proxmox-rest-server = { version = "0.8.0", path = "proxmox-rest-server" }
 proxmox-router = { version = "3.0.0", path = "proxmox-router" }
 proxmox-schema = { version = "3.1.2", path = "proxmox-schema" }
 proxmox-section-config = { version = "2.1.0", path = "proxmox-section-config" }
+proxmox-sendmail = { version = "0.1.0", path = "proxmox-sendmail" }
 proxmox-serde = { version = "0.1.1", path = "proxmox-serde", features = [ "serde_json" ] }
 proxmox-shared-memory = { version = "0.3.0", path = "proxmox-shared-memory" }
 proxmox-sortable-macro = { version = "0.1.3", path = "proxmox-sortable-macro" }
diff --git a/proxmox-sendmail/Cargo.toml b/proxmox-sendmail/Cargo.toml
new file mode 100644
index 00000000..790b324b
--- /dev/null
+++ b/proxmox-sendmail/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "proxmox-sendmail"
+version = "0.1.0"
+authors.workspace = true
+edition.workspace = true
+license.workspace = true
+repository.workspace = true
+homepage.workspace = true
+exclude.workspace = true
+rust-version.workspace = true
+
+[dependencies]
+anyhow = { workspace = true }
+base64 = { workspace = true }
+percent-encoding = { workspace = true }
+proxmox-time = { workspace = true }
diff --git a/proxmox-sendmail/src/lib.rs b/proxmox-sendmail/src/lib.rs
new file mode 100644
index 00000000..c97c3186
--- /dev/null
+++ b/proxmox-sendmail/src/lib.rs
@@ -0,0 +1,664 @@
+//!
+//! This library implements the [`Mail`] type, which makes it easy to send emails with attachments
+//! and alternative html parts to one or multiple recipients via ``sendmail``.
+//!
+
+use std::io::Write;
+use std::process::{Command, Stdio};
+
+use anyhow::{bail, Context, Error};
+use percent_encoding::{utf8_percent_encode, AsciiSet, CONTROLS};
+
+// Characters in this set will be encoded, so reproduce the inverse of the set described by RFC5987
+// Section 3.2.1 `attr-char`, as that describes all characters that **don't** need encoding:
+//
+// https://datatracker.ietf.org/doc/html/rfc5987#section-3.2.1
+//
+// `CONTROLS` contains all control characters 0x00 - 0x1f and 0x7f as well as all non-ascii
+// characters, so we need to add all characters here that aren't described in `attr-char` that are
+// in the range 0x20-0x7e
+const RFC5987SET: &AsciiSet = &CONTROLS
+    .add(b' ')
+    .add(b'"')
+    .add(b'%')
+    .add(b'&')
+    .add(b'\'')
+    .add(b'(')
+    .add(b')')
+    .add(b'*')
+    .add(b',')
+    .add(b'/')
+    .add(b':')
+    .add(b';')
+    .add(b'<')
+    .add(b'=')
+    .add(b'>')
+    .add(b'?')
+    .add(b'@')
+    .add(b'[')
+    .add(b'\\')
+    .add(b']')
+    .add(b'{')
+    .add(b'}');
+
+struct Recipient {
+    name: Option<String>,
+    email: String,
+}
+
+struct Attachment<'a> {
+    filename: String,
+    mime: String,
+    content: &'a [u8],
+}
+
+pub struct Mail<'a> {
+    mail_author: String,
+    mail_from: String,
+    subject: String,
+    to: Vec<Recipient>,
+    body_txt: String,
+    body_html: Option<String>,
+    attachments: Vec<Attachment<'a>>,
+}
+
+impl<'a> Mail<'a> {
+    /// Creates a new mail with a mail author, from address, subject line and a plain text body.
+    ///
+    /// Note: If the author's name or the subject line contains UTF-8 characters they will be
+    /// appropriately encoded.
+    pub fn new(mail_author: &str, mail_from: &str, subject: &str, body_txt: &str) -> Self {
+        Self {
+            mail_author: mail_author.to_string(),
+            mail_from: mail_from.to_string(),
+            subject: subject.to_string(),
+            to: Vec::new(),
+            body_txt: body_txt.to_string(),
+            body_html: None,
+            attachments: Vec::new(),
+        }
+    }
+
+    /// Adds a recipient to the mail without specifying a name separately.
+    ///
+    /// Note: No formatting or encoding will be done here, the value will be passed to the `To:`
+    /// header directly.
+    pub fn add_recipient(&mut self, email: &str) {
+        self.to.push(Recipient {
+            name: None,
+            email: email.to_string(),
+        });
+    }
+
+    /// Builder-pattern method to conveniently add a recipient to an email without specifying a
+    /// name separately.
+    ///
+    /// Note: No formatting or encoding will be done here, the value will be passed to the `To:`
+    /// header directly.
+    pub fn with_recipient(mut self, email: &str) -> Self {
+        self.add_recipient(email);
+        self
+    }
+
+    /// Adds a recipient to the mail with a name.
+    ///
+    /// Notes:
+    ///
+    /// - If the name contains UTF-8 characters it will be encoded. Then the possibly encoded name
+    ///   and non-encoded email address will be passed to the `To:` header in this format:
+    ///   `{encoded_name} <{email}>`
+    /// - If multiple receivers are specified, they will be masked so as not to disclose them to
+    ///   other receivers.
+    pub fn add_recipient_and_name(&mut self, name: &str, email: &str) {
+        self.to.push(Recipient {
+            name: Some(name.to_string()),
+            email: email.to_string(),
+        });
+    }
+
+    /// Builder-style method to conveniently add a recipient with a name to an email.
+    ///
+    /// Notes:
+    ///
+    /// - If the name contains UTF-8 characters it will be encoded. Then the possibly encoded name
+    ///   and non-encoded email address will be passed to the `To:` header in this format:
+    ///   `{encoded_name} <{email}>`
+    /// - If multiple receivers are specified, they will be masked so as not to disclose them to
+    ///   other receivers.
+    pub fn with_recipient_and_name(mut self, name: &str, email: &str) -> Self {
+        self.add_recipient_and_name(name, email);
+        self
+    }
+
+    /// Adds an attachment with a specified file name and mime-type to an email.
+    ///
+    /// Note: Adding attachments triggers `multipart/mixed` mode.
+    pub fn add_attachment(&mut self, filename: &str, mime_type: &str, content: &'a [u8]) {
+        self.attachments.push(Attachment {
+            filename: filename.to_string(),
+            mime: mime_type.to_string(),
+            content,
+        });
+    }
+
+    /// Builder-style method to conveniently add an attachment with a specific filename and
+    /// mime-type to an email.
+    ///
+    /// Note: Adding attachments triggers `multipart/mixed` mode.
+    pub fn with_attachment(mut self, filename: &str, mime_type: &str, content: &'a [u8]) -> Self {
+        self.add_attachment(filename, mime_type, content);
+        self
+    }
+
+    /// Set an alternative HTML part.
+    ///
+    /// Note: This triggers `multipart/alternative` mode. If both an HTML part and at least one
+    /// attachement are specified, the `multipart/alternative` part will be nested within the first
+    /// `multipart/mixed` part. This should ensure that the HTML is displayed properly by clients
+    /// that prioritize it over the plain text part (should be the default for most clients) while
+    /// also properly displaying the attachments.
+    pub fn set_html_alt(&mut self, body_html: &str) {
+        self.body_html.replace(body_html.to_string());
+    }
+
+    /// Builder-style method to add an alternative HTML part.
+    ///
+    /// Note: This triggers `multipart/alternative` mode. If both an HTML part and at least one
+    /// attachement are specified, the `multipart/alternative` part will be nested within the first
+    /// `multipart/mixed` part. This should ensure that the HTML is displayed properly by clients
+    /// that prioritize it over the plain text part (should be the default for most clients) while
+    /// also properly displaying the attachments.
+    pub fn with_html_alt(mut self, body_html: &str) -> Self {
+        self.set_html_alt(body_html);
+        self
+    }
+
+    /// Sends the email. This will fail if no recipients have been added.
+    ///
+    /// Note: An `Auto-Submitted: auto-generated` header is added to avoid triggering OOO and
+    /// similar mails.
+    pub fn send(&self) -> Result<(), Error> {
+        if self.to.is_empty() {
+            bail!("no recipients provided for the mail, cannot send it.");
+        }
+
+        let now = proxmox_time::epoch_i64();
+        let body = self.format_mail(now)?;
+
+        let mut sendmail_process = Command::new("/usr/sbin/sendmail")
+            .arg("-B")
+            .arg("8BITMIME")
+            .arg("-f")
+            .arg(&self.mail_from)
+            .arg("--")
+            .args(self.to.iter().map(|p| &p.email).collect::<Vec<_>>())
+            .stdin(Stdio::piped())
+            .spawn()
+            .with_context(|| "could not spawn sendmail process")?;
+
+        sendmail_process
+            .stdin
+            .as_ref()
+            .unwrap()
+            .write_all(body.as_bytes())
+            .with_context(|| "couldn't write to sendmail stdin")?;
+
+        sendmail_process
+            .wait()
+            .with_context(|| "sendmail did not exit successfully")?;
+
+        Ok(())
+    }
+
+    fn format_mail(&self, now: i64) -> Result<String, Error> {
+        let mut body = String::new();
+        let file_boundary = format!("----_=_NextPart_001_{}", now);
+        let html_boundary = format!("----_=_NextPart_002_{}", now);
+
+        let to = if self.to.len() > 1 {
+            // don't disclose all recipients if the mail goes out to multiple
+            &Recipient {
+                name: Some("Undisclosed".to_string()),
+                email: "noreply".to_string(),
+            }
+        } else {
+            self.to
+                .first()
+                .expect("the checks before make sure there is at least one recipient")
+        };
+
+        if !self.attachments.is_empty() {
+            body.push_str("Content-Type: multipart/mixed;\n");
+            body.push_str(&format!("\tboundary=\"{file_boundary}\"\n"));
+            body.push_str("MIME-Version: 1.0\n");
+        } else if self.body_html.is_some() {
+            body.push_str("Content-Type: multipart/alternative;\n");
+            body.push_str(&format!("\tboundary=\"{html_boundary}\"\n"));
+            body.push_str("MIME-Version: 1.0\n");
+        } else if !self.subject.is_ascii()
+            || !self.mail_author.is_ascii()
+            || !to.name.as_ref().map(|t| t.is_ascii()).unwrap_or(true)
+        {
+            body.push_str("MIME-Version: 1.0\n");
+        }
+
+        let subject = if !self.subject.is_ascii() {
+            format!("Subject: =?utf-8?B?{}?=\n", base64::encode(&self.subject))
+        } else {
+            format!("Subject: {}\n", self.subject)
+        };
+
+        body.push_str(&subject);
+
+        let from = if !self.mail_author.is_ascii() {
+            format!(
+                "From: =?utf-8?B?{}?= <{}>\n",
+                base64::encode(&self.mail_author),
+                self.mail_from
+            )
+        } else {
+            format!("From: {} <{}>\n", self.mail_author, self.mail_from)
+        };
+
+        body.push_str(&from);
+
+        let to = if let Some(name) = &to.name {
+            if !name.is_ascii() {
+                format!("To: =?utf-8?B?{}?= <{}>\n", base64::encode(&name), to.email)
+            } else {
+                format!("To: {} <{}>\n", name, to.email)
+            }
+        } else {
+            format!("To: {}\n", to.email)
+        };
+
+        body.push_str(&to);
+
+        let rfc2822_date = proxmox_time::epoch_to_rfc2822(now)
+            .with_context(|| "could not convert epoch to rfc2822 date")?;
+        body.push_str(&format!("Date: {rfc2822_date}\n"));
+        body.push_str("Auto-Submitted: auto-generated;\n");
+
+        if self.body_html.is_some() && !self.attachments.is_empty() {
+            body.push_str("\nThis is a multi-part message in MIME format.\n");
+            body.push_str(&format!("\n--{file_boundary}\n"));
+            body.push_str(&format!(
+                "Content-Type: multipart/alternative; boundary=\"{html_boundary}\"\n"
+            ));
+            body.push_str("MIME-Version: 1.0\n");
+            body.push_str(&format!("\n--{html_boundary}\n"));
+        } else if self.body_html.is_some() {
+            body.push_str("\nThis is a multi-part message in MIME format.\n");
+            body.push_str(&format!("\n--{html_boundary}\n"));
+        } else if self.body_html.is_none() && !self.attachments.is_empty() {
+            body.push_str("\nThis is a multi-part message in MIME format.\n");
+            body.push_str(&format!("\n--{file_boundary}\n"));
+        }
+
+        body.push_str("Content-Type: text/plain;\n");
+        body.push_str("\tcharset=\"UTF-8\"\n");
+        body.push_str("Content-Transfer-Encoding: 8bit\n\n");
+        body.push_str(&self.body_txt);
+
+        if let Some(html) = &self.body_html {
+            body.push_str(&format!("\n--{html_boundary}\n"));
+            body.push_str("Content-Type: text/html;\n");
+            body.push_str("\tcharset=\"UTF-8\"\n");
+            body.push_str("Content-Transfer-Encoding: 8bit\n\n");
+            body.push_str(html);
+            body.push_str(&format!("\n--{html_boundary}--"));
+        }
+
+        for attachment in &self.attachments {
+            let filename = &attachment.filename;
+
+            body.push_str(&format!("\n--{file_boundary}\n"));
+            body.push_str(&format!(
+                "Content-Type: {}; name=\"{filename}\"\n",
+                attachment.mime
+            ));
+
+            // both `filename` and `filename*` are included for additional compatibility
+            body.push_str(&format!(
+                "Content-Disposition: attachment; filename=\"{filename}\"; filename*=UTF-8''{}\n",
+                utf8_percent_encode(filename, RFC5987SET)
+            ));
+            body.push_str("Content-Transfer-Encoding: base64\n\n");
+
+            // wrap the base64 string every 72 characters. this improves compatibility
+            let base64 = base64::encode(attachment.content)
+                .chars()
+                .enumerate()
+                .flat_map(|(i, c)| {
+                    if i != 0 && i % 72 == 0 {
+                        Some('\n')
+                    } else {
+                        None
+                    }
+                    .into_iter()
+                    .chain(std::iter::once(c))
+                })
+                .collect::<String>();
+            body.push_str(&base64);
+        }
+
+        if !self.attachments.is_empty() {
+            body.push_str(&format!("\n--{file_boundary}--"));
+        }
+
+        Ok(body)
+    }
+}
+
+#[cfg(test)]
+mod test {
+    use super::*;
+
+    #[test]
+    fn email_without_recipients_fails() {
+        let result = Mail::new("Sender", "mail at example.com", "hi", "body").send();
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn simple_ascii_text_mail() {
+        let mail = Mail::new(
+            "Sender Name",
+            "mailfrom at example.com",
+            "Subject Line",
+            "This is just ascii text.\nNothing too special.",
+        )
+        .with_recipient_and_name("Receiver Name", "receiver at example.com");
+
+        let body = mail.format_mail(0).expect("could not format mail");
+
+        assert_eq!(
+            body,
+            r#"Subject: Subject Line
+From: Sender Name <mailfrom at example.com>
+To: Receiver Name <receiver at example.com>
+Date: Thu, 01 Jan 1970 01:00:00 +0100
+Auto-Submitted: auto-generated;
+Content-Type: text/plain;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+This is just ascii text.
+Nothing too special."#
+        )
+    }
+
+    #[test]
+    fn multiple_receiver_redaction() {
+        let mail = Mail::new(
+            "Sender Name",
+            "mailfrom at example.com",
+            "Subject Line",
+            "This is just ascii text.\nNothing too special.",
+        )
+        .with_recipient_and_name("Receiver Name", "receiver at example.com")
+        .with_recipient("two at example.com");
+
+        let body = mail.format_mail(0).expect("could not format mail");
+
+        assert_eq!(
+            body,
+            r#"Subject: Subject Line
+From: Sender Name <mailfrom at example.com>
+To: Undisclosed <noreply>
+Date: Thu, 01 Jan 1970 01:00:00 +0100
+Auto-Submitted: auto-generated;
+Content-Type: text/plain;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+This is just ascii text.
+Nothing too special."#
+        )
+    }
+
+    #[test]
+    fn simple_utf8_text_mail() {
+        let mail = Mail::new(
+            "UTF-8 Sender Name ?",
+            "differentfrom at example.com",
+            "Subject Line ?",
+            "This utf-8 email should handle emojis\n??\nand weird german characters: ????\nand more.",
+        )
+        .with_recipient_and_name("Receiver Name📩", "receiver at example.com");
+
+        let body = mail.format_mail(1732806251).expect("could not format mail");
+
+        assert_eq!(
+            body,
+            r#"MIME-Version: 1.0
+Subject: =?utf-8?B?U3ViamVjdCBMaW5lIPCfp5E=?=
+From: =?utf-8?B?VVRGLTggU2VuZGVyIE5hbWUg8J+Tpw==?= <differentfrom at example.com>
+To: =?utf-8?B?UmVjZWl2ZXIgTmFtZfCfk6k=?= <receiver at example.com>
+Date: Thu, 28 Nov 2024 16:04:11 +0100
+Auto-Submitted: auto-generated;
+Content-Type: text/plain;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+This utf-8 email should handle emojis
+??
+and weird german characters: öäüß
+and more."#
+        )
+    }
+
+    #[test]
+    fn multipart_html_alternative() {
+        let mail = Mail::new(
+            "Sender Name",
+            "from at example.com",
+            "Subject Line",
+            "Lorem Ipsum Dolor Sit\nAmet",
+        )
+        .with_recipient("receiver at example.com")
+        .with_html_alt("<html>\n\t<body>\n\t\tLorem Ipsum Dolor Sit Amet\n\t</body>\n</html>");
+
+        let body = mail.format_mail(1732806251).expect("could not format mail");
+        assert_eq!(
+            body,
+            r#"Content-Type: multipart/alternative;
+	boundary="----_=_NextPart_002_1732806251"
+MIME-Version: 1.0
+Subject: Subject Line
+From: Sender Name <from at example.com>
+To: receiver at example.com
+Date: Thu, 28 Nov 2024 16:04:11 +0100
+Auto-Submitted: auto-generated;
+
+This is a multi-part message in MIME format.
+
+------_=_NextPart_002_1732806251
+Content-Type: text/plain;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+Lorem Ipsum Dolor Sit
+Amet
+------_=_NextPart_002_1732806251
+Content-Type: text/html;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+<html>
+	<body>
+		Lorem Ipsum Dolor Sit Amet
+	</body>
+</html>
+------_=_NextPart_002_1732806251--"#
+        )
+    }
+
+    #[test]
+    fn multipart_plain_text_attachments_mixed() {
+        let bin: [u8; 62] = [
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+        ];
+
+        let mail = Mail::new(
+            "Sender Name",
+            "from at example.com",
+            "Subject Line",
+            "Lorem Ipsum Dolor Sit\nAmet",
+        )
+        .with_recipient_and_name("Receiver Name", "receiver at example.com")
+        .with_attachment("deadbeef.bin", "application/octet-stream", &bin);
+
+        let body = mail.format_mail(1732806251).expect("could not format mail");
+        assert_eq!(
+            body,
+            r#"Content-Type: multipart/mixed;
+	boundary="----_=_NextPart_001_1732806251"
+MIME-Version: 1.0
+Subject: Subject Line
+From: Sender Name <from at example.com>
+To: Receiver Name <receiver at example.com>
+Date: Thu, 28 Nov 2024 16:04:11 +0100
+Auto-Submitted: auto-generated;
+
+This is a multi-part message in MIME format.
+
+------_=_NextPart_001_1732806251
+Content-Type: text/plain;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+Lorem Ipsum Dolor Sit
+Amet
+------_=_NextPart_001_1732806251
+Content-Type: application/octet-stream; name="deadbeef.bin"
+Content-Disposition: attachment; filename="deadbeef.bin"; filename*=UTF-8''deadbeef.bin
+Content-Transfer-Encoding: base64
+
+3q2+796tvu/erb7v3q3erb7v3q2+796tvu/erd6tvu/erb7v3q2+796t3q2+796tvu/erb7v
+3q2+796tvu8=
+------_=_NextPart_001_1732806251--"#
+        )
+    }
+
+    #[test]
+    fn multipart_plain_text_html_alternative_attachments() {
+        let bin: [u8; 62] = [
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef, 0xde, 0xad,
+            0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
+        ];
+
+        let mail = Mail::new(
+            "Sender Name",
+            "from at example.com",
+            "Subject Line",
+            "Lorem Ipsum Dolor Sit\nAmet",
+        )
+        .with_recipient_and_name("Receiver Name", "receiver at example.com")
+        .with_attachment("deadbeef.bin", "application/octet-stream", &bin)
+        .with_attachment("🐄💀.bin", "image/bmp", &bin)
+        .with_html_alt("<html>\n\t<body>\n\t\tLorem Ipsum Dolor Sit Amet\n\t</body>\n</html>");
+
+        let body = mail.format_mail(1732806251).expect("could not format mail");
+
+        assert_eq!(
+            body,
+            r#"Content-Type: multipart/mixed;
+	boundary="----_=_NextPart_001_1732806251"
+MIME-Version: 1.0
+Subject: Subject Line
+From: Sender Name <from at example.com>
+To: Receiver Name <receiver at example.com>
+Date: Thu, 28 Nov 2024 16:04:11 +0100
+Auto-Submitted: auto-generated;
+
+This is a multi-part message in MIME format.
+
+------_=_NextPart_001_1732806251
+Content-Type: multipart/alternative; boundary="----_=_NextPart_002_1732806251"
+MIME-Version: 1.0
+
+------_=_NextPart_002_1732806251
+Content-Type: text/plain;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+Lorem Ipsum Dolor Sit
+Amet
+------_=_NextPart_002_1732806251
+Content-Type: text/html;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+<html>
+	<body>
+		Lorem Ipsum Dolor Sit Amet
+	</body>
+</html>
+------_=_NextPart_002_1732806251--
+------_=_NextPart_001_1732806251
+Content-Type: application/octet-stream; name="deadbeef.bin"
+Content-Disposition: attachment; filename="deadbeef.bin"; filename*=UTF-8''deadbeef.bin
+Content-Transfer-Encoding: base64
+
+3q2+796tvu/erb7v3q3erb7v3q2+796tvu/erd6tvu/erb7v3q2+796t3q2+796tvu/erb7v
+3q2+796tvu8=
+------_=_NextPart_001_1732806251
+Content-Type: image/bmp; name="🐄💀.bin"
+Content-Disposition: attachment; filename="🐄💀.bin"; filename*=UTF-8''%F0%9F%90%84%F0%9F%92%80.bin
+Content-Transfer-Encoding: base64
+
+3q2+796tvu/erb7v3q3erb7v3q2+796tvu/erd6tvu/erb7v3q2+796t3q2+796tvu/erb7v
+3q2+796tvu8=
+------_=_NextPart_001_1732806251--"#
+        )
+    }
+
+    #[test]
+    fn test_format_mail_multipart() {
+        let mail = Mail::new(
+            "Fred Oobar",
+            "foobar at example.com",
+            "This is the subject",
+            "This is the plain body",
+        )
+        .with_recipient_and_name("Tony Est", "test at example.com")
+        .with_html_alt("This is the HTML body");
+
+        let body = mail.format_mail(1718977850).expect("could not format mail");
+
+        assert_eq!(
+            body,
+            r#"Content-Type: multipart/alternative;
+	boundary="----_=_NextPart_002_1718977850"
+MIME-Version: 1.0
+Subject: This is the subject
+From: Fred Oobar <foobar at example.com>
+To: Tony Est <test at example.com>
+Date: Fri, 21 Jun 2024 15:50:50 +0200
+Auto-Submitted: auto-generated;
+
+This is a multi-part message in MIME format.
+
+------_=_NextPart_002_1718977850
+Content-Type: text/plain;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+This is the plain body
+------_=_NextPart_002_1718977850
+Content-Type: text/html;
+	charset="UTF-8"
+Content-Transfer-Encoding: 8bit
+
+This is the HTML body
+------_=_NextPart_002_1718977850--"#
+        );
+    }
+}
-- 
2.39.5
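A minimal usage sketch of the builder API from this patch, for readers skimming the diff (illustrative only: the `proxmox_sendmail` import path and the use of `anyhow::Error` are assumptions, not taken from the patch):

```
use proxmox_sendmail::Mail; // assumed crate/module path

fn send_report() -> Result<(), anyhow::Error> {
    // plain-text body plus an HTML alternative; send() pipes the
    // formatted message to /usr/sbin/sendmail
    Mail::new(
        "Proxmox Backup Server",
        "pbs at example.com",
        "Backup finished",
        "All snapshots were backed up successfully.",
    )
    .with_recipient_and_name("Admin", "admin at example.com")
    .with_html_alt("<html><body><p>All snapshots were backed up successfully.</p></body></html>")
    .send()
}
```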
From t.lamprecht at proxmox.com  Fri Nov 29 13:31:41 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Fri, 29 Nov 2024 13:31:41 +0100
Subject: [pbs-devel] [PATCH proxmox] sys: fs: set FD_CLOEXEC when creating temp files
In-Reply-To: 
References: <20241128145440.4119007-1-d.csapak@proxmox.com>
 <771443181.1837.1732867036375@webmail.proxmox.com>
Message-ID: <370c1808-5812-4c19-a084-3313243b3771@proxmox.com>

Am 29.11.24 um 11:21 schrieb Thomas Lamprecht:
> Am 29.11.24 um 09:02 schrieb Dominik Csapak:
>> the question for me is if it's ok to use since mkostemp is only a glibc extension (since 2.7) and we
>> may use that in proxmox-backup-client which we want to statically build ?
>> (not sure how that static compilation works with such a thing though...)
>
> FWIW, it's supported by MUSL:
>
> http://git.musl-libc.org/cgit/musl/tree/src/temp/mkostemp.c

And just to clarify, the rust libc crate supports different libc targets and
exposes mkostemp among others (all?) for both the GNU libc and musl libc
targets.

So using that method might indeed be the best option for now and should not
hinder any static builds, as if not with glibc then we most probably do
them with musl.
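To make that concrete, a rough sketch of a direct `libc::mkostemp` call through the libc crate (untested illustration; the path is made up, and real code would wrap the raw fd):

```
use std::ffi::CString;

fn cloexec_tmpfile() -> std::io::Result<std::os::fd::RawFd> {
    // mkostemp() rewrites the trailing XXXXXX in place, so keep an
    // owned, NUL-terminated buffer
    let mut template = CString::new("/tmp/example_XXXXXX")
        .unwrap()
        .into_bytes_with_nul();
    // O_CLOEXEC is applied atomically at creation, no separate fcntl() race
    let fd = unsafe { libc::mkostemp(template.as_mut_ptr().cast(), libc::O_CLOEXEC) };
    if fd < 0 {
        return Err(std::io::Error::last_os_error());
    }
    Ok(fd)
}
```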
From d.csapak at proxmox.com  Fri Nov 29 14:12:33 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 14:12:33 +0100
Subject: [pbs-devel] [PATCH proxmox] sys: fs: set FD_CLOEXEC when creating temp files
In-Reply-To: <370c1808-5812-4c19-a084-3313243b3771@proxmox.com>
References: <20241128145440.4119007-1-d.csapak@proxmox.com>
 <771443181.1837.1732867036375@webmail.proxmox.com>
 <370c1808-5812-4c19-a084-3313243b3771@proxmox.com>
Message-ID: <88232a5c-0180-4189-9c5c-7684ca92ec54@proxmox.com>

On 11/29/24 13:31, Thomas Lamprecht wrote:
> Am 29.11.24 um 11:21 schrieb Thomas Lamprecht:
>> Am 29.11.24 um 09:02 schrieb Dominik Csapak:
>>> the question for me is if it's ok to use since mkostemp is only a glibc extension (since 2.7) and we
>>> may use that in proxmox-backup-client which we want to statically build ?
>>> (not sure how that static compilation works with such a thing though...)
>>
>> FWIW, it's supported by MUSL:
>>
>> http://git.musl-libc.org/cgit/musl/tree/src/temp/mkostemp.c
>
> And just to clarify, the rust libc crate supports different libc targets and
> exposes mkostemp among others (all?) for both the GNU libc and musl libc
> targets.
>
> So using that method might indeed be the best option for now and should not
> hinder any static builds, as if not with glibc then we most probably do
> them with musl.

great, thanks for looking that up. I'll send a v2 using mkostemp (with a
small nix-like wrapper around it, so we can use it as a drop-in replacement,
and maybe send it upstream?)

From f.gruenbichler at proxmox.com  Fri Nov 29 14:13:27 2024
From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=)
Date: Fri, 29 Nov 2024 14:13:27 +0100
Subject: [pbs-devel] [RFC proxmox 0/2] worker task setup improvements
Message-ID: <20241129131329.765815-1-f.gruenbichler@proxmox.com>

two of the more prominent issues, RFC for now since we want more testing and
more follow-ups before applying.

Fabian Grünbichler (2):
  rest-server: handle failure in worker task setup correctly
  rest-server: close race window when updating worker task count

 proxmox-rest-server/src/worker_task.rs | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

-- 
2.39.5

From f.gruenbichler at proxmox.com  Fri Nov 29 14:13:28 2024
From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=)
Date: Fri, 29 Nov 2024 14:13:28 +0100
Subject: [pbs-devel] [PATCH proxmox 1/2] rest-server: handle failure in worker task setup correctly
In-Reply-To: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
References: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
Message-ID: <20241129131329.765815-2-f.gruenbichler@proxmox.com>

if setting up a new worker fails after it has been inserted into the
WORKER_TASK_LIST, we need to clean it up instead of bubbling up the error
right away, else we "leak" the worker task and it never finishes..

Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
---
we probably want to optimize update_active_workers as well to reduce the lock
contention there that triggers this issue in the first place..

 proxmox-rest-server/src/worker_task.rs | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/proxmox-rest-server/src/worker_task.rs b/proxmox-rest-server/src/worker_task.rs
index 6e76c2ca..3ca93965 100644
--- a/proxmox-rest-server/src/worker_task.rs
+++ b/proxmox-rest-server/src/worker_task.rs
@@ -923,7 +923,12 @@ impl WorkerTask {
             set_worker_count(hash.len());
         }
 
-        setup.update_active_workers(Some(&upid))?;
+        let res = setup.update_active_workers(Some(&upid));
+        if res.is_err() {
+            // needed to undo the insertion into WORKER_TASK_LIST above
+            worker.log_result(&res);
+            res?
+        }
 
         Ok((worker, logger))
     }
-- 
2.39.5

From f.gruenbichler at proxmox.com  Fri Nov 29 14:13:29 2024
From: f.gruenbichler at proxmox.com (=?UTF-8?q?Fabian=20Gr=C3=BCnbichler?=)
Date: Fri, 29 Nov 2024 14:13:29 +0100
Subject: [pbs-devel] [PATCH proxmox 2/2] rest-server: close race window when updating worker task count
In-Reply-To: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
References: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
Message-ID: <20241129131329.765815-3-f.gruenbichler@proxmox.com>

this mimics how the count is updated when spawning a new task - the lock scope
needs to cover the count update itself, else there's a race when multiple
workers log their result at the same time..

Co-developed-by: Dominik Csapak <d.csapak at proxmox.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
---
 proxmox-rest-server/src/worker_task.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/proxmox-rest-server/src/worker_task.rs b/proxmox-rest-server/src/worker_task.rs
index 3ca93965..018d18c0 100644
--- a/proxmox-rest-server/src/worker_task.rs
+++ b/proxmox-rest-server/src/worker_task.rs
@@ -1023,7 +1023,8 @@ impl WorkerTask {
 
         WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
         let _ = self.setup.update_active_workers(None);
-        set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
+        let lock = WORKER_TASK_LIST.lock().unwrap();
+        set_worker_count(lock.len());
     }
 
     /// Log a message.
-- 
2.39.5
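The locking pattern this patch restores can be illustrated with a self-contained sketch (hypothetical types, not the actual rest-server code); the point is that the length fed to the counter update must come from the same lock scope as the removal:

```
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;

static TASKS: Mutex<BTreeMap<u64, String>> = Mutex::new(BTreeMap::new());
static WORKER_COUNT: AtomicUsize = AtomicUsize::new(0);

fn finish_task(task_id: u64) {
    // taking the lock twice (once for remove(), once for len()) lets another
    // finishing worker interleave and publish a stale count; holding one
    // guard across both steps closes that window
    let mut tasks = TASKS.lock().unwrap();
    tasks.remove(&task_id);
    WORKER_COUNT.store(tasks.len(), Ordering::SeqCst);
}
```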
From t.lamprecht at proxmox.com  Fri Nov 29 14:27:17 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Fri, 29 Nov 2024 14:27:17 +0100
Subject: [pbs-devel] [PATCH proxmox 2/2] rest-server: close race window when updating worker task count
In-Reply-To: <20241129131329.765815-3-f.gruenbichler@proxmox.com>
References: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
 <20241129131329.765815-3-f.gruenbichler@proxmox.com>
Message-ID: <00e24e50-5df8-4c62-abe2-e14916c4a7ba@proxmox.com>

Am 29.11.24 um 14:13 schrieb Fabian Grünbichler:
> this mimics how the count is updated when spawning a new task - the lock scope
> needs to cover the count update itself, else there's a race when multiple
> workers log their result at the same time..
>
> Co-developed-by: Dominik Csapak <d.csapak at proxmox.com>
> Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
> ---
>  proxmox-rest-server/src/worker_task.rs | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/proxmox-rest-server/src/worker_task.rs b/proxmox-rest-server/src/worker_task.rs
> index 3ca93965..018d18c0 100644
> --- a/proxmox-rest-server/src/worker_task.rs
> +++ b/proxmox-rest-server/src/worker_task.rs
> @@ -1023,7 +1023,8 @@ impl WorkerTask {
>  
>          WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
>          let _ = self.setup.update_active_workers(None);
> -        set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
> +        let lock = WORKER_TASK_LIST.lock().unwrap();

why not use this also for the remove operation above? I.e. something like:

let locked_worker_tasks = WORKER_TASK_LIST.lock().unwrap();

locked_worker_tasks.remove(&self.upid.task_id);

set_worker_count(locked_worker_tasks.len())

If there are technical reasons speaking against this, which I hope not, then a
comment would be definitively warranted; otherwise using a single lock would
IMO make this a bit clearer, and locking twice isn't exactly cheaper.

Looks OK besides that, but would still want to take a closer look.

> +        set_worker_count(lock.len());
>      }
>  
>      /// Log a message.

From t.lamprecht at proxmox.com  Fri Nov 29 14:34:00 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Fri, 29 Nov 2024 14:34:00 +0100
Subject: [pbs-devel] [PATCH proxmox 1/2] rest-server: handle failure in worker task setup correctly
In-Reply-To: <20241129131329.765815-2-f.gruenbichler@proxmox.com>
References: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
 <20241129131329.765815-2-f.gruenbichler@proxmox.com>
Message-ID: <01720968-bf49-4693-abdc-fd961fdd7aae@proxmox.com>

Am 29.11.24 um 14:13 schrieb Fabian Grünbichler:
> if setting up a new worker fails after it has been inserted into the
> WORKER_TASK_LIST, we need to clean it up instead of bubbling up the error right
> away, else we "leak" the worker task and it never finishes..
>
> Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
> ---
> we probably want to optimize update_active_workers as well to reduce the lock
> contention there that triggers this issue in the first place..
>
>  proxmox-rest-server/src/worker_task.rs | 7 ++++++-
>  1 file changed, 6 insertions(+), 1 deletion(-)
>
> diff --git a/proxmox-rest-server/src/worker_task.rs b/proxmox-rest-server/src/worker_task.rs
> index 6e76c2ca..3ca93965 100644
> --- a/proxmox-rest-server/src/worker_task.rs
> +++ b/proxmox-rest-server/src/worker_task.rs
> @@ -923,7 +923,12 @@ impl WorkerTask {
>          set_worker_count(hash.len());
>      }
>  
> -        setup.update_active_workers(Some(&upid))?;
> +        let res = setup.update_active_workers(Some(&upid));
> +        if res.is_err() {
> +            // needed to undo the insertion into WORKER_TASK_LIST above
> +            worker.log_result(&res);
> +            res?
> +        }

Seems OK from a quick look, need a bit more time for a proper review.

What the quick look can give though is style nits, i.e. IMO a bit unidiomatic
for our code. Would prefer one of:

Combined return path through matching

match setup.update_active_workers(Some(&upid)) {
    Err(err) => {
        // needed to undo the insertion into the active WORKER_TASK_LIST above
        worker.log_result(&res);
        Err(err)
    }
    Ok(_) => Ok((worker, logger))
}

or, similar to yours, but avoiding the outer variable:

if let Err(err) = setup.update_active_workers(Some(&upid)) {
    // needed to undo the insertion into the active WORKER_TASK_LIST above
    worker.log_result(&res);
    return Err(err);
}

IMO both fit slightly (!) better for how errors are commonly dealt with in
rust and are thus a bit easier to understand correctly on reading.

> 
>  Ok((worker, logger))
>  }

From d.csapak at proxmox.com  Fri Nov 29 15:20:58 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 15:20:58 +0100
Subject: [pbs-devel] [PATCH proxmox 2/2] rest-server: close race window when updating worker task count
In-Reply-To: <00e24e50-5df8-4c62-abe2-e14916c4a7ba@proxmox.com>
References: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
 <20241129131329.765815-3-f.gruenbichler@proxmox.com>
 <00e24e50-5df8-4c62-abe2-e14916c4a7ba@proxmox.com>
Message-ID: <211810f3-3eef-42bf-b17d-6f8f5f24c8a8@proxmox.com>

On 11/29/24 14:27, Thomas Lamprecht wrote:
> Am 29.11.24 um 14:13 schrieb Fabian Grünbichler:
>> this mimics how the count is updated when spawning a new task - the lock scope
>> needs to cover the count update itself, else there's a race when multiple
>> workers log their result at the same time..
>>
>> Co-developed-by: Dominik Csapak <d.csapak at proxmox.com>
>> Signed-off-by: Fabian Grünbichler <f.gruenbichler at proxmox.com>
>> ---
>>  proxmox-rest-server/src/worker_task.rs | 3 ++-
>>  1 file changed, 2 insertions(+), 1 deletion(-)
>>
>> diff --git a/proxmox-rest-server/src/worker_task.rs b/proxmox-rest-server/src/worker_task.rs
>> index 3ca93965..018d18c0 100644
>> --- a/proxmox-rest-server/src/worker_task.rs
>> +++ b/proxmox-rest-server/src/worker_task.rs
>> @@ -1023,7 +1023,8 @@ impl WorkerTask {
>>  
>>          WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
>>          let _ = self.setup.update_active_workers(None);
>> -        set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
>> +        let lock = WORKER_TASK_LIST.lock().unwrap();
>
> why not use this also for the remove operation above? I.e. something like:
>
> let locked_worker_tasks = WORKER_TASK_LIST.lock().unwrap();
>
> locked_worker_tasks.remove(&self.upid.task_id);
>
> set_worker_count(locked_worker_tasks.len())
>
> If there are technical reasons speaking against this, which I hope not, then a
> comment would be definitively warranted; otherwise using a single lock would
> IMO make this a bit clearer, and locking twice isn't exactly cheaper.

here the reason for the split lock is that 'self.setup.update_active_workers'
internally can take a lock on the WORKER_TASK_LIST, so we can't hold one over
that call

not super sure if we can reorder these so that we reduce the count before
updating though. From what i understand, we want to remove ourselves from the
list of active tasks before reducing that counter.

as fabian indicated in the other patch, we should probably split up
'update_active_workers' into separate methods to

* add one worker
* remove one worker
* housekeeping for leftover workers

then we could design the removal in a way that does not rely on the
WORKER_TASK_LIST in the first place, thus we could remove it from the active
list before removing it from the internal hashmap (and could take a lock
around both, the list and the count)
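a rough skeleton of what such a split could look like (all names hypothetical, bodies elided):

```
// sketch only, not the actual proxmox-rest-server types
struct WorkerTaskSetup;
struct Upid(String);
type Error = Box<dyn std::error::Error>;

impl WorkerTaskSetup {
    /// register one freshly spawned worker in the active list
    fn register_worker(&self, _upid: &Upid) -> Result<(), Error> {
        todo!("append this UPID to the active worker index")
    }

    /// remove exactly one finished worker from the active list
    fn unregister_worker(&self, _upid: &Upid) -> Result<(), Error> {
        todo!("drop this UPID without rewriting unrelated entries")
    }

    /// housekeeping for leftover workers, e.g. after a crash
    fn cleanup_leftover_workers(&self) -> Result<(), Error> {
        todo!("prune entries whose processes are gone")
    }
}
```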
> 
> Looks OK besides that, but would still want to take a closer look.
> 
>> +        set_worker_count(lock.len());
>>      }
>>  
>>      /// Log a message.
> 
> 
> 
> _______________________________________________
> pbs-devel mailing list
> pbs-devel at lists.proxmox.com
> https://lists.proxmox.com/cgi-bin/mailman/listinfo/pbs-devel

From d.csapak at proxmox.com  Fri Nov 29 15:28:00 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 15:28:00 +0100
Subject: [pbs-devel] [PATCH proxmox v2 1/2] sys: fs: set CLOEXEC when creating temp files
Message-ID: <20241129142801.3334969-1-d.csapak@proxmox.com>

In general we want all open files to have set CLOEXEC since our reloading
mechanism can basically fork at any moment and we don't want newer daemons
to carry around old file descriptors, especially lock files.

Since `make_tmp_file` is called by many things (e.g. open_file_locked,
logrotate, rrd), set O_CLOEXEC with mkostemp.

This fixes issues with leftover file descriptors e.g. tape backups not
working because of lingering locks after a reload, or having deleted rrd
files open.

Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
changes from v1:
* introduce a mkostemp helper which is similar to nix's mkstemp helper
  (the code is a copy of mkstemp aside from the call to libc::mkostemp
  + the oflag handling)

I did it this way, since we may be able to upstream this, have to look
more closely at this though.

 proxmox-sys/src/fs/file.rs | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/proxmox-sys/src/fs/file.rs b/proxmox-sys/src/fs/file.rs
index fbfc0b58..74b9e74e 100644
--- a/proxmox-sys/src/fs/file.rs
+++ b/proxmox-sys/src/fs/file.rs
@@ -116,6 +116,29 @@ pub fn file_read_firstline<P: AsRef<Path>>(path: P) -> Result<String, Error> {
     read_firstline(path).map_err(|err| format_err!("unable to read {path:?} - {err}"))
 }
 
+#[inline]
+/// Creates a tmpfile like [`nix::unistd::mkstemp`], but with [`nix::fcntl::OFlag`] set.
+///
+/// Note that some flags are masked out since they can produce an error, see mkostemp(2) for details.
+// code is mostly copied from nix mkstemp
+fn mkostemp<P: ?Sized + NixPath>(
+    template: &P,
+    oflag: OFlag,
+) -> nix::Result<(std::os::fd::RawFd, PathBuf)> {
+    use std::os::unix::ffi::OsStringExt;
+
+    let mut path = template.with_nix_path(|path| path.to_bytes_with_nul().to_owned())?;
+    let p = path.as_mut_ptr().cast();
+
+    let flags = OFlag::intersection(OFlag::O_APPEND | OFlag::O_CLOEXEC | OFlag::O_SYNC, oflag);
+
+    let fd = unsafe { libc::mkostemp(p, flags.bits()) };
+    let last = path.pop(); // drop the trailing nul
+    debug_assert!(last == Some(b'\0'));
+    let pathname = std::ffi::OsString::from_vec(path);
+    Errno::result(fd)?;
+    Ok((fd, PathBuf::from(pathname)))
+}
+
 /// Takes a Path and CreateOptions, creates a tmpfile from it and returns
 /// a RawFd and PathBuf for it
 pub fn make_tmp_file<P: AsRef<Path>>(
@@ -127,7 +150,7 @@ pub fn make_tmp_file<P: AsRef<Path>>(
     // use mkstemp here, because it works with different processes, threads, even tokio tasks
     let mut template = path.to_owned();
     template.set_extension("tmp_XXXXXX");
-    let (mut file, tmp_path) = match unistd::mkstemp(&template) {
+    let (mut file, tmp_path) = match mkostemp(&template, OFlag::O_CLOEXEC) {
         Ok((fd, path)) => (unsafe { File::from_raw_fd(fd) }, path),
         Err(err) => bail!("mkstemp {:?} failed: {}", template, err),
     };
-- 
2.39.5
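One way to sanity-check the new behavior (a test sketch; it assumes `CreateOptions::new()` exists as in proxmox-sys and the `make_tmp_file` signature shown above):

```
use std::os::fd::AsRawFd;
use std::path::Path;

use proxmox_sys::fs::{make_tmp_file, CreateOptions};

#[test]
fn tmp_files_are_cloexec() {
    let (file, tmp_path) =
        make_tmp_file(Path::new("/tmp/cloexec_test"), CreateOptions::new()).unwrap();

    // FD_CLOEXEC should now be set right at creation time via mkostemp()
    let flags = unsafe { libc::fcntl(file.as_raw_fd(), libc::F_GETFD) };
    assert!(flags >= 0, "fcntl(F_GETFD) failed");
    assert!(flags & libc::FD_CLOEXEC != 0, "tmp file is not close-on-exec");

    let _ = std::fs::remove_file(&tmp_path);
}
```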

From d.csapak at proxmox.com  Fri Nov 29 15:28:01 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 15:28:01 +0100
Subject: [pbs-devel] [PATCH proxmox v2 2/2] sys: open directories with O_CLOEXEC
In-Reply-To: <20241129142801.3334969-1-d.csapak@proxmox.com>
References: <20241129142801.3334969-1-d.csapak@proxmox.com>
Message-ID: <20241129142801.3334969-2-d.csapak@proxmox.com>

so they don't linger around in case of a daemon reload.

Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
new in v2

 proxmox-sys/src/fd.rs     |  2 +-
 proxmox-sys/src/fs/dir.rs | 15 +++++++++------
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/proxmox-sys/src/fd.rs b/proxmox-sys/src/fd.rs
index 8d85bd2e..386e4222 100644
--- a/proxmox-sys/src/fd.rs
+++ b/proxmox-sys/src/fd.rs
@@ -24,7 +24,7 @@ pub fn change_cloexec(fd: RawFd, on: bool) -> Result<(), anyhow::Error> {
 }
 
 pub(crate) fn cwd() -> Result<OwnedFd, nix::Error> {
-    open(".", OFlag::O_DIRECTORY, stat::Mode::empty())
+    open(".", crate::fs::DIR_FLAGS, stat::Mode::empty())
 }
 
 pub fn open<P>(path: &P, oflag: OFlag, mode: Mode) -> Result<OwnedFd, nix::Error>

diff --git a/proxmox-sys/src/fs/dir.rs b/proxmox-sys/src/fs/dir.rs
index c903ab87..a093ed99 100644
--- a/proxmox-sys/src/fs/dir.rs
+++ b/proxmox-sys/src/fs/dir.rs
@@ -14,6 +14,9 @@ use proxmox_lang::try_block;
 
 use crate::fs::{fchown, CreateOptions};
 
+/// The default [`OFlag`] we want to use when opening directories.
+pub(crate) const DIR_FLAGS: OFlag = OFlag::O_DIRECTORY.union(OFlag::O_CLOEXEC);
+
 /// Creates directory at the provided path with specified ownership.
 ///
 /// Errors if the directory already exists.
@@ -66,7 +69,7 @@ pub fn ensure_dir_exists<P: AsRef<Path>>(
         Err(err) => bail!("unable to create directory {path:?} - {err}",),
     }
 
-    let fd = nix::fcntl::open(path, OFlag::O_DIRECTORY, stat::Mode::empty())
+    let fd = nix::fcntl::open(path, DIR_FLAGS, stat::Mode::empty())
         .map(|fd| unsafe { OwnedFd::from_raw_fd(fd) })
         .map_err(|err| format_err!("unable to open created directory {path:?} - {err}"))?;
     // umask defaults to 022 so make sure the mode is fully honowed:
@@ -120,7 +123,7 @@ fn create_path_do(
         Some(Component::Prefix(_)) => bail!("illegal prefix path component encountered"),
         Some(Component::RootDir) => {
             let _ = iter.next();
-            crate::fd::open(c"/", OFlag::O_DIRECTORY, stat::Mode::empty())?
+            crate::fd::open(c"/", DIR_FLAGS, stat::Mode::empty())?
         }
         Some(Component::CurDir) => {
             let _ = iter.next();
@@ -128,7 +131,7 @@ fn create_path_do(
         }
         Some(Component::ParentDir) => {
             let _ = iter.next();
-            crate::fd::open(c"..", OFlag::O_DIRECTORY, stat::Mode::empty())?
+            crate::fd::open(c"..", DIR_FLAGS, stat::Mode::empty())?
         }
         Some(Component::Normal(_)) => {
             // simply do not advance the iterator, heavy lifting happens in create_path_at_do()
@@ -154,7 +157,7 @@ fn create_path_at_do(
             None => return Ok(created),
 
             Some(Component::ParentDir) => {
-                at = crate::fd::openat(&at, c"..", OFlag::O_DIRECTORY, stat::Mode::empty())?;
+                at = crate::fd::openat(&at, c"..", DIR_FLAGS, stat::Mode::empty())?;
             }
 
             Some(Component::Normal(path)) => {
@@ -175,7 +178,7 @@ fn create_path_at_do(
                     Err(e) => return Err(e.into()),
                     Ok(_) => true,
                 };
-                at = crate::fd::openat(&at, path, OFlag::O_DIRECTORY, stat::Mode::empty())?;
+                at = crate::fd::openat(&at, path, DIR_FLAGS, stat::Mode::empty())?;
 
                 if let (true, Some(opts)) = (created, opts) {
                     if opts.owner.is_some() || opts.group.is_some() {
@@ -222,7 +225,7 @@ pub fn make_tmp_dir<P: AsRef<Path>>(
 
     if let Some(options) = options {
         if let Err(err) = try_block!({
-            let mut fd = crate::fd::open(&path, OFlag::O_DIRECTORY, stat::Mode::empty())?;
+            let mut fd = crate::fd::open(&path, DIR_FLAGS, stat::Mode::empty())?;
             options.apply_to(&mut fd, &path)?;
             Ok::<(), Error>(())
         }) {
-- 
2.39.5
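A note on the `DIR_FLAGS` constant above: the patch uses `OFlag::union` rather than `|` because the `BitOr` impl is not a `const fn`; a standalone equivalent (sketch):

```
use nix::fcntl::OFlag;

// OFlag::union is const, so the combination can be a compile-time constant
pub const DIR_FLAGS: OFlag = OFlag::O_DIRECTORY.union(OFlag::O_CLOEXEC);

fn main() {
    assert!(DIR_FLAGS.contains(OFlag::O_DIRECTORY));
    assert!(DIR_FLAGS.contains(OFlag::O_CLOEXEC));
}
```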
From t.lamprecht at proxmox.com  Fri Nov 29 15:38:50 2024
From: t.lamprecht at proxmox.com (Thomas Lamprecht)
Date: Fri, 29 Nov 2024 15:38:50 +0100
Subject: [pbs-devel] [PATCH proxmox 1/4] sendmail: add sendmail crate
In-Reply-To: <20241129105321.143877-1-s.sterz@proxmox.com>
References: <20241129105321.143877-1-s.sterz@proxmox.com>
Message-ID: <57784056-9375-48fe-8dd5-3b6f88f55c9d@proxmox.com>

Am 29.11.24 um 11:53 schrieb Shannon Sterz:
> add the `proxmox-sendmail` crate that makes it easier to send mails via
> the `sendmail` utility. features include:
>
> - multipart/alternative support for html+plain text mails
> - mulitpart/mixed support for mails with attachments
> - automatic nesting of multipart/alternative and multipart/mixed parts
> - masking recipients to avoid disclosing them to everyone
> - encoding Subject, To, From, and attachment file names correctly
> - adding an `Auto-Submitted` header to avoid triggering automated mails
>
> also includes several tests to ensure that mails are formatted
> correctly.

Looks quite nice to me, but no need to rush this, so maybe another opinion
(@Lukas) wouldn't hurt here.

From d.csapak at proxmox.com  Fri Nov 29 15:53:41 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 15:53:41 +0100
Subject: [pbs-devel] [RFC proxmox 0/2] worker task setup improvements
In-Reply-To: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
References: <20241129131329.765815-1-f.gruenbichler@proxmox.com>
Message-ID: <065a7c23-d82e-42bd-ab8a-3aeabcd513e6@proxmox.com>

regardless of the style comments thomas already wrote, just wanted to report
that it fixed my local testcase:

* start very many restore tasks in parallel, until they run into an error
  with task state update (done with proxmox-backup-client restore > /dev/null)
* reload the daemon
* stop all the tasks (e.g. by killing the backup-client on the client side)
* see that there are no more running tasks in the gui, but still old running
  proxmox-backup-proxy daemons

This is fixed with the first patch of the series

From d.csapak at proxmox.com  Fri Nov 29 16:37:44 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 16:37:44 +0100
Subject: [pbs-devel] [PATCH proxmox] daemon: clean up middle process of double fork
Message-ID: <20241129153744.4128441-1-d.csapak@proxmox.com>

so we don't leave around a zombie process when the old daemon still needs to
run, because of e.g. a running task.

Since this is mostly a cosmetic issue though, only try a cleanup once, so we
don't unnecessarily block or run into other issues here. (It could happen
that it didn't exit at that point, but it's very unlikely.)
Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
maybe the comment could be improved, but i tried not to be overly verbose
there, since it's not really an issue anyway

 proxmox-daemon/src/server.rs | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/proxmox-daemon/src/server.rs b/proxmox-daemon/src/server.rs
index efea9078..edc64795 100644
--- a/proxmox-daemon/src/server.rs
+++ b/proxmox-daemon/src/server.rs
@@ -165,10 +165,12 @@ impl Reloader {
                 // No matter how we managed to get here, this is the time where we bail out quickly:
                 unsafe { libc::_exit(-1) }
             }
-            Ok(ForkResult::Parent { child }) => {
+            Ok(ForkResult::Parent {
+                child: middle_child,
+            }) => {
                 log::debug!(
                     "forked off a new server (first pid: {}), waiting for 2nd pid",
-                    child
+                    middle_child
                 );
                 std::mem::drop(pnew);
                 let mut pold = std::fs::File::from(pold);
@@ -211,6 +213,13 @@ impl Reloader {
                     log::error!("child vanished during reload: {}", e);
                 }
 
+                // try exactly once to get rid of the zombie process of middle_child, but
+                // non blocking and without error handling, since it's just cosmetic
+                let _ = nix::sys::wait::waitpid(
+                    middle_child,
+                    Some(nix::sys::wait::WaitPidFlag::WNOHANG),
+                );
+
                 Ok(())
             }
             Err(e) => {
-- 
2.39.5

From d.csapak at proxmox.com  Fri Nov 29 16:39:08 2024
From: d.csapak at proxmox.com (Dominik Csapak)
Date: Fri, 29 Nov 2024 16:39:08 +0100
Subject: [pbs-devel] [PATCH proxmox-backup] add missing O_CLOEXEC flags to `openat` calls
Message-ID: <20241129153908.4141576-1-d.csapak@proxmox.com>

since we don't want to have lingering file descriptors on reload, which does
a fork/exec.

Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
 pbs-client/src/pxar/dir_stack.rs     | 2 +-
 pbs-datastore/src/backup_info.rs     | 2 +-
 pbs-datastore/src/snapshot_reader.rs | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/pbs-client/src/pxar/dir_stack.rs b/pbs-client/src/pxar/dir_stack.rs
index 616d7545b..6fe55f170 100644
--- a/pbs-client/src/pxar/dir_stack.rs
+++ b/pbs-client/src/pxar/dir_stack.rs
@@ -57,7 +57,7 @@ impl PxarDir {
         let dir = Dir::openat(
             parent,
             self.file_name.as_os_str(),
-            OFlag::O_DIRECTORY,
+            OFlag::O_DIRECTORY | OFlag::O_CLOEXEC,
             Mode::empty(),
         )?;
 
diff --git a/pbs-datastore/src/backup_info.rs b/pbs-datastore/src/backup_info.rs
index be262773b..1ca279aca 100644
--- a/pbs-datastore/src/backup_info.rs
+++ b/pbs-datastore/src/backup_info.rs
@@ -143,7 +143,7 @@ impl BackupGroup {
                 match openat(
                     l2_fd,
                     &manifest_path,
-                    OFlag::O_RDONLY,
+                    OFlag::O_RDONLY | OFlag::O_CLOEXEC,
                     nix::sys::stat::Mode::empty(),
                 ) {
                     Ok(rawfd) => {
diff --git a/pbs-datastore/src/snapshot_reader.rs b/pbs-datastore/src/snapshot_reader.rs
index 95e59a421..dea51cbef 100644
--- a/pbs-datastore/src/snapshot_reader.rs
+++ b/pbs-datastore/src/snapshot_reader.rs
@@ -102,7 +102,7 @@ impl SnapshotReader {
         let raw_fd = nix::fcntl::openat(
             self.locked_dir.as_raw_fd(),
             Path::new(filename),
-            nix::fcntl::OFlag::O_RDONLY,
+            nix::fcntl::OFlag::O_RDONLY | nix::fcntl::OFlag::O_CLOEXEC,
             nix::sys::stat::Mode::empty(),
         )?;
         let file = unsafe { File::from_raw_fd(raw_fd) };
-- 
2.39.5
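To close out, a standalone sketch (not from any patch) of why these O_CLOEXEC fixes matter across the daemon's fork/exec reload: a descriptor opened without close-on-exec is inherited by exec'd children, which is exactly how reloaded daemons ended up holding stale locks.

```
use std::process::Command;

fn main() -> std::io::Result<()> {
    // open a descriptor the raw way, without O_CLOEXEC
    let fd = unsafe { libc::open(c"/etc/os-release".as_ptr(), libc::O_RDONLY) };
    assert!(fd >= 0);

    // mark it close-on-exec, as the patches above now do at open time
    unsafe {
        let flags = libc::fcntl(fd, libc::F_GETFD);
        libc::fcntl(fd, libc::F_SETFD, flags | libc::FD_CLOEXEC);
    }

    // the exec'd child's fd table no longer contains the descriptor
    Command::new("ls").arg("-l").arg("/proc/self/fd").status()?;
    Ok(())
}
```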