[pbs-devel] [RFC v2 proxmox-backup 08/21] datastore: namespace: add filter for trash status
Christian Ebner
c.ebner at proxmox.com
Thu May 8 15:05:42 CEST 2025
As with snapshots, allow filtering namespaces based on their trash
status while iterating.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
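
For illustration only, not part of the patch: a minimal sketch of how a
caller could ask for only trashed namespaces via the new filter
parameter, based on the recursive_iter_backup_ns_ok() signature and the
ls-snapshots example touched below. The helper name and surrounding
setup are made up for the example.

    use std::sync::Arc;

    use anyhow::Error;

    use pbs_datastore::{DataStore, NamespaceListFilter};

    // Hypothetical helper: print every trashed namespace below the root
    // namespace, without limiting the recursion depth.
    fn list_trashed_namespaces(store: &Arc<DataStore>) -> Result<(), Error> {
        for ns in store.recursive_iter_backup_ns_ok(
            Default::default(),
            None,
            NamespaceListFilter::Trashed,
        )? {
            println!("trashed namespace: {ns}");
        }
        Ok(())
    }

Active callers keep their previous behavior by passing
NamespaceListFilter::Active, while NamespaceListFilter::All also yields
trashed namespaces (used e.g. for the bottom-up namespace removal).
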
pbs-datastore/examples/ls-snapshots.rs | 8 ++-
pbs-datastore/src/datastore.rs | 24 ++++---
pbs-datastore/src/hierarchy.rs | 91 ++++++++++++++++++++++++--
pbs-datastore/src/lib.rs | 1 +
src/api2/admin/namespace.rs | 16 +++--
src/api2/tape/backup.rs | 8 ++-
src/backup/hierarchy.rs | 26 +++++---
src/server/pull.rs | 8 ++-
src/server/sync.rs | 5 +-
9 files changed, 149 insertions(+), 38 deletions(-)
diff --git a/pbs-datastore/examples/ls-snapshots.rs b/pbs-datastore/examples/ls-snapshots.rs
index 2eeea4892..1d3707a17 100644
--- a/pbs-datastore/examples/ls-snapshots.rs
+++ b/pbs-datastore/examples/ls-snapshots.rs
@@ -2,7 +2,7 @@ use std::path::PathBuf;
use anyhow::{bail, Error};
-use pbs_datastore::DataStore;
+use pbs_datastore::{DataStore, NamespaceListFilter};
fn run() -> Result<(), Error> {
let base: PathBuf = match std::env::args().nth(1) {
@@ -20,7 +20,11 @@ fn run() -> Result<(), Error> {
let store = unsafe { DataStore::open_path("", base, None)? };
- for ns in store.recursive_iter_backup_ns_ok(Default::default(), max_depth)? {
+ for ns in store.recursive_iter_backup_ns_ok(
+ Default::default(),
+ max_depth,
+ NamespaceListFilter::Active,
+ )? {
println!("found namespace store:/{}", ns);
for group in store.iter_backup_groups(ns)? {
diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
index 867324380..aee69768f 100644
--- a/pbs-datastore/src/datastore.rs
+++ b/pbs-datastore/src/datastore.rs
@@ -32,7 +32,9 @@ use crate::backup_info::{BackupDir, BackupGroup, BackupInfo, ListBackupFilter, O
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
-use crate::hierarchy::{ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive};
+use crate::hierarchy::{
+ ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive, NamespaceListFilter,
+};
use crate::index::IndexFile;
use crate::task_tracking::{self, update_active_operations};
use crate::DataBlob;
@@ -617,7 +619,7 @@ impl DataStore {
let mut stats = BackupGroupDeleteStats::default();
if delete_groups {
log::info!("removing whole namespace recursively below {store}:/{ns}",);
- for ns in self.recursive_iter_backup_ns(ns.to_owned())? {
+ for ns in self.recursive_iter_backup_ns(ns.to_owned(), NamespaceListFilter::Active)? {
let (removed_ns_groups, delete_stats) = self.remove_namespace_groups(&ns?)?;
stats.add(&delete_stats);
removed_all_requested = removed_all_requested && removed_ns_groups;
@@ -629,7 +631,7 @@ impl DataStore {
// now try to delete the actual namespaces, bottom up so that we can use safe rmdir that
// will choke if a new backup/group appeared in the meantime (but not on an new empty NS)
let mut children = self
- .recursive_iter_backup_ns(ns.to_owned())?
+ .recursive_iter_backup_ns(ns.to_owned(), NamespaceListFilter::All)?
.collect::<Result<Vec<BackupNamespace>, Error>>()?;
children.sort_by_key(|b| std::cmp::Reverse(b.depth()));
@@ -851,8 +853,9 @@ impl DataStore {
pub fn iter_backup_ns(
self: &Arc<DataStore>,
ns: BackupNamespace,
+ filter: NamespaceListFilter,
) -> Result<ListNamespaces, Error> {
- ListNamespaces::new(Arc::clone(self), ns)
+ ListNamespaces::new(Arc::clone(self), ns, filter)
}
/// Get a streaming iter over single-level backup namespaces of a datatstore, filtered by Ok
@@ -862,10 +865,11 @@ impl DataStore {
pub fn iter_backup_ns_ok(
self: &Arc<DataStore>,
ns: BackupNamespace,
+ filter: NamespaceListFilter,
) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
let this = Arc::clone(self);
Ok(
- ListNamespaces::new(Arc::clone(self), ns)?.filter_map(move |ns| match ns {
+ ListNamespaces::new(Arc::clone(self), ns, filter)?.filter_map(move |ns| match ns {
Ok(ns) => Some(ns),
Err(err) => {
log::error!("list groups error on datastore {} - {}", this.name(), err);
@@ -882,8 +886,9 @@ impl DataStore {
pub fn recursive_iter_backup_ns(
self: &Arc<DataStore>,
ns: BackupNamespace,
+ filter: NamespaceListFilter,
) -> Result<ListNamespacesRecursive, Error> {
- ListNamespacesRecursive::new(Arc::clone(self), ns)
+ ListNamespacesRecursive::new(Arc::clone(self), ns, filter)
}
/// Get a streaming iter over single-level backup namespaces of a datatstore, filtered by Ok
@@ -894,12 +899,13 @@ impl DataStore {
self: &Arc<DataStore>,
ns: BackupNamespace,
max_depth: Option<usize>,
+ filter: NamespaceListFilter,
) -> Result<impl Iterator<Item = BackupNamespace> + 'static, Error> {
let this = Arc::clone(self);
Ok(if let Some(depth) = max_depth {
- ListNamespacesRecursive::new_max_depth(Arc::clone(self), ns, depth)?
+ ListNamespacesRecursive::new_max_depth(Arc::clone(self), ns, depth, filter)?
} else {
- ListNamespacesRecursive::new(Arc::clone(self), ns)?
+ ListNamespacesRecursive::new(Arc::clone(self), ns, filter)?
}
.filter_map(move |ns| match ns {
Ok(ns) => Some(ns),
@@ -1136,7 +1142,7 @@ impl DataStore {
let arc_self = Arc::new(self.clone());
for namespace in arc_self
- .recursive_iter_backup_ns(BackupNamespace::root())
+ .recursive_iter_backup_ns(BackupNamespace::root(), NamespaceListFilter::All)
.context("creating namespace iterator failed")?
{
let namespace = namespace.context("iterating namespaces failed")?;
diff --git a/pbs-datastore/src/hierarchy.rs b/pbs-datastore/src/hierarchy.rs
index e0bf84419..f6385ba6a 100644
--- a/pbs-datastore/src/hierarchy.rs
+++ b/pbs-datastore/src/hierarchy.rs
@@ -8,7 +8,7 @@ use anyhow::{bail, format_err, Error};
use pbs_api_types::{BackupNamespace, BackupType, BACKUP_DATE_REGEX, BACKUP_ID_REGEX};
use proxmox_sys::fs::get_file_type;
-use crate::backup_info::{BackupDir, BackupGroup};
+use crate::backup_info::{BackupDir, BackupGroup, TRASH_MARKER_FILENAME};
use crate::DataStore;
/// A iterator for all BackupDir's (Snapshots) in a BackupGroup
@@ -268,31 +268,49 @@ where
}
}
+#[derive(Clone, Copy, Debug)]
+pub enum NamespaceListFilter {
+ Active,
+ All,
+ Trashed,
+}
+
/// A iterator for a (single) level of Namespaces
pub struct ListNamespaces {
ns: BackupNamespace,
base_path: PathBuf,
ns_state: Option<proxmox_sys::fs::ReadDir>,
+ filter: NamespaceListFilter,
}
impl ListNamespaces {
/// construct a new single-level namespace iterator on a datastore with an optional anchor ns
- pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+ pub fn new(
+ store: Arc<DataStore>,
+ ns: BackupNamespace,
+ filter: NamespaceListFilter,
+ ) -> Result<Self, Error> {
Ok(ListNamespaces {
ns,
base_path: store.base_path(),
ns_state: None,
+ filter,
})
}
/// to allow constructing the iter directly on a path, e.g., provided by section config
///
/// NOTE: it's recommended to use the datastore one constructor or go over the recursive iter
- pub fn new_from_path(path: PathBuf, ns: Option<BackupNamespace>) -> Result<Self, Error> {
+ pub fn new_from_path(
+ path: PathBuf,
+ ns: Option<BackupNamespace>,
+ filter: NamespaceListFilter,
+ ) -> Result<Self, Error> {
Ok(ListNamespaces {
ns: ns.unwrap_or_default(),
base_path: path,
ns_state: None,
+ filter,
})
}
}
@@ -328,6 +346,50 @@ impl Iterator for ListNamespaces {
};
if let Ok(name) = entry.file_name().to_str() {
if name != "." && name != ".." {
+ use nix::fcntl::OFlag;
+ use nix::sys::stat::Mode;
+
+ match self.filter {
+ NamespaceListFilter::All => (),
+ NamespaceListFilter::Active => {
+ let mut trash_path = self.base_path.to_owned();
+ if !self.ns.is_root() {
+ trash_path.push(self.ns.path());
+ }
+ trash_path.push("ns");
+ trash_path.push(name);
+ trash_path.push(TRASH_MARKER_FILENAME);
+ match proxmox_sys::fd::openat(
+ &libc::AT_FDCWD,
+ &trash_path,
+ OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
+ Mode::empty(),
+ ) {
+ Ok(_) => continue,
+ Err(nix::errno::Errno::ENOENT) => (),
+ Err(err) => return Some(Err(err.into())),
+ }
+ }
+ NamespaceListFilter::Trashed => {
+ let mut trash_path = self.base_path.to_owned();
+ if !self.ns.is_root() {
+ trash_path.push(self.ns.path());
+ }
+ trash_path.push("ns");
+ trash_path.push(name);
+ trash_path.push(TRASH_MARKER_FILENAME);
+ match proxmox_sys::fd::openat(
+ &libc::AT_FDCWD,
+ &trash_path,
+ OFlag::O_PATH | OFlag::O_CLOEXEC | OFlag::O_NOFOLLOW,
+ Mode::empty(),
+ ) {
+ Ok(_) => (),
+ Err(nix::errno::Errno::ENOENT) => continue,
+ Err(err) => return Some(Err(err.into())),
+ }
+ }
+ }
return Some(BackupNamespace::from_parent_ns(&self.ns, name.to_string()));
}
}
@@ -368,12 +430,17 @@ pub struct ListNamespacesRecursive {
/// the maximal recursion depth from the anchor start ns (depth == 0) downwards
max_depth: u8,
state: Option<Vec<ListNamespaces>>, // vector to avoid code recursion
+ filter: NamespaceListFilter,
}
impl ListNamespacesRecursive {
/// Creates an recursive namespace iterator.
- pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
- Self::new_max_depth(store, ns, pbs_api_types::MAX_NAMESPACE_DEPTH)
+ pub fn new(
+ store: Arc<DataStore>,
+ ns: BackupNamespace,
+ filter: NamespaceListFilter,
+ ) -> Result<Self, Error> {
+ Self::new_max_depth(store, ns, pbs_api_types::MAX_NAMESPACE_DEPTH, filter)
}
/// Creates an recursive namespace iterator that iterates recursively until depth is reached.
@@ -386,6 +453,7 @@ impl ListNamespacesRecursive {
store: Arc<DataStore>,
ns: BackupNamespace,
max_depth: usize,
+ filter: NamespaceListFilter,
) -> Result<Self, Error> {
if max_depth > pbs_api_types::MAX_NAMESPACE_DEPTH {
let limit = pbs_api_types::MAX_NAMESPACE_DEPTH + 1;
@@ -399,6 +467,7 @@ impl ListNamespacesRecursive {
ns,
max_depth: max_depth as u8,
state: None,
+ filter,
})
}
}
@@ -418,7 +487,11 @@ impl Iterator for ListNamespacesRecursive {
match iter.next() {
Some(Ok(ns)) => {
if state.len() < self.max_depth as usize {
- match ListNamespaces::new(Arc::clone(&self.store), ns.to_owned()) {
+ match ListNamespaces::new(
+ Arc::clone(&self.store),
+ ns.to_owned(),
+ self.filter,
+ ) {
Ok(iter) => state.push(iter),
Err(err) => log::error!("failed to create child ns iter {err}"),
}
@@ -434,7 +507,11 @@ impl Iterator for ListNamespacesRecursive {
// first next call ever: initialize state vector and start iterating at our level
let mut state = Vec::with_capacity(pbs_api_types::MAX_NAMESPACE_DEPTH);
if self.max_depth as usize > 0 {
- match ListNamespaces::new(Arc::clone(&self.store), self.ns.to_owned()) {
+ match ListNamespaces::new(
+ Arc::clone(&self.store),
+ self.ns.to_owned(),
+ self.filter,
+ ) {
Ok(list_ns) => state.push(list_ns),
Err(err) => {
// yield the error but set the state to Some to avoid re-try, a future
diff --git a/pbs-datastore/src/lib.rs b/pbs-datastore/src/lib.rs
index 5014b6c09..7390c6df8 100644
--- a/pbs-datastore/src/lib.rs
+++ b/pbs-datastore/src/lib.rs
@@ -208,6 +208,7 @@ pub use datastore::{
mod hierarchy;
pub use hierarchy::{
ListGroups, ListGroupsType, ListNamespaces, ListNamespacesRecursive, ListSnapshots,
+ NamespaceListFilter,
};
mod snapshot_reader;
diff --git a/src/api2/admin/namespace.rs b/src/api2/admin/namespace.rs
index 6cf88d89e..e5463524a 100644
--- a/src/api2/admin/namespace.rs
+++ b/src/api2/admin/namespace.rs
@@ -9,7 +9,7 @@ use pbs_api_types::{
DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
};
-use pbs_datastore::DataStore;
+use pbs_datastore::{DataStore, NamespaceListFilter};
use crate::backup::{check_ns_modification_privs, check_ns_privs, NS_PRIVS_OK};
@@ -99,12 +99,14 @@ pub fn list_namespaces(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
- let iter = match datastore.recursive_iter_backup_ns_ok(parent, max_depth) {
- Ok(iter) => iter,
- // parent NS doesn't exists and user has no privs on it, avoid info leakage.
- Err(_) if parent_access.is_err() => http_bail!(FORBIDDEN, "permission check failed"),
- Err(err) => return Err(err),
- };
+ let iter =
+ match datastore.recursive_iter_backup_ns_ok(parent, max_depth, NamespaceListFilter::Active)
+ {
+ Ok(iter) => iter,
+ // parent NS doesn't exists and user has no privs on it, avoid info leakage.
+ Err(_) if parent_access.is_err() => http_bail!(FORBIDDEN, "permission check failed"),
+ Err(err) => return Err(err),
+ };
let ns_to_item =
|ns: BackupNamespace| -> NamespaceListItem { NamespaceListItem { ns, comment: None } };
diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index 17c8bc605..53554c54b 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -18,7 +18,7 @@ use pbs_api_types::{
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupInfo, ListBackupFilter};
-use pbs_datastore::{DataStore, StoreProgress};
+use pbs_datastore::{DataStore, NamespaceListFilter, StoreProgress};
use crate::tape::TapeNotificationMode;
use crate::{
@@ -392,7 +392,11 @@ fn backup_worker(
}
let mut group_list = Vec::new();
- let namespaces = datastore.recursive_iter_backup_ns_ok(root_namespace, setup.max_depth)?;
+ let namespaces = datastore.recursive_iter_backup_ns_ok(
+ root_namespace,
+ setup.max_depth,
+ NamespaceListFilter::Active,
+ )?;
for ns in namespaces {
group_list.extend(datastore.list_backup_groups(ns)?);
}
diff --git a/src/backup/hierarchy.rs b/src/backup/hierarchy.rs
index 8dd71fcf7..ec8d4b9c8 100644
--- a/src/backup/hierarchy.rs
+++ b/src/backup/hierarchy.rs
@@ -7,7 +7,9 @@ use pbs_api_types::{
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
-use pbs_datastore::{backup_info::BackupGroup, DataStore, ListGroups, ListNamespacesRecursive};
+use pbs_datastore::{
+ backup_info::BackupGroup, DataStore, ListGroups, ListNamespacesRecursive, NamespaceListFilter,
+};
/// Asserts that `privs` are fulfilled on datastore + (optional) namespace.
pub fn check_ns_privs(
@@ -75,12 +77,15 @@ pub fn can_access_any_namespace(
) -> bool {
// NOTE: traversing the datastore could be avoided if we had an "ACL tree: is there any priv
// below /datastore/{store}" helper
- let mut iter =
- if let Ok(iter) = store.recursive_iter_backup_ns_ok(BackupNamespace::root(), None) {
- iter
- } else {
- return false;
- };
+ let mut iter = if let Ok(iter) = store.recursive_iter_backup_ns_ok(
+ BackupNamespace::root(),
+ None,
+ NamespaceListFilter::Active,
+ ) {
+ iter
+ } else {
+ return false;
+ };
let wanted =
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
let name = store.name();
@@ -129,7 +134,12 @@ impl<'a> ListAccessibleBackupGroups<'a> {
owner_and_priv: Option<u64>,
auth_id: Option<&'a Authid>,
) -> Result<Self, Error> {
- let ns_iter = ListNamespacesRecursive::new_max_depth(Arc::clone(store), ns, max_depth)?;
+ let ns_iter = ListNamespacesRecursive::new_max_depth(
+ Arc::clone(store),
+ ns,
+ max_depth,
+ NamespaceListFilter::Active,
+ )?;
Ok(ListAccessibleBackupGroups {
auth_id,
ns_iter,
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 50d7b0806..d3c6fcf6a 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -25,7 +25,7 @@ use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{BackupManifest, FileInfo};
use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::{check_backup_owner, DataStore, StoreProgress};
+use pbs_datastore::{check_backup_owner, DataStore, NamespaceListFilter, StoreProgress};
use pbs_tools::sha::sha256;
use super::sync::{
@@ -750,7 +750,11 @@ fn check_and_remove_vanished_ns(
let mut local_ns_list: Vec<BackupNamespace> = params
.target
.store
- .recursive_iter_backup_ns_ok(params.target.ns.clone(), Some(max_depth))?
+ .recursive_iter_backup_ns_ok(
+ params.target.ns.clone(),
+ Some(max_depth),
+ NamespaceListFilter::Active,
+ )?
.filter(|ns| {
let user_privs =
user_info.lookup_privs(¶ms.owner, &ns.acl_path(params.target.store.name()));
diff --git a/src/server/sync.rs b/src/server/sync.rs
index ce338fbbe..308a03977 100644
--- a/src/server/sync.rs
+++ b/src/server/sync.rs
@@ -26,7 +26,9 @@ use pbs_api_types::{
use pbs_client::{BackupReader, BackupRepository, HttpClient, RemoteChunkReader};
use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::read_chunk::AsyncReadChunk;
-use pbs_datastore::{BackupManifest, DataStore, ListNamespacesRecursive, LocalChunkReader};
+use pbs_datastore::{
+ BackupManifest, DataStore, ListNamespacesRecursive, LocalChunkReader, NamespaceListFilter,
+};
use crate::backup::ListAccessibleBackupGroups;
use crate::server::jobstate::Job;
@@ -425,6 +427,7 @@ impl SyncSource for LocalSource {
self.store.clone(),
self.ns.clone(),
max_depth.unwrap_or(MAX_NAMESPACE_DEPTH),
+ NamespaceListFilter::Active,
)?
.collect();
--
2.39.5