[pbs-devel] [PATCH proxmox-backup 1/2] fix #6195: api: datastore: add endpoint for moving namespaces
Hannes Laimer
h.laimer at proxmox.com
Wed Sep 3 12:49:03 CEST 2025
On 03.09.25 12:40, Shannon Sterz wrote:
> On Wed Sep 3, 2025 at 12:11 PM CEST, Hannes Laimer wrote:
>> Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
>> ---
>> pbs-datastore/src/datastore.rs | 121 +++++++++++++++++++++++++++++++++
>> src/api2/admin/namespace.rs | 43 +++++++++++-
>> 2 files changed, 162 insertions(+), 2 deletions(-)
>>
>> diff --git a/pbs-datastore/src/datastore.rs b/pbs-datastore/src/datastore.rs
>> index 7cf020fc..531927ff 100644
>> --- a/pbs-datastore/src/datastore.rs
>> +++ b/pbs-datastore/src/datastore.rs
>> @@ -706,6 +706,127 @@ impl DataStore {
>> Ok(ns)
>> }
>>
>> + pub fn namespace_move(
>> + self: &Arc<Self>,
>> + ns: BackupNamespace,
>> + mut target: BackupNamespace,
>> + ) -> Result<(), Error> {
>> + log::info!(
>> + "moving ns '{}' to ns '{}' in datastore {}",
>> + ns,
>> + target,
>> + self.name()
>> + );
>> + if ns.is_root() {
>> + bail!("can't move root");
>> + }
>> + if !self.namespace_exists(&ns) {
>> + bail!("cannot move a namespace that does not exist");
>> + }
>> + if !self.namespace_exists(&target) {
>> + bail!("cannot move to a namespace that does not exist");
>> + }
>> + let Some(base_name) = ns.clone().pop() else {
>> + bail!("source namespace can't be root");
>> + };
>> + target.push(base_name.clone())?;
>> + if self.namespace_exists(&target) {
>> + bail!("'{}' already exists", target);
>> + }
>> +
>> + let source_path = self.namespace_path(&ns);
>> + let target_path = self.namespace_path(&target);
>> + if target_path.starts_with(&source_path) {
>> + bail!("cannot move namespace into one of its sub-namespaces");
>> + }
>> +
>> + let iter = self.recursive_iter_backup_ns_ok(ns.to_owned(), None)?;
>> + let source_depth = ns.depth();
>> + let source_height = iter.map(|ns| ns.depth() - source_depth).max().unwrap_or(1);
>> + target.check_max_depth(source_height - 1)?;
>> +
>> + if let DatastoreBackend::S3(s3_client) = self.backend()? {
>> + let src_ns_rel = ns.path();
>> + let src_ns_rel_str = src_ns_rel
>> + .to_str()
>> + .ok_or_else(|| format_err!("invalid source namespace path"))?;
>> +
>> + let dst_ns_rel = target.path();
>> + let dst_ns_rel_str = dst_ns_rel
>> + .to_str()
>> + .ok_or_else(|| format_err!("invalid destination namespace path"))?;
>> +
>> + let list_prefix = S3PathPrefix::Some(format!("{S3_CONTENT_PREFIX}/{src_ns_rel_str}"));
>> + let mut next_continuation_token: Option<String> = None;
>> + let store_prefix = format!("{}/{S3_CONTENT_PREFIX}/", self.name());
>> +
>> + loop {
>> + let list_objects_result = proxmox_async::runtime::block_on(
>> + s3_client.list_objects_v2(&list_prefix, next_continuation_token.as_deref()),
>> + )?;
>> +
>> + let objects_to_move: Vec<S3ObjectKey> = list_objects_result
>> + .contents
>> + .iter()
>> + .map(|c| c.key.clone())
>> + .collect();
>> +
>> + for object_key in objects_to_move {
>> + let object_key_str = object_key.to_string();
>> + let object_path =
>> + object_key_str.strip_prefix(&store_prefix).ok_or_else(|| {
>> + format_err!(
>> + "failed to strip store prefix '{}' from object key '{}'",
>> + store_prefix,
>> + object_key_str
>> + )
>> + })?;
>> +
>> + let src_key_rel = S3ObjectKey::try_from(
>> + format!("{S3_CONTENT_PREFIX}/{}", object_path).as_str(),
>> + )?;
>> +
>> + let src_rel_with_sep = format!("{}/", src_ns_rel_str);
>> + let rest = object_path
>> + .strip_prefix(&src_rel_with_sep)
>> + .ok_or_else(|| format_err!("unexpected object path: {}", object_path))?;
>> +
>> + let dest_rel_path = format!("{}/{}", dst_ns_rel_str, rest);
>> + let dest_rel_path = std::path::Path::new(&dest_rel_path);
>> + let file_name = dest_rel_path
>> + .file_name()
>> + .and_then(|n| n.to_str())
>> + .ok_or_else(|| format_err!("invalid destination object file name"))?;
>> + let parent = dest_rel_path
>> + .parent()
>> + .unwrap_or_else(|| std::path::Path::new(""));
>> + let dest_key = crate::s3::object_key_from_path(parent, file_name)?;
>> +
>> + proxmox_async::runtime::block_on(
>> + s3_client.copy_object(src_key_rel.clone(), dest_key),
>> + )?;
>> + }
>> +
>> + if list_objects_result.is_truncated {
>> + next_continuation_token = list_objects_result.next_continuation_token;
>> + } else {
>> + break;
>> + }
>> + }
>> +
>> + // delete objects under the old namespace prefix
>> + let delete_error =
>> + proxmox_async::runtime::block_on(s3_client.delete_objects_by_prefix(&list_prefix))?;
>> + if delete_error {
>> + bail!("deleting objects under old namespace prefix failed");
>> + }
>> + }
>> +
>> + std::fs::create_dir_all(&target_path)?;
>> + std::fs::rename(source_path, target_path)?;
>
> hm, won't this break running backup tasks that assume they can write
> manifests/index into an existing location?
>
You're right. I'm not sure why, but in my head
`DataStore::lookup_datastore(&store, Some(Operation::Write))`
behaved like a lock, which it obviously does not.
Setting a maintenance mode and waiting for running operations to
finish should fix this. I'll send a v2, roughly along the lines of the
sketch below.
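Untested sketch of what I have in mind for v2 (both `set_maintenance_mode`
and `wait_for_active_operations` are placeholders here, not existing
helpers):

    use pbs_api_types::{MaintenanceMode, MaintenanceType, Operation};
    use pbs_datastore::DataStore;

    // look the store up with Operation::Lookup so this handle does not
    // count as an active read/write operation itself
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Lookup))?;

    // put the datastore into maintenance mode so no new tasks can start on it
    // (set_maintenance_mode stands in for updating the datastore config)
    let mode = MaintenanceMode {
        ty: MaintenanceType::Offline,
        message: Some("namespace move in progress".to_string()),
    };
    set_maintenance_mode(&store, Some(mode))?;

    // wait until all operations that were still running have finished
    // (wait_for_active_operations is a placeholder as well)
    wait_for_active_operations(&store)?;

    // perform the actual move, then clear the maintenance mode again,
    // regardless of whether the move itself succeeded
    let result = datastore.namespace_move(ns, target);
    set_maintenance_mode(&store, None)?;
    result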
Thanks for taking a look!
>> + Ok(())
>> + }
>> +
>> /// Returns if the given namespace exists on the datastore
>> pub fn namespace_exists(&self, ns: &BackupNamespace) -> bool {
>> let mut path = self.base_path();
>> diff --git a/src/api2/admin/namespace.rs b/src/api2/admin/namespace.rs
>> index 6cf88d89..71992098 100644
>> --- a/src/api2/admin/namespace.rs
>> +++ b/src/api2/admin/namespace.rs
>> @@ -6,7 +6,7 @@ use proxmox_schema::*;
>>
>> use pbs_api_types::{
>> Authid, BackupGroupDeleteStats, BackupNamespace, NamespaceListItem, Operation,
>> - DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PROXMOX_SAFE_ID_FORMAT,
>> + DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_MODIFY, PROXMOX_SAFE_ID_FORMAT,
>> };
>>
>> use pbs_datastore::DataStore;
>> @@ -123,6 +123,44 @@ pub fn list_namespaces(
>> Ok(namespace_list)
>> }
>>
>> +#[api(
>> + input: {
>> + properties: {
>> + store: { schema: DATASTORE_SCHEMA },
>> + ns: {
>> + type: BackupNamespace,
>> + },
>> + "target-ns": {
>> + type: BackupNamespace,
>> + optional: true,
>> + },
>> + },
>> + },
>> + access: {
>> + permission: &Permission::Anybody,
>> + description: "Requires DATASTORE_MODIFY on both the source /datastore/{store}/{ns} and\
>> + the target /datastore/{store}/[{target-ns}]",
>> + },
>> +)]
>> +/// Move a namespace into a different one. No target means the root namespace is the target.
>> +pub fn move_namespace(
>> + store: String,
>> + ns: BackupNamespace,
>> + target_ns: Option<BackupNamespace>,
>> + _info: &ApiMethod,
>> + rpcenv: &mut dyn RpcEnvironment,
>> +) -> Result<(), Error> {
>> + let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
>> + let target = target_ns.unwrap_or(BackupNamespace::root());
>> +
>> + check_ns_modification_privs(&store, &ns, &auth_id)?;
>> + check_ns_privs(&store, &target, &auth_id, PRIV_DATASTORE_MODIFY)?;
>> +
>> + let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
>> +
>> + datastore.namespace_move(ns, target)
>> +}
>> +
>> #[api(
>> input: {
>> properties: {
>> @@ -192,4 +230,5 @@ pub fn delete_namespace(
>> pub const ROUTER: Router = Router::new()
>> .get(&API_METHOD_LIST_NAMESPACES)
>> .post(&API_METHOD_CREATE_NAMESPACE)
>> - .delete(&API_METHOD_DELETE_NAMESPACE);
>> + .delete(&API_METHOD_DELETE_NAMESPACE)
>> + .subdirs(&[("move", &Router::new().post(&API_METHOD_MOVE_NAMESPACE))]);
>