[pbs-devel] [PATCH proxmox-backup v13 08/26] api: removable datastore creation
Fabian Grünbichler
f.gruenbichler at proxmox.com
Thu Nov 21 15:22:47 CET 2024
On November 13, 2024 4:00 pm, Hannes Laimer wrote:
> Devices can contain multiple datastores; the only limitation is that
> they are not allowed to be nested.
> If the specified path already contains a datastore, `reuse datastore` has
> to be set so it'll be added without creating a chunkstore.
>
> Signed-off-by: Hannes Laimer <h.laimer at proxmox.com>
> ---
> changes since v12:
> * use recently added 'reuse datastore'
> * allow creation even if device is already used by datastore, just no
> nesting
>
> src/api2/config/datastore.rs | 50 +++++++++++++++++++++++++++++++-----
> 1 file changed, 44 insertions(+), 6 deletions(-)
>
> diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
> index 374c302f..9140a7a4 100644
> --- a/src/api2/config/datastore.rs
> +++ b/src/api2/config/datastore.rs
> @@ -20,7 +20,8 @@ use pbs_config::BackupLockGuard;
> use pbs_datastore::chunk_store::ChunkStore;
>
> use crate::api2::admin::{
> - prune::list_prune_jobs, sync::list_sync_jobs, verify::list_verification_jobs,
> + datastore::do_mount_device, prune::list_prune_jobs, sync::list_sync_jobs,
> + verify::list_verification_jobs,
> };
> use crate::api2::config::prune::{delete_prune_job, do_create_prune_job};
> use crate::api2::config::sync::delete_sync_job;
> @@ -31,6 +32,7 @@ use pbs_config::CachedUserInfo;
> use proxmox_rest_server::WorkerTask;
>
> use crate::server::jobstate;
> +use crate::tools::disks::unmount_by_mountpoint;
>
> #[api(
> input: {
> @@ -72,7 +74,11 @@ pub(crate) fn do_create_datastore(
> datastore: DataStoreConfig,
> reuse_datastore: bool,
> ) -> Result<(), Error> {
> - let path: PathBuf = datastore.path.clone().into();
> + let path: PathBuf = datastore.absolute_path().into();
> + let need_unmount = datastore.get_mount_point().is_some() && {
nit: would be easier to read as

    let need_unmount = datastore.get_mount_point().is_some();
    if need_unmount {
        do_mount_device(datastore.clone())?;
    }
> + do_mount_device(datastore.clone())?;
> + true
> + };
>
> if path.parent().is_none() {
> bail!("cannot create datastore in root path");
this can fail (well, not really for a removable datastore), and there's
also some parsing code between this
> @@ -84,24 +90,32 @@ pub(crate) fn do_create_datastore(
and this, and this repeats below as well..
it might be better to wrap most of the body after the mounting, check
for any error, then do the cleanup/unmounting in one place?
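e.g., something along these lines (rough, untested sketch reusing the
names from the patch):

    let res = (|| -> Result<(), Error> {
        if reuse_datastore {
            ChunkStore::verify_chunkstore(&path)?;
        } else {
            // empty-dir check and ChunkStore::create() go here, using
            // plain `?` instead of the inline unmount calls
        }
        config.set_data(&datastore.name, "datastore", &datastore)?;
        Ok(())
    })();

    if res.is_err() && need_unmount {
        let _ = unmount_by_mountpoint(&path);
    }
    res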
> )?;
>
> if reuse_datastore {
> - ChunkStore::verify_chunkstore(&path)?;
> + if let Err(e) = ChunkStore::verify_chunkstore(&path) {
> + let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
> + return Err(e);
> + }
then this
> } else {
> if let Ok(dir) = std::fs::read_dir(&path) {
> for file in dir {
> let name = file?.file_name();
> if !name.to_str().map_or(false, |name| name.starts_with('.')) {
> + let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
> bail!("datastore path is not empty");
and this
> }
> }
> }
> let backup_user = pbs_config::backup_user()?;
> - let _store = ChunkStore::create(
> + let res = ChunkStore::create(
> &datastore.name,
> - path,
> + path.clone(),
> backup_user.uid,
> backup_user.gid,
> tuning.sync_level.unwrap_or_default(),
> - )?;
> + );
> + if let Err(e) = res {
> + let _ = need_unmount && unmount_by_mountpoint(&path).is_ok();
and this could all just return/bubble up the error, with the cleanup
logic living one call level higher (see the sketch above)..
> + return Err(e);
> + }
> }
>
> config.set_data(&datastore.name, "datastore", &datastore)?;
> @@ -145,6 +159,30 @@ pub fn create_datastore(
> param_bail!("name", "datastore '{}' already exists.", config.name);
> }
>
> + if !config.path.starts_with("/") {
> + param_bail!("path", "expected an absolute path, '{}' is not", config.path);
> + }
but the schema is now updated to allow relative paths for removable
datastores? doesn't this need another condition so it only applies to
non-removable datastores? I guess this was only tested via the
create_datastore_disk code path, which calls do_create_datastore
directly, and not this API endpoint..
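e.g. (untested):

    if config.backing_device.is_none() && !config.path.starts_with("/") {
        param_bail!("path", "expected an absolute path, '{}' is not", config.path);
    }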
> +
> + if let Some(uuid) = &config.backing_device {
but this check here should apply to all datastores? it causes GC
confusion also for regular ones if they get nested.. and since this only
affects attempts to create datastores, it should be okay to make it
fatal? (rough sketch below)
> + for (store_name, (_, store_config)) in &section_config.sections {
> + if let (Some(store_uuid), Some(store_path)) = (
> + store_config["backing-device"].as_str(),
> + store_config["path"].as_str(),
> + ) {
> + // We don't allow two datastores to be nested in each other, so if
> + // ds1: /a/b -> can't create new one at /, /a or /a/b/..., /a/c is fine
> + if store_uuid == uuid
> + && (store_path.starts_with(&config.path) || config.path.starts_with(store_path))
> + {
> + param_bail!(
> + "path",
> + "can't nest datastores, '{store_name}' already in '{store_path}'",
"nested datastores not allowed: "
is a bit easier/nicer to read I think
> + );
> + }
> + };
> + }
> + }
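a rough sketch of how this could look when applied to all datastores
(untested, and assuming backing_device is an Option<String>; for two
regular datastores both backing-device lookups are None and compare
equal):

    for (store_name, (_, store_config)) in &section_config.sections {
        let Some(store_path) = store_config["path"].as_str() else {
            continue;
        };
        // regular datastores have no backing-device, so both sides are
        // None and the comparison still holds
        if store_config["backing-device"].as_str() == config.backing_device.as_deref()
            && (store_path.starts_with(&config.path) || config.path.starts_with(store_path))
        {
            param_bail!(
                "path",
                "nested datastores not allowed: '{store_name}' already in '{store_path}'",
            );
        }
    }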
> +
> let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
> let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
>
> --
> 2.39.5