[pbs-devel] [PATCH proxmox-backup v5 1/2] fix #5439: allow to reuse existing datastore

Gabriel Goller g.goller at proxmox.com
Thu Aug 29 12:45:10 CEST 2024


Disallow creating a datastore in a non-empty directory. Allow adding an
existing datastore via a 'reuse-datastore' checkbox. This path only
checks that the necessary directories (.chunks and its subdirectories)
and the .lock file exist and have the correct owner and permissions.
Note that the reuse-datastore path does not open the datastore, so that
we don't drop the ProcessLocker of an existing datastore.

Signed-off-by: Gabriel Goller <g.goller at proxmox.com>
---

v5, thanks @Wolfgang:
 - remove unnecessary call to chunk_dir_accessible()
 - match on exact permissions
 - remove unused worker reference

v4, thanks @Thomas:
 - move reuse-datastore checkbox to "advanced options"

v3, thanks @Fabian:
 - don't open chunkstore on existing datastore, as this drops the
   previous ProcessLocker
 - factor out `ChunkStore::open` checks and call them in reuse-datastore
   path as well

v2, thanks @Fabian:
 - also check on frontend for root
 - forbid datastore creation if dir not empty
 - add reuse-datastore option
 - verify chunkstore directories permissions and owners
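
Not part of the patch, just for review context: a standalone sketch of
the emptiness rule that do_create_datastore() applies before creating a
new chunk store, assuming only the standard library. A target path
counts as empty if it does not exist yet or contains nothing but hidden
entries (names starting with a dot). The path used in main() is a
made-up example.

use std::path::Path;

// Sketch of the emptiness check: the datastore directory may either not
// exist yet or contain only hidden entries (e.g. ".hidden-file").
fn path_is_empty_enough(path: &Path) -> bool {
    std::fs::read_dir(path).map_or(true, |mut entries| {
        entries.all(|entry| {
            entry.map_or(false, |e| {
                e.file_name()
                    .to_str()
                    .map_or(false, |name| name.starts_with('.'))
            })
        })
    })
}

fn main() {
    // hypothetical path, purely for illustration
    let path = Path::new("/tmp/new-datastore");
    println!("{:?} empty enough: {}", path, path_is_empty_enough(path));
}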

 pbs-datastore/src/chunk_store.rs | 73 ++++++++++++++++++++++++++++----
 src/api2/config/datastore.rs     | 49 ++++++++++++++++-----
 src/api2/node/disks/directory.rs |  4 +-
 src/api2/node/disks/zfs.rs       |  4 +-
 4 files changed, 109 insertions(+), 21 deletions(-)
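
Also not part of the patch: a minimal sketch of the owner/mode check
that the reuse path applies to the datastore root, the '.chunks'
directory and its 65536 subdirectories (expected mode 0700), and the
'.lock' file (expected mode 0600). It assumes the anyhow and nix
crates; uid and gid are plain parameters here, while the patch resolves
them via pbs_config::backup_user() and pbs_config::backup_group().

use std::path::Path;

use anyhow::{bail, Error};

fn check_permissions(path: &Path, uid: u32, gid: u32, file_mode: u32) -> Result<(), Error> {
    match nix::sys::stat::stat(path) {
        Ok(stat) => {
            // st_mode also carries the file-type bits (S_IFDIR, S_IFREG, ...),
            // so mask them off before comparing against the expected mode
            if stat.st_uid != uid
                || stat.st_gid != gid
                || (stat.st_mode & 0o7777) != file_mode
            {
                bail!("unexpected owner or permissions on {path:?}");
            }
        }
        Err(err) => bail!("unable to stat {path:?} - {err}"),
    }
    Ok(())
}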

diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index dd0061ea56ca..35a066f24f8c 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -156,6 +156,22 @@ impl ChunkStore {
         lockfile_path
     }
 
+    /// Checks that the chunk store base path is absolute and that we can
+    /// access it. Returns the absolute '.chunks' path on success.
+    pub fn chunk_dir_accessible(base: &Path) -> Result<PathBuf, Error> {
+        if !base.is_absolute() {
+            bail!("expected absolute path - got {:?}", base);
+        }
+
+        let chunk_dir = Self::chunk_dir(base);
+
+        if let Err(err) = std::fs::metadata(&chunk_dir) {
+            bail!("unable to open chunk store at {chunk_dir:?} - {err}");
+        }
+
+        Ok(chunk_dir)
+    }
+
     /// Opens the chunk store with a new process locker.
     ///
     /// Note that this must be used with care, as it's dangerous to create two instances on the
@@ -168,15 +184,7 @@ impl ChunkStore {
     ) -> Result<Self, Error> {
         let base: PathBuf = base.into();
 
-        if !base.is_absolute() {
-            bail!("expected absolute path - got {:?}", base);
-        }
-
-        let chunk_dir = Self::chunk_dir(&base);
-
-        if let Err(err) = std::fs::metadata(&chunk_dir) {
-            bail!("unable to open chunk store '{name}' at {chunk_dir:?} - {err}");
-        }
+        let chunk_dir = ChunkStore::chunk_dir_accessible(&base)?;
 
         let lockfile_path = Self::lockfile_path(&base);
 
@@ -561,6 +569,53 @@ impl ChunkStore {
         // unwrap: only `None` in unit tests
         ProcessLocker::try_exclusive_lock(self.locker.clone().unwrap())
     }
+
+    /// Checks the permissions and the owner of the passed path.
+    fn check_permissions<T: AsRef<Path>>(path: T, file_mode: u32) -> Result<(), Error> {
+        match nix::sys::stat::stat(path.as_ref()) {
+            Ok(stat) => {
+                if stat.st_uid != u32::from(pbs_config::backup_user()?.uid)
+                    || stat.st_gid != u32::from(pbs_config::backup_group()?.gid)
+                    || stat.st_mode & 0o7777 != file_mode
+                {
+                    bail!(
+                        "unable to open existing chunk store path {:?} - permissions or owner not correct",
+                        path.as_ref(),
+                    );
+                }
+            }
+            Err(err) => {
+                bail!(
+                    "unable to open existing chunk store path {:?} - {err}",
+                    path.as_ref(),
+                );
+            }
+        }
+        Ok(())
+    }
+
+    /// Verify vital files in the datastore. Checks the owner and permissions of the chunk
+    /// store, its subdirectories and the lock file.
+    pub fn verify_chunkstore<T: AsRef<Path>>(path: T) -> Result<(), Error> {
+        // Check datastore root path perm/owner
+        ChunkStore::check_permissions(path.as_ref(), 0o700)?;
+
+        let chunk_dir = Self::chunk_dir(path.as_ref());
+        // Check datastore .chunks path perm/owner
+        ChunkStore::check_permissions(&chunk_dir, 0o700)?;
+
+        // Check all .chunks subdirectories
+        for i in 0..64 * 1024 {
+            let mut l1path = chunk_dir.clone();
+            l1path.push(format!("{:04x}", i));
+            ChunkStore::check_permissions(&l1path, 0o700)?;
+        }
+
+        // Check .lock file
+        let lockfile_path = Self::lockfile_path(path.as_ref());
+        ChunkStore::check_permissions(lockfile_path, 0o600)?;
+        Ok(())
+    }
 }
 
 #[test]
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index ca6edf05acd1..87a1f6bc4fca 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -1,7 +1,7 @@
 use std::path::PathBuf;
 
 use ::serde::{Deserialize, Serialize};
-use anyhow::Error;
+use anyhow::{bail, Error};
 use hex::FromHex;
 use serde_json::Value;
 use tracing::warn;
@@ -70,21 +70,43 @@ pub(crate) fn do_create_datastore(
     _lock: BackupLockGuard,
     mut config: SectionConfigData,
     datastore: DataStoreConfig,
+    reuse_datastore: bool,
 ) -> Result<(), Error> {
     let path: PathBuf = datastore.path.clone().into();
 
+    if path.parent().is_none() {
+        bail!("cannot create datastore in root path");
+    }
+
     let tuning: DatastoreTuning = serde_json::from_value(
         DatastoreTuning::API_SCHEMA
             .parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?,
     )?;
-    let backup_user = pbs_config::backup_user()?;
-    let _store = ChunkStore::create(
-        &datastore.name,
-        path,
-        backup_user.uid,
-        backup_user.gid,
-        tuning.sync_level.unwrap_or_default(),
-    )?;
+
+    if reuse_datastore {
+        ChunkStore::verify_chunkstore(&path)?;
+    } else {
+        let datastore_empty = std::fs::read_dir(path.clone()).map_or(true, |mut d| {
+            d.all(|dir| {
+                dir.map_or(false, |file| {
+                    file.file_name()
+                        .to_str()
+                        .map_or(false, |name| name.starts_with('.'))
+                })
+            })
+        });
+        if !datastore_empty {
+            bail!("path not empty!");
+        }
+        let backup_user = pbs_config::backup_user()?;
+        let _store = ChunkStore::create(
+            &datastore.name,
+            path,
+            backup_user.uid,
+            backup_user.gid,
+            tuning.sync_level.unwrap_or_default(),
+        )?;
+    }
 
     config.set_data(&datastore.name, "datastore", &datastore)?;
 
@@ -101,6 +123,12 @@ pub(crate) fn do_create_datastore(
                 type: DataStoreConfig,
                 flatten: true,
             },
+            "reuse-datastore": {
+                type: Boolean,
+                optional: true,
+                default: false,
+                description: "Re-use existing datastore directory."
+            }
         },
     },
     access: {
@@ -110,6 +138,7 @@ pub(crate) fn do_create_datastore(
 /// Create new datastore config.
 pub fn create_datastore(
     config: DataStoreConfig,
+    reuse_datastore: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let lock = pbs_config::datastore::lock_config()?;
@@ -154,7 +183,7 @@ pub fn create_datastore(
         auth_id.to_string(),
         to_stdout,
         move |_worker| {
-            do_create_datastore(lock, section_config, config)?;
+            do_create_datastore(lock, section_config, config, reuse_datastore)?;
 
             if let Some(prune_job_config) = prune_job_config {
                 do_create_prune_job(prune_job_config)
diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index 2aa9571d7150..fe6dccaad57b 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -213,7 +213,9 @@ pub fn create_datastore_disk(
                     bail!("datastore '{}' already exists.", datastore.name);
                 }
 
-                crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?;
+                crate::api2::config::datastore::do_create_datastore(
+                    lock, config, datastore, false,
+                )?;
             }
 
             Ok(())
diff --git a/src/api2/node/disks/zfs.rs b/src/api2/node/disks/zfs.rs
index 666fe3e82cfe..a31320c5451a 100644
--- a/src/api2/node/disks/zfs.rs
+++ b/src/api2/node/disks/zfs.rs
@@ -313,7 +313,9 @@ pub fn create_zpool(
                     bail!("datastore '{}' already exists.", datastore.name);
                 }
 
-                crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?;
+                crate::api2::config::datastore::do_create_datastore(
+                    lock, config, datastore, false,
+                )?;
             }
 
             Ok(())
-- 
2.39.2