[pbs-devel] [PATCH proxmox-backup v3 1/2] fix #5439: allow to reuse existing datastore
Gabriel Goller
g.goller at proxmox.com
Thu Jul 18 14:29:48 CEST 2024
Disallow creating datastores in non-empty directories. Allow adding
existing datastores via a 'reuse-datastore' checkbox. This only checks
if all the necessary directories (.chunks + subdirectories and .lock)
exist and have the correct permissions. Note that the reuse-datastore
path does not open the datastore, so that we don't drop the
ProcessLocker of an existing datastore.
Signed-off-by: Gabriel Goller <g.goller at proxmox.com>
---
v3, thanks @Fabian:
- don't open chunkstore on existing datastore, as this drops the
previous ProcessLocker
- factor out `ChunkStore::open` checks and call them in reuse-datastore
path as well
v2, thanks @Fabian:
- also check on frontend for root
- forbid datastore creation if dir not empty
- add reuse-datastore option
- verify chunkstore directories permissions and owners
pbs-datastore/src/chunk_store.rs | 75 +++++++++++++++++++++++++++-----
src/api2/config/datastore.rs | 52 +++++++++++++++++-----
src/api2/node/disks/directory.rs | 4 +-
src/api2/node/disks/zfs.rs | 4 +-
4 files changed, 112 insertions(+), 23 deletions(-)
diff --git a/pbs-datastore/src/chunk_store.rs b/pbs-datastore/src/chunk_store.rs
index dd0061ea56ca..1a150fd92579 100644
--- a/pbs-datastore/src/chunk_store.rs
+++ b/pbs-datastore/src/chunk_store.rs
@@ -156,27 +156,35 @@ impl ChunkStore {
lockfile_path
}
+ /// Check if the chunkstore path is absolute and that we can
+ /// access it. Returns the absolute '.chunks' path on success.
+ pub fn chunk_dir_accessible(base: &Path) -> Result<PathBuf, Error> {
+ if !base.is_absolute() {
+ bail!("expected absolute path - got {:?}", base);
+ }
+
+ let chunk_dir = Self::chunk_dir(base);
+
+ if let Err(err) = std::fs::metadata(&chunk_dir) {
+ bail!("unable to open chunk store at {chunk_dir:?} - {err}");
+ }
+
+ Ok(chunk_dir)
+ }
+
/// Opens the chunk store with a new process locker.
///
/// Note that this must be used with care, as it's dangerous to create two instances on the
/// same base path, as closing the underlying ProcessLocker drops all locks from this process
/// on the lockfile (even if separate FDs)
- pub(crate) fn open<P: Into<PathBuf>>(
+ pub fn open<P: Into<PathBuf>>(
name: &str,
base: P,
sync_level: DatastoreFSyncLevel,
) -> Result<Self, Error> {
let base: PathBuf = base.into();
- if !base.is_absolute() {
- bail!("expected absolute path - got {:?}", base);
- }
-
- let chunk_dir = Self::chunk_dir(&base);
-
- if let Err(err) = std::fs::metadata(&chunk_dir) {
- bail!("unable to open chunk store '{name}' at {chunk_dir:?} - {err}");
- }
+ let chunk_dir = ChunkStore::chunk_dir_accessible(&base)?;
let lockfile_path = Self::lockfile_path(&base);
@@ -561,6 +569,53 @@ impl ChunkStore {
// unwrap: only `None` in unit tests
ProcessLocker::try_exclusive_lock(self.locker.clone().unwrap())
}
+
+ /// Checks permissions and owner of passed path.
+ fn check_permissions<T: AsRef<Path>>(path: T, file_mode: u32) -> Result<(), Error> {
+ match nix::sys::stat::stat(path.as_ref()) {
+ Ok(stat) => {
+ if stat.st_uid != u32::from(pbs_config::backup_user()?.uid)
+ || stat.st_gid != u32::from(pbs_config::backup_group()?.gid)
+ || stat.st_mode & 0o700 != file_mode
+ {
+ bail!(
+ "unable to open existing chunk store path {:?} - permissions or owner not correct",
+ path.as_ref(),
+ );
+ }
+ }
+ Err(err) => {
+ bail!(
+ "unable to open existing chunk store path {:?} - {err}",
+ path.as_ref(),
+ );
+ }
+ }
+ Ok(())
+ }
+
+    /// Verify vital files in datastore. Checks the owner and permissions of: the chunkstore, its
+    /// subdirectories and the lock file.
+ pub fn verify_chunkstore<T: AsRef<Path>>(path: T) -> Result<(), Error> {
+ // Check datastore root path perm/owner
+ ChunkStore::check_permissions(path.as_ref(), 0o700)?;
+
+ let chunk_dir = Self::chunk_dir(path.as_ref());
+ // Check datastore .chunks path perm/owner
+ ChunkStore::check_permissions(&chunk_dir, 0o700)?;
+
+ // Check all .chunks subdirectories
+ for i in 0..64 * 1024 {
+ let mut l1path = chunk_dir.clone();
+ l1path.push(format!("{:04x}", i));
+ ChunkStore::check_permissions(&l1path, 0o700)?;
+ }
+
+ // Check .lock file
+ let lockfile_path = Self::lockfile_path(path.as_ref());
+ ChunkStore::check_permissions(lockfile_path, 0o600)?;
+ Ok(())
+ }
}
#[test]
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index dff09a6f8e19..eeb78e77404a 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -1,7 +1,7 @@
use std::path::PathBuf;
use ::serde::{Deserialize, Serialize};
-use anyhow::Error;
+use anyhow::{bail, Error};
use hex::FromHex;
use serde_json::Value;
use tracing::warn;
@@ -70,21 +70,44 @@ pub(crate) fn do_create_datastore(
_lock: BackupLockGuard,
mut config: SectionConfigData,
datastore: DataStoreConfig,
+ reuse_datastore: bool,
) -> Result<(), Error> {
let path: PathBuf = datastore.path.clone().into();
+ if path.parent().is_none() {
+ bail!("cannot create datastore in root path");
+ }
+
let tuning: DatastoreTuning = serde_json::from_value(
DatastoreTuning::API_SCHEMA
.parse_property_string(datastore.tuning.as_deref().unwrap_or(""))?,
)?;
- let backup_user = pbs_config::backup_user()?;
- let _store = ChunkStore::create(
- &datastore.name,
- path,
- backup_user.uid,
- backup_user.gid,
- tuning.sync_level.unwrap_or_default(),
- )?;
+
+ if reuse_datastore {
+ ChunkStore::verify_chunkstore(&path)?;
+ ChunkStore::chunk_dir_accessible(&path)?;
+ } else {
+ let datastore_empty = std::fs::read_dir(path.clone()).map_or(true, |mut d| {
+ d.all(|dir| {
+ dir.map_or(false, |file| {
+ file.file_name()
+ .to_str()
+ .map_or(false, |name| name.starts_with('.'))
+ })
+ })
+ });
+ if !datastore_empty {
+ bail!("path not empty!");
+ }
+ let backup_user = pbs_config::backup_user()?;
+ let _store = ChunkStore::create(
+ &datastore.name,
+ path,
+ backup_user.uid,
+ backup_user.gid,
+ tuning.sync_level.unwrap_or_default(),
+ )?;
+ }
config.set_data(&datastore.name, "datastore", &datastore)?;
@@ -101,6 +124,12 @@ pub(crate) fn do_create_datastore(
type: DataStoreConfig,
flatten: true,
},
+ "reuse-datastore": {
+ type: Boolean,
+ optional: true,
+ default: false,
+ description: "Re-use existing datastore directory."
+ }
},
},
access: {
@@ -110,6 +139,7 @@ pub(crate) fn do_create_datastore(
/// Create new datastore config.
pub fn create_datastore(
config: DataStoreConfig,
+ reuse_datastore: bool,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let lock = pbs_config::datastore::lock_config()?;
@@ -153,8 +183,8 @@ pub fn create_datastore(
Some(config.name.to_string()),
auth_id.to_string(),
to_stdout,
- move |_worker| {
- do_create_datastore(lock, section_config, config)?;
+ move |worker| {
+ do_create_datastore(lock, section_config, config, reuse_datastore)?;
if let Some(prune_job_config) = prune_job_config {
do_create_prune_job(prune_job_config)
diff --git a/src/api2/node/disks/directory.rs b/src/api2/node/disks/directory.rs
index b3b4e12b4cb8..1ad9cc06415f 100644
--- a/src/api2/node/disks/directory.rs
+++ b/src/api2/node/disks/directory.rs
@@ -213,7 +213,9 @@ pub fn create_datastore_disk(
bail!("datastore '{}' already exists.", datastore.name);
}
- crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?;
+ crate::api2::config::datastore::do_create_datastore(
+ lock, config, datastore, false,
+ )?;
}
Ok(())
diff --git a/src/api2/node/disks/zfs.rs b/src/api2/node/disks/zfs.rs
index 469d5c606756..5fd3b3b7aec8 100644
--- a/src/api2/node/disks/zfs.rs
+++ b/src/api2/node/disks/zfs.rs
@@ -313,7 +313,9 @@ pub fn create_zpool(
bail!("datastore '{}' already exists.", datastore.name);
}
- crate::api2::config::datastore::do_create_datastore(lock, config, datastore)?;
+ crate::api2::config::datastore::do_create_datastore(
+ lock, config, datastore, false,
+ )?;
}
Ok(())
--
2.43.0
More information about the pbs-devel
mailing list