[pbs-devel] [PATCH proxmox-backup rebase 02/15] move ApiConfig, FileLogger and CommandoSocket to pbs-server workspace
Dietmar Maurer
dietmar at proxmox.com
Mon Sep 20 11:13:27 CEST 2021
ApiConfig: avoid using pbs_config::backup_user()
CommandoSocket: avoid using pbs_config::backup_user()
FileLogger: avoid using pbs_config::backup_user()
- use atomic_open_or_create_file()
Auth Trait: moved definitions to pbs-server/src/lib.rs
- removed CachedUserInfo parameter
- return user as String (not Authid)
---
pbs-server/Cargo.toml | 15 ++++++
.../config.rs => pbs-server/src/api_config.rs | 13 +++--
.../src}/command_socket.rs | 18 ++++---
{src/tools => pbs-server/src}/file_logger.rs | 46 +++++++++-------
pbs-server/src/lib.rs | 54 +++++++++++++++++++
{src/server => pbs-server/src}/state.rs | 4 +-
src/api2/admin/datastore.rs | 2 +-
src/api2/node/mod.rs | 4 +-
src/backup/datastore.rs | 3 +-
src/backup/verify.rs | 6 +--
src/bin/proxmox-backup-api.rs | 23 +++++---
src/bin/proxmox-backup-proxy.rs | 37 ++++++++-----
src/bin/proxmox-restore-daemon.rs | 4 +-
src/bin/proxmox_restore_daemon/auth.rs | 10 ++--
src/server/auth.rs | 33 +++---------
src/server/mod.rs | 13 +----
src/server/rest.rs | 25 +++++----
src/server/worker_task.rs | 22 ++++----
src/tools/daemon.rs | 9 ++--
src/tools/mod.rs | 24 ---------
tests/worker-task-abort.rs | 9 ++--
21 files changed, 210 insertions(+), 164 deletions(-)
rename src/server/config.rs => pbs-server/src/api_config.rs (92%)
rename {src/server => pbs-server/src}/command_socket.rs (94%)
rename {src/tools => pbs-server/src}/file_logger.rs (81%)
rename {src/server => pbs-server/src}/state.rs (97%)
diff --git a/pbs-server/Cargo.toml b/pbs-server/Cargo.toml
index e933c1e6..366a6e08 100644
--- a/pbs-server/Cargo.toml
+++ b/pbs-server/Cargo.toml
@@ -7,3 +7,18 @@ description = "REST server implementation"
[dependencies]
anyhow = "1.0"
+futures = "0.3"
+handlebars = "3.0"
+http = "0.2"
+hyper = { version = "0.14", features = [ "full" ] }
+lazy_static = "1.4"
+libc = "0.2"
+nix = "0.19.1"
+serde = { version = "1.0", features = [] }
+serde_json = "1.0"
+tokio = { version = "1.6", features = ["signal", "process"] }
+
+proxmox = { version = "0.13.0", features = [ "router"] }
+
+# fixme: remove this dependency (pbs_tools::broadcast_future)
+pbs-tools = { path = "../pbs-tools" }
diff --git a/src/server/config.rs b/pbs-server/src/api_config.rs
similarity index 92%
rename from src/server/config.rs
rename to pbs-server/src/api_config.rs
index 195d7a88..a319e204 100644
--- a/src/server/config.rs
+++ b/pbs-server/src/api_config.rs
@@ -12,8 +12,7 @@ use serde::Serialize;
use proxmox::api::{ApiMethod, Router, RpcEnvironmentType};
use proxmox::tools::fs::{create_path, CreateOptions};
-use crate::tools::{FileLogger, FileLogOptions};
-use super::auth::ApiAuth;
+use crate::{ApiAuth, FileLogger, FileLogOptions, CommandoSocket};
pub struct ApiConfig {
basedir: PathBuf,
@@ -134,7 +133,9 @@ impl ApiConfig {
pub fn enable_file_log<P>(
&mut self,
path: P,
- commando_sock: &mut super::CommandoSocket,
+ dir_opts: Option<CreateOptions>,
+ file_opts: Option<CreateOptions>,
+ commando_sock: &mut CommandoSocket,
) -> Result<(), Error>
where
P: Into<PathBuf>
@@ -142,15 +143,13 @@ impl ApiConfig {
let path: PathBuf = path.into();
if let Some(base) = path.parent() {
if !base.exists() {
- let backup_user = pbs_config::backup_user()?;
- let opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
- create_path(base, None, Some(opts)).map_err(|err| format_err!("{}", err))?;
+ create_path(base, None, dir_opts).map_err(|err| format_err!("{}", err))?;
}
}
let logger_options = FileLogOptions {
append: true,
- owned_by_backup: true,
+ file_opts: file_opts.unwrap_or(CreateOptions::default()),
..Default::default()
};
let request_log = Arc::new(Mutex::new(FileLogger::new(&path, logger_options)?));
diff --git a/src/server/command_socket.rs b/pbs-server/src/command_socket.rs
similarity index 94%
rename from src/server/command_socket.rs
rename to pbs-server/src/command_socket.rs
index e3bd0c12..1d62d21d 100644
--- a/src/server/command_socket.rs
+++ b/pbs-server/src/command_socket.rs
@@ -10,17 +10,17 @@ use tokio::net::UnixListener;
use serde::Serialize;
use serde_json::Value;
use nix::sys::socket;
+use nix::unistd::Gid;
-/// Listens on a Unix Socket to handle simple command asynchronously
-fn create_control_socket<P, F>(path: P, func: F) -> Result<impl Future<Output = ()>, Error>
+// Listens on a Unix Socket to handle simple command asynchronously
+fn create_control_socket<P, F>(path: P, gid: Gid, func: F) -> Result<impl Future<Output = ()>, Error>
where
P: Into<PathBuf>,
F: Fn(Value) -> Result<Value, Error> + Send + Sync + 'static,
{
let path: PathBuf = path.into();
- let backup_user = pbs_config::backup_user()?;
- let backup_gid = backup_user.gid.as_raw();
+ let gid = gid.as_raw();
let socket = UnixListener::bind(&path)?;
@@ -47,7 +47,7 @@ where
// check permissions (same gid, root user, or backup group)
let mygid = unsafe { libc::getgid() };
- if !(cred.uid() == 0 || cred.gid() == mygid || cred.gid() == backup_gid) {
+ if !(cred.uid() == 0 || cred.gid() == mygid || cred.gid() == gid) {
eprintln!("no permissions for {:?}", cred);
continue;
}
@@ -93,7 +93,7 @@ where
}
}.boxed();
- let abort_future = super::last_worker_future().map_err(|_| {});
+ let abort_future = crate::last_worker_future().map_err(|_| {});
let task = futures::future::select(
control_future,
abort_future,
@@ -154,15 +154,17 @@ pub type CommandoSocketFn = Box<(dyn Fn(Option<&Value>) -> Result<Value, Error>
/// You need to call `spawn()` to make the socket active.
pub struct CommandoSocket {
socket: PathBuf,
+ gid: Gid,
commands: HashMap<String, CommandoSocketFn>,
}
impl CommandoSocket {
- pub fn new<P>(path: P) -> Self
+ pub fn new<P>(path: P, gid: Gid) -> Self
where P: Into<PathBuf>,
{
CommandoSocket {
socket: path.into(),
+ gid,
commands: HashMap::new(),
}
}
@@ -170,7 +172,7 @@ impl CommandoSocket {
/// Spawn the socket and consume self, meaning you cannot register commands anymore after
/// calling this.
pub fn spawn(self) -> Result<(), Error> {
- let control_future = create_control_socket(self.socket.to_owned(), move |param| {
+ let control_future = create_control_socket(self.socket.to_owned(), self.gid, move |param| {
let param = param
.as_object()
.ok_or_else(|| format_err!("unable to parse parameters (expected json object)"))?;
diff --git a/src/tools/file_logger.rs b/pbs-server/src/file_logger.rs
similarity index 81%
rename from src/tools/file_logger.rs
rename to pbs-server/src/file_logger.rs
index 5b8db2c5..9755f987 100644
--- a/src/tools/file_logger.rs
+++ b/pbs-server/src/file_logger.rs
@@ -1,6 +1,10 @@
-use anyhow::Error;
use std::io::Write;
+use anyhow::Error;
+use nix::fcntl::OFlag;
+
+use proxmox::tools::fs::{CreateOptions, atomic_open_or_create_file};
+
/// Log messages with optional automatically added timestamps into files
///
/// Logs messages to file, and optionally to standard output.
@@ -9,8 +13,7 @@ use std::io::Write;
/// #### Example:
/// ```
/// # use anyhow::{bail, format_err, Error};
-/// use proxmox_backup::flog;
-/// use proxmox_backup::tools::{FileLogger, FileLogOptions};
+/// use pbs_server::{flog, FileLogger, FileLogOptions};
///
/// # std::fs::remove_file("test.log");
/// let options = FileLogOptions {
@@ -23,7 +26,7 @@ use std::io::Write;
/// # std::fs::remove_file("test.log");
/// ```
-#[derive(Debug, Default)]
+#[derive(Default)]
/// Options to control the behavior of a ['FileLogger'] instance
pub struct FileLogOptions {
/// Open underlying log file in append mode, useful when multiple concurrent processes
@@ -39,13 +42,11 @@ pub struct FileLogOptions {
pub to_stdout: bool,
/// Prefix messages logged to the file with the current local time as RFC 3339
pub prefix_time: bool,
- /// if set, the file is tried to be chowned by the backup:backup user/group
- /// Note, this is not designed race free as anybody could set it to another user afterwards
- /// anyway. It must thus be used by all processes which doe not run as backup uid/gid.
- pub owned_by_backup: bool,
+ /// File owner/group and mode
+ pub file_opts: CreateOptions,
+
}
-#[derive(Debug)]
pub struct FileLogger {
file: std::fs::File,
file_name: std::path::PathBuf,
@@ -82,19 +83,24 @@ impl FileLogger {
file_name: P,
options: &FileLogOptions,
) -> Result<std::fs::File, Error> {
- let file = std::fs::OpenOptions::new()
- .read(options.read)
- .write(true)
- .append(options.append)
- .create_new(options.exclusive)
- .create(!options.exclusive)
- .open(&file_name)?;
-
- if options.owned_by_backup {
- let backup_user = pbs_config::backup_user()?;
- nix::unistd::chown(file_name.as_ref(), Some(backup_user.uid), Some(backup_user.gid))?;
+
+ let mut flags = OFlag::O_CLOEXEC;
+
+ if options.read {
+ flags |= OFlag::O_RDWR;
+ } else {
+ flags |= OFlag::O_WRONLY;
+ }
+
+ if options.append {
+ flags |= OFlag::O_APPEND;
+ }
+ if options.exclusive {
+ flags |= OFlag::O_EXCL;
}
+ let file = atomic_open_or_create_file(&file_name, flags, &[], options.file_opts.clone())?;
+
Ok(file)
}
diff --git a/pbs-server/src/lib.rs b/pbs-server/src/lib.rs
index e69de29b..38dd610c 100644
--- a/pbs-server/src/lib.rs
+++ b/pbs-server/src/lib.rs
@@ -0,0 +1,54 @@
+use anyhow::{bail, Error};
+
+mod state;
+pub use state::*;
+
+mod command_socket;
+pub use command_socket::*;
+
+mod file_logger;
+pub use file_logger::{FileLogger, FileLogOptions};
+
+mod api_config;
+pub use api_config::ApiConfig;
+
+pub enum AuthError {
+ Generic(Error),
+ NoData,
+}
+
+impl From<Error> for AuthError {
+ fn from(err: Error) -> Self {
+ AuthError::Generic(err)
+ }
+}
+
+pub trait ApiAuth {
+ fn check_auth(
+ &self,
+ headers: &http::HeaderMap,
+ method: &hyper::Method,
+ ) -> Result<String, AuthError>;
+}
+
+static mut SHUTDOWN_REQUESTED: bool = false;
+
+pub fn request_shutdown() {
+ unsafe {
+ SHUTDOWN_REQUESTED = true;
+ }
+ crate::server_shutdown();
+}
+
+#[inline(always)]
+pub fn shutdown_requested() -> bool {
+ unsafe { SHUTDOWN_REQUESTED }
+}
+
+pub fn fail_on_shutdown() -> Result<(), Error> {
+ if shutdown_requested() {
+ bail!("Server shutdown requested - aborting task");
+ }
+ Ok(())
+}
+
diff --git a/src/server/state.rs b/pbs-server/src/state.rs
similarity index 97%
rename from src/server/state.rs
rename to pbs-server/src/state.rs
index d294c935..468ef0aa 100644
--- a/src/server/state.rs
+++ b/pbs-server/src/state.rs
@@ -42,7 +42,7 @@ pub fn server_state_init() -> Result<(), Error> {
while stream.recv().await.is_some() {
println!("got shutdown request (SIGINT)");
SERVER_STATE.lock().unwrap().reload_request = false;
- crate::tools::request_shutdown();
+ crate::request_shutdown();
}
}.boxed();
@@ -57,7 +57,7 @@ pub fn server_state_init() -> Result<(), Error> {
while stream.recv().await.is_some() {
println!("got reload request (SIGHUP)");
SERVER_STATE.lock().unwrap().reload_request = true;
- crate::tools::request_shutdown();
+ crate::request_shutdown();
}
}.boxed();
diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index 33700a90..690671a0 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -1505,7 +1505,7 @@ pub fn pxar_file_download(
EntryKind::Directory => {
let (sender, receiver) = tokio::sync::mpsc::channel(100);
let channelwriter = AsyncChannelWriter::new(sender, 1024 * 1024);
- crate::server::spawn_internal_task(
+ pbs_server::spawn_internal_task(
create_zip(channelwriter, decoder, path.clone(), false)
);
Body::wrap_stream(ReceiverStream::new(receiver).map_err(move |err| {
diff --git a/src/api2/node/mod.rs b/src/api2/node/mod.rs
index 898e6291..21b93e1c 100644
--- a/src/api2/node/mod.rs
+++ b/src/api2/node/mod.rs
@@ -295,12 +295,12 @@ fn upgrade_to_websocket(
let (ws, response) = WebSocket::new(parts.headers.clone())?;
- crate::server::spawn_internal_task(async move {
+ pbs_server::spawn_internal_task(async move {
let conn: Upgraded = match hyper::upgrade::on(Request::from_parts(parts, req_body))
.map_err(Error::from)
.await
{
- Ok(upgraded) => upgraded,
+ Ok(upgraded) => upgraded,
_ => bail!("error"),
};
diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index d248ecaf..bfa4e406 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -29,8 +29,7 @@ use pbs_tools::format::HumanByte;
use pbs_tools::fs::{lock_dir_noblock, DirLockGuard};
use pbs_tools::process_locker::ProcessLockSharedGuard;
use pbs_config::{open_backup_lockfile, BackupLockGuard};
-
-use crate::tools::fail_on_shutdown;
+use pbs_server::fail_on_shutdown;
lazy_static! {
static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStore>>> = Mutex::new(HashMap::new());
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 6e188c5f..405b8a33 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -172,7 +172,7 @@ fn verify_index_chunks(
let check_abort = |pos: usize| -> Result<(), Error> {
if pos & 1023 == 0 {
verify_worker.worker.check_abort()?;
- crate::tools::fail_on_shutdown()?;
+ pbs_server::fail_on_shutdown()?;
}
Ok(())
};
@@ -184,7 +184,7 @@ fn verify_index_chunks(
for (pos, _) in chunk_list {
verify_worker.worker.check_abort()?;
- crate::tools::fail_on_shutdown()?;
+ pbs_server::fail_on_shutdown()?;
let info = index.chunk_info(pos).unwrap();
@@ -376,7 +376,7 @@ pub fn verify_backup_dir_with_lock(
});
verify_worker.worker.check_abort()?;
- crate::tools::fail_on_shutdown()?;
+ pbs_server::fail_on_shutdown()?;
if let Err(err) = result {
task_log!(
diff --git a/src/bin/proxmox-backup-api.rs b/src/bin/proxmox-backup-api.rs
index c8751bc5..f8539b22 100644
--- a/src/bin/proxmox-backup-api.rs
+++ b/src/bin/proxmox-backup-api.rs
@@ -3,8 +3,10 @@ use futures::*;
use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
+use proxmox::tools::fs::CreateOptions;
use pbs_tools::auth::private_auth_key;
+use pbs_server::ApiConfig;
use proxmox_backup::server::{
self,
@@ -57,16 +59,25 @@ async fn run() -> Result<(), Error> {
}
let _ = csrf_secret(); // load with lazy_static
- let mut config = server::ApiConfig::new(
+ let mut config = ApiConfig::new(
pbs_buildcfg::JS_DIR,
&proxmox_backup::api2::ROUTER,
RpcEnvironmentType::PRIVILEGED,
default_api_auth(),
)?;
- let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
+ let backup_user = pbs_config::backup_user()?;
+ let mut commando_sock = pbs_server::CommandoSocket::new(crate::server::our_ctrl_sock(), backup_user.gid);
- config.enable_file_log(pbs_buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
+ let dir_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
+ let file_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
+
+ config.enable_file_log(
+ pbs_buildcfg::API_ACCESS_LOG_FN,
+ Some(dir_opts),
+ Some(file_opts),
+ &mut commando_sock,
+ )?;
let rest_server = RestServer::new(config);
@@ -78,7 +89,7 @@ async fn run() -> Result<(), Error> {
Ok(ready
.and_then(|_| hyper::Server::builder(incoming)
.serve(rest_server)
- .with_graceful_shutdown(server::shutdown_future())
+ .with_graceful_shutdown(pbs_server::shutdown_future())
.map_err(Error::from)
)
.map(|e| {
@@ -97,7 +108,7 @@ async fn run() -> Result<(), Error> {
let init_result: Result<(), Error> = try_block!({
server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
- server::server_state_init()?;
+ pbs_server::server_state_init()?;
Ok(())
});
@@ -107,7 +118,7 @@ async fn run() -> Result<(), Error> {
server.await?;
log::info!("server shutting down, waiting for active workers to complete");
- proxmox_backup::server::last_worker_future().await?;
+ pbs_server::last_worker_future().await?;
log::info!("done - exit server");
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 4240711f..d087764e 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -12,13 +12,15 @@ use serde_json::Value;
use proxmox::try_block;
use proxmox::api::RpcEnvironmentType;
use proxmox::sys::linux::socket::set_tcp_keepalive;
+use proxmox::tools::fs::CreateOptions;
+
+use pbs_server::ApiConfig;
use proxmox_backup::{
backup::DataStore,
server::{
auth::default_api_auth,
WorkerTask,
- ApiConfig,
rest::*,
jobstate::{
self,
@@ -106,9 +108,18 @@ async fn run() -> Result<(), Error> {
config.register_template("index", &indexpath)?;
config.register_template("console", "/usr/share/pve-xtermjs/index.html.hbs")?;
- let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
+ let backup_user = pbs_config::backup_user()?;
+ let mut commando_sock = pbs_server::CommandoSocket::new(crate::server::our_ctrl_sock(), backup_user.gid);
+
+ let dir_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
+ let file_opts = CreateOptions::new().owner(backup_user.uid).group(backup_user.gid);
- config.enable_file_log(pbs_buildcfg::API_ACCESS_LOG_FN, &mut commando_sock)?;
+ config.enable_file_log(
+ pbs_buildcfg::API_ACCESS_LOG_FN,
+ Some(dir_opts),
+ Some(file_opts),
+ &mut commando_sock,
+ )?;
let rest_server = RestServer::new(config);
@@ -158,7 +169,7 @@ async fn run() -> Result<(), Error> {
Ok(ready
.and_then(|_| hyper::Server::builder(connections)
.serve(rest_server)
- .with_graceful_shutdown(server::shutdown_future())
+ .with_graceful_shutdown(pbs_server::shutdown_future())
.map_err(Error::from)
)
.map_err(|err| eprintln!("server error: {}", err))
@@ -174,7 +185,7 @@ async fn run() -> Result<(), Error> {
let init_result: Result<(), Error> = try_block!({
server::register_task_control_commands(&mut commando_sock)?;
commando_sock.spawn()?;
- server::server_state_init()?;
+ pbs_server::server_state_init()?;
Ok(())
});
@@ -187,7 +198,7 @@ async fn run() -> Result<(), Error> {
server.await?;
log::info!("server shutting down, waiting for active workers to complete");
- proxmox_backup::server::last_worker_future().await?;
+ pbs_server::last_worker_future().await?;
log::info!("done - exit server");
Ok(())
@@ -304,14 +315,14 @@ async fn accept_connection(
}
fn start_stat_generator() {
- let abort_future = server::shutdown_future();
+ let abort_future = pbs_server::shutdown_future();
let future = Box::pin(run_stat_generator());
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
}
fn start_task_scheduler() {
- let abort_future = server::shutdown_future();
+ let abort_future = pbs_server::shutdown_future();
let future = Box::pin(run_task_scheduler());
let task = futures::future::select(future, abort_future);
tokio::spawn(task.map(|_| ()));
@@ -706,12 +717,12 @@ async fn schedule_task_log_rotate() {
async fn command_reopen_logfiles() -> Result<(), Error> {
// only care about the most recent daemon instance for each, proxy & api, as other older ones
// should not respond to new requests anyway, but only finish their current one and then exit.
- let sock = server::our_ctrl_sock();
- let f1 = server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");
+ let sock = crate::server::our_ctrl_sock();
+ let f1 = pbs_server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");
- let pid = server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
- let sock = server::ctrl_sock_from_pid(pid);
- let f2 = server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");
+ let pid = crate::server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_API_PID_FN)?;
+ let sock = crate::server::ctrl_sock_from_pid(pid);
+ let f2 = pbs_server::send_command(sock, "{\"command\":\"api-access-log-reopen\"}\n");
match futures::join!(f1, f2) {
(Err(e1), Err(e2)) => Err(format_err!("reopen commands failed, proxy: {}; api: {}", e1, e2)),
diff --git a/src/bin/proxmox-restore-daemon.rs b/src/bin/proxmox-restore-daemon.rs
index e9018ecc..45dd2c95 100644
--- a/src/bin/proxmox-restore-daemon.rs
+++ b/src/bin/proxmox-restore-daemon.rs
@@ -15,9 +15,11 @@ use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
use proxmox::api::RpcEnvironmentType;
-use proxmox_backup::server::{rest::*, ApiConfig};
use pbs_client::DEFAULT_VSOCK_PORT;
+use pbs_server::ApiConfig;
+
+use proxmox_backup::server::rest::*;
mod proxmox_restore_daemon;
use proxmox_restore_daemon::*;
diff --git a/src/bin/proxmox_restore_daemon/auth.rs b/src/bin/proxmox_restore_daemon/auth.rs
index 30309bb8..e24ef160 100644
--- a/src/bin/proxmox_restore_daemon/auth.rs
+++ b/src/bin/proxmox_restore_daemon/auth.rs
@@ -4,10 +4,7 @@ use std::io::prelude::*;
use anyhow::{bail, format_err, Error};
-use pbs_api_types::Authid;
-
-use pbs_config::CachedUserInfo;
-use proxmox_backup::server::auth::{ApiAuth, AuthError};
+use pbs_server::{ApiAuth, AuthError};
const TICKET_FILE: &str = "/ticket";
@@ -20,11 +17,10 @@ impl ApiAuth for StaticAuth {
&self,
headers: &http::HeaderMap,
_method: &hyper::Method,
- _user_info: &CachedUserInfo,
- ) -> Result<Authid, AuthError> {
+ ) -> Result<String, AuthError> {
match headers.get(hyper::header::AUTHORIZATION) {
Some(header) if header.to_str().unwrap_or("") == &self.ticket => {
- Ok(Authid::root_auth_id().to_owned())
+ Ok(String::from("root at pam"))
}
_ => {
return Err(AuthError::Generic(format_err!(
diff --git a/src/server/auth.rs b/src/server/auth.rs
index 19933177..3e2d0c89 100644
--- a/src/server/auth.rs
+++ b/src/server/auth.rs
@@ -1,11 +1,12 @@
//! Provides authentication primitives for the HTTP server
-use anyhow::{format_err, Error};
+use anyhow::format_err;
use std::sync::Arc;
use pbs_tools::ticket::{self, Ticket};
use pbs_config::{token_shadow, CachedUserInfo};
use pbs_api_types::{Authid, Userid};
+use pbs_server::{ApiAuth, AuthError};
use crate::auth_helpers::*;
use crate::tools;
@@ -13,26 +14,6 @@ use crate::tools;
use hyper::header;
use percent_encoding::percent_decode_str;
-pub enum AuthError {
- Generic(Error),
- NoData,
-}
-
-impl From<Error> for AuthError {
- fn from(err: Error) -> Self {
- AuthError::Generic(err)
- }
-}
-
-pub trait ApiAuth {
- fn check_auth(
- &self,
- headers: &http::HeaderMap,
- method: &hyper::Method,
- user_info: &CachedUserInfo,
- ) -> Result<Authid, AuthError>;
-}
-
struct UserAuthData {
ticket: String,
csrf_token: Option<String>,
@@ -80,8 +61,10 @@ impl ApiAuth for UserApiAuth {
&self,
headers: &http::HeaderMap,
method: &hyper::Method,
- user_info: &CachedUserInfo,
- ) -> Result<Authid, AuthError> {
+ ) -> Result<String, AuthError> {
+
+ let user_info = CachedUserInfo::new()?;
+
let auth_data = Self::extract_auth_data(headers);
match auth_data {
Some(AuthData::User(user_auth_data)) => {
@@ -111,7 +94,7 @@ impl ApiAuth for UserApiAuth {
}
}
- Ok(auth_id)
+ Ok(auth_id.to_string())
}
Some(AuthData::ApiToken(api_token)) => {
let mut parts = api_token.splitn(2, ':');
@@ -133,7 +116,7 @@ impl ApiAuth for UserApiAuth {
token_shadow::verify_secret(&tokenid, &tokensecret)?;
- Ok(tokenid)
+ Ok(tokenid.to_string())
}
None => Err(AuthError::NoData),
}
diff --git a/src/server/mod.rs b/src/server/mod.rs
index 52c6e7bc..2377f267 100644
--- a/src/server/mod.rs
+++ b/src/server/mod.rs
@@ -52,21 +52,12 @@ pub use environment::*;
mod upid;
pub use upid::*;
-mod state;
-pub use state::*;
-
-mod command_socket;
-pub use command_socket::*;
-
mod worker_task;
pub use worker_task::*;
mod h2service;
pub use h2service::*;
-pub mod config;
-pub use config::*;
-
pub mod formatter;
#[macro_use]
@@ -98,7 +89,7 @@ pub mod pull;
pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> {
let proxy_pid = crate::server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
let sock = crate::server::ctrl_sock_from_pid(proxy_pid);
- let _: Value = crate::server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
+ let _: Value = pbs_server::send_raw_command(sock, "{\"command\":\"reload-certificate\"}\n")
.await?;
Ok(())
}
@@ -106,7 +97,7 @@ pub(crate) async fn reload_proxy_certificate() -> Result<(), Error> {
pub(crate) async fn notify_datastore_removed() -> Result<(), Error> {
let proxy_pid = crate::server::read_pid(pbs_buildcfg::PROXMOX_BACKUP_PROXY_PID_FN)?;
let sock = crate::server::ctrl_sock_from_pid(proxy_pid);
- let _: Value = crate::server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
+ let _: Value = pbs_server::send_raw_command(sock, "{\"command\":\"datastore-removed\"}\n")
.await?;
Ok(())
}
diff --git a/src/server/rest.rs b/src/server/rest.rs
index a648832a..84053902 100644
--- a/src/server/rest.rs
+++ b/src/server/rest.rs
@@ -29,21 +29,20 @@ use proxmox::api::{
RpcEnvironmentType,
};
use proxmox::http_err;
+use proxmox::tools::fs::CreateOptions;
use pbs_tools::compression::{DeflateEncoder, Level};
use pbs_tools::stream::AsyncReaderStream;
use pbs_api_types::{Authid, Userid};
+use pbs_server::{ApiConfig, FileLogger, FileLogOptions, AuthError};
-use super::auth::AuthError;
use super::environment::RestEnvironment;
use super::formatter::*;
-use super::ApiConfig;
use crate::auth_helpers::*;
use pbs_config::CachedUserInfo;
use crate::tools;
use crate::tools::compression::CompressionMethod;
-use crate::tools::FileLogger;
extern "C" {
fn tzset();
@@ -196,10 +195,16 @@ fn log_response(
}
}
pub fn auth_logger() -> Result<FileLogger, Error> {
- let logger_options = tools::FileLogOptions {
+ let backup_user = pbs_config::backup_user()?;
+
+ let file_opts = CreateOptions::new()
+ .owner(backup_user.uid)
+ .group(backup_user.gid);
+
+ let logger_options = FileLogOptions {
append: true,
prefix_time: true,
- owned_by_backup: true,
+ file_opts,
..Default::default()
};
FileLogger::new(pbs_buildcfg::API_AUTH_LOG_FN, logger_options)
@@ -681,7 +686,6 @@ async fn handle_request(
rpcenv.set_client_ip(Some(*peer));
- let user_info = CachedUserInfo::new()?;
let auth = &api.api_auth;
let delay_unauth_time = std::time::Instant::now() + std::time::Duration::from_millis(3000);
@@ -708,8 +712,8 @@ async fn handle_request(
}
if auth_required {
- match auth.check_auth(&parts.headers, &method, &user_info) {
- Ok(authid) => rpcenv.set_auth_id(Some(authid.to_string())),
+ match auth.check_auth(&parts.headers, &method) {
+ Ok(authid) => rpcenv.set_auth_id(Some(authid)),
Err(auth_err) => {
let err = match auth_err {
AuthError::Generic(err) => err,
@@ -738,6 +742,8 @@ async fn handle_request(
}
Some(api_method) => {
let auth_id = rpcenv.get_auth_id();
+ let user_info = CachedUserInfo::new()?;
+
if !check_api_permission(
api_method.access.permission,
auth_id.as_deref(),
@@ -779,8 +785,9 @@ async fn handle_request(
if comp_len == 0 {
let language = extract_lang_header(&parts.headers);
- match auth.check_auth(&parts.headers, &method, &user_info) {
+ match auth.check_auth(&parts.headers, &method) {
Ok(auth_id) => {
+ let auth_id: Authid = auth_id.parse()?;
if !auth_id.is_token() {
let userid = auth_id.user();
let new_csrf_token = assemble_csrf_prevention_token(csrf_secret(), userid);
diff --git a/src/server/worker_task.rs b/src/server/worker_task.rs
index 2ef8ba9d..9f28e3c2 100644
--- a/src/server/worker_task.rs
+++ b/src/server/worker_task.rs
@@ -20,12 +20,10 @@ use pbs_buildcfg;
use pbs_tools::logrotate::{LogRotate, LogRotateFiles};
use pbs_api_types::{Authid, TaskStateType, UPID};
use pbs_config::{open_backup_lockfile, BackupLockGuard};
+use pbs_server::{CommandoSocket, FileLogger, FileLogOptions};
use super::UPIDExt;
-use crate::server;
-use crate::tools::{FileLogger, FileLogOptions};
-
macro_rules! taskdir {
($subdir:expr) => (concat!(pbs_buildcfg::PROXMOX_BACKUP_LOG_DIR_M!(), "/tasks", $subdir))
}
@@ -41,7 +39,7 @@ lazy_static! {
/// checks if the task UPID refers to a worker from this process
fn is_local_worker(upid: &UPID) -> bool {
- upid.pid == server::pid() && upid.pstart == server::pstart()
+ upid.pid == crate::server::pid() && upid.pstart == crate::server::pstart()
}
/// Test if the task is still running
@@ -54,14 +52,14 @@ pub async fn worker_is_active(upid: &UPID) -> Result<bool, Error> {
return Ok(false);
}
- let sock = server::ctrl_sock_from_pid(upid.pid);
+ let sock = crate::server::ctrl_sock_from_pid(upid.pid);
let cmd = json!({
"command": "worker-task-status",
"args": {
"upid": upid.to_string(),
},
});
- let status = super::send_command(sock, &cmd).await?;
+ let status = pbs_server::send_command(sock, &cmd).await?;
if let Some(active) = status.as_bool() {
Ok(active)
@@ -84,7 +82,7 @@ pub fn worker_is_active_local(upid: &UPID) -> bool {
}
pub fn register_task_control_commands(
- commando_sock: &mut super::CommandoSocket,
+ commando_sock: &mut CommandoSocket,
) -> Result<(), Error> {
fn get_upid(args: Option<&Value>) -> Result<UPID, Error> {
let args = if let Some(args) = args { args } else { bail!("missing args") };
@@ -128,14 +126,14 @@ pub fn abort_worker_async(upid: UPID) {
pub async fn abort_worker(upid: UPID) -> Result<(), Error> {
- let sock = server::ctrl_sock_from_pid(upid.pid);
+ let sock = crate::server::ctrl_sock_from_pid(upid.pid);
let cmd = json!({
"command": "worker-task-abort",
"args": {
"upid": upid.to_string(),
},
});
- super::send_command(sock, &cmd).map_ok(|_| ()).await
+ pbs_server::send_command(sock, &cmd).map_ok(|_| ()).await
}
fn parse_worker_status_line(line: &str) -> Result<(String, UPID, Option<TaskState>), Error> {
@@ -579,7 +577,6 @@ impl Iterator for TaskListInfoIterator {
/// task/future. Each task can `log()` messages, which are stored
/// persistently to files. Task should poll the `abort_requested`
/// flag, and stop execution when requested.
-#[derive(Debug)]
pub struct WorkerTask {
upid: UPID,
data: Mutex<WorkerTaskData>,
@@ -593,7 +590,6 @@ impl std::fmt::Display for WorkerTask {
}
}
-#[derive(Debug)]
struct WorkerTaskData {
logger: FileLogger,
progress: f64, // 0..1
@@ -642,7 +638,7 @@ impl WorkerTask {
{
let mut hash = WORKER_TASK_LIST.lock().unwrap();
hash.insert(task_id, worker.clone());
- super::set_worker_count(hash.len());
+ pbs_server::set_worker_count(hash.len());
}
update_active_workers(Some(&upid))?;
@@ -729,7 +725,7 @@ impl WorkerTask {
WORKER_TASK_LIST.lock().unwrap().remove(&self.upid.task_id);
let _ = update_active_workers(None);
- super::set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
+ pbs_server::set_worker_count(WORKER_TASK_LIST.lock().unwrap().len());
}
/// Log a message.
diff --git a/src/tools/daemon.rs b/src/tools/daemon.rs
index d298bf16..8c56e053 100644
--- a/src/tools/daemon.rs
+++ b/src/tools/daemon.rs
@@ -16,7 +16,6 @@ use futures::future::{self, Either};
use proxmox::tools::io::{ReadExt, WriteExt};
-use crate::server;
use crate::tools::{fd_change_cloexec, self};
#[link(name = "systemd")]
@@ -274,11 +273,11 @@ where
).await?;
let server_future = create_service(listener, NotifyReady)?;
- let shutdown_future = server::shutdown_future();
+ let shutdown_future = pbs_server::shutdown_future();
let finish_future = match future::select(server_future, shutdown_future).await {
Either::Left((_, _)) => {
- crate::tools::request_shutdown(); // make sure we are in shutdown mode
+ pbs_server::request_shutdown(); // make sure we are in shutdown mode
None
}
Either::Right((_, server_future)) => Some(server_future),
@@ -286,7 +285,7 @@ where
let mut reloader = Some(reloader);
- if server::is_reload_request() {
+ if pbs_server::is_reload_request() {
log::info!("daemon reload...");
if let Err(e) = systemd_notify(SystemdNotify::Reloading) {
log::error!("failed to notify systemd about the state change: {}", e);
@@ -305,7 +304,7 @@ where
}
// FIXME: this is a hack, replace with sd_notify_barrier when available
- if server::is_reload_request() {
+ if pbs_server::is_reload_request() {
wait_service_is_not_state(service_name, "reloading").await?;
}
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
index 64e592b2..f8b363f5 100644
--- a/src/tools/mod.rs
+++ b/src/tools/mod.rs
@@ -31,9 +31,6 @@ pub mod ticket;
pub mod parallel_handler;
pub use parallel_handler::ParallelHandler;
-mod file_logger;
-pub use file_logger::{FileLogger, FileLogOptions};
-
/// Shortcut for md5 sums.
pub fn md5sum(data: &[u8]) -> Result<DigestBytes, Error> {
hash(MessageDigest::md5(), data).map_err(Error::from)
@@ -123,27 +120,6 @@ pub fn fd_change_cloexec(fd: RawFd, on: bool) -> Result<(), Error> {
Ok(())
}
-static mut SHUTDOWN_REQUESTED: bool = false;
-
-pub fn request_shutdown() {
- unsafe {
- SHUTDOWN_REQUESTED = true;
- }
- crate::server::server_shutdown();
-}
-
-#[inline(always)]
-pub fn shutdown_requested() -> bool {
- unsafe { SHUTDOWN_REQUESTED }
-}
-
-pub fn fail_on_shutdown() -> Result<(), Error> {
- if shutdown_requested() {
- bail!("Server shutdown requested - aborting task");
- }
- Ok(())
-}
-
/// safe wrapper for `nix::sys::socket::socketpair` defaulting to `O_CLOEXEC` and guarding the file
/// descriptors.
pub fn socketpair() -> Result<(Fd, Fd), Error> {
diff --git a/tests/worker-task-abort.rs b/tests/worker-task-abort.rs
index 736ae659..fe211571 100644
--- a/tests/worker-task-abort.rs
+++ b/tests/worker-task-abort.rs
@@ -1,6 +1,5 @@
use anyhow::{bail, Error};
-#[macro_use]
extern crate proxmox_backup;
extern crate tokio;
@@ -10,8 +9,8 @@ use proxmox::try_block;
use pbs_api_types::{Authid, UPID};
+use pbs_server::{flog, CommandoSocket};
use proxmox_backup::server;
-use proxmox_backup::tools;
fn garbage_collection(worker: &server::WorkerTask) -> Result<(), Error> {
@@ -45,11 +44,11 @@ fn worker_task_abort() -> Result<(), Error> {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async move {
- let mut commando_sock = server::CommandoSocket::new(server::our_ctrl_sock());
+ let mut commando_sock = CommandoSocket::new(server::our_ctrl_sock(), nix::unistd::Gid::current());
let init_result: Result<(), Error> = try_block!({
server::register_task_control_commands(&mut commando_sock)?;
- server::server_state_init()?;
+ pbs_server::server_state_init()?;
Ok(())
});
@@ -73,7 +72,7 @@ fn worker_task_abort() -> Result<(), Error> {
println!("WORKER {}", worker);
let result = garbage_collection(&worker);
- tools::request_shutdown();
+ pbs_server::request_shutdown();
if let Err(err) = result {
println!("got expected error: {}", err);
--
2.30.2
More information about the pbs-devel
mailing list