[pbs-devel] [PATCH proxmox-backup 15/22] file-restore-daemon: add binary with virtio-vsock API server
Stefan Reiter
s.reiter at proxmox.com
Tue Feb 16 18:07:03 CET 2021
Implements the base of a small daemon to run within a file-restore VM.
The binary spawns an API server on a virtio-vsock socket, listening for
connections from the host. The socket setup and accept loop are done
mostly by hand via the standard Unix socket API, since tokio/hyper have
no built-in support for vsock. Once we have the accepted file
descriptor, we can wrap it in a UnixStream and use our tower service
implementation on top of it.
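For illustration only (not part of this patch): the host side could open
such a connection with roughly the sketch below. It assumes the same nix
socket API the daemon uses, a hypothetical connect_vsock() helper and
guest CID/port parameters; the actual client side is expected to live in
'proxmox-file-restore'.

    // hypothetical host-side sketch, mirroring the daemon's accept path
    use std::os::unix::{io::FromRawFd, net};

    use anyhow::Error;
    use nix::sys::socket::{
        connect, socket, AddressFamily, SockAddr, SockFlag, SockType, VsockAddr,
    };

    // note: must be called from within a tokio runtime, since from_std()
    // registers the fd with the reactor
    fn connect_vsock(guest_cid: u32, port: u32) -> Result<tokio::net::UnixStream, Error> {
        // same socket type as get_vsock_fd() in the daemon, but connecting instead of listening
        let sock_fd = socket(AddressFamily::Vsock, SockType::Stream, SockFlag::empty(), None)?;
        connect(sock_fd, &SockAddr::Vsock(VsockAddr::new(guest_cid, port)))?;

        // hand the raw fd to tokio as a UnixStream, just like the daemon does after accept()
        let stream = unsafe { net::UnixStream::from_raw_fd(sock_fd) };
        stream.set_nonblocking(true)?;
        Ok(tokio::net::UnixStream::from_std(stream)?)
    }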
The binary is deliberately not installed in the usual $PATH location,
since it shouldn't be executed on the host by a user anyway.
For now, only one simple API call ('status') is implemented, to
demonstrate and test proxmox::api functionality.
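As a standalone illustration (not part of this patch), the response body
of 'status' is a flat JSON object; a struct mirroring the type added in
src/api2/types/file_restore.rs serializes like this (the REST server
applies its usual response formatting on top):

    use serde::Serialize;

    // mirrors RestoreDaemonStatus from src/api2/types/file_restore.rs
    #[derive(Serialize)]
    #[serde(rename_all = "kebab-case")]
    struct RestoreDaemonStatus {
        /// VM uptime in seconds
        uptime: i64,
    }

    fn main() {
        let status = RestoreDaemonStatus { uptime: 42 };
        // prints: {"uptime":42}
        println!("{}", serde_json::to_string(&status).unwrap());
    }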
Since the REST server implementation uses the log!() macro, we can
redirect its output to stdout by registering env_logger as the logging
target. env_logger is already in our dependency tree via zstd/bindgen.
Signed-off-by: Stefan Reiter <s.reiter at proxmox.com>
---
Cargo.toml | 1 +
Makefile | 9 ++-
debian/control | 1 +
debian/proxmox-backup-client.install | 1 +
src/api2/types/file_restore.rs | 12 +++
src/api2/types/mod.rs | 3 +
src/bin/proxmox-restore-daemon.rs | 104 ++++++++++++++++++++++++++
src/bin/proxmox_restore_daemon/api.rs | 45 +++++++++++
src/bin/proxmox_restore_daemon/mod.rs | 3 +
9 files changed, 178 insertions(+), 1 deletion(-)
create mode 100644 src/api2/types/file_restore.rs
create mode 100644 src/bin/proxmox-restore-daemon.rs
create mode 100644 src/bin/proxmox_restore_daemon/api.rs
create mode 100644 src/bin/proxmox_restore_daemon/mod.rs
diff --git a/Cargo.toml b/Cargo.toml
index 28ca8e64..de42c2ff 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -29,6 +29,7 @@ bitflags = "1.2.1"
bytes = "1.0"
crc32fast = "1"
endian_trait = { version = "0.6", features = ["arrays"] }
+env_logger = "0.7"
anyhow = "1.0"
futures = "0.3"
h2 = { version = "0.3", features = [ "stream" ] }
diff --git a/Makefile b/Makefile
index 3b865083..f177e79d 100644
--- a/Makefile
+++ b/Makefile
@@ -25,6 +25,10 @@ SERVICE_BIN := \
proxmox-backup-proxy \
proxmox-daily-update
+# Single file restore daemon
+RESTORE_BIN := \
+ proxmox-restore-daemon
+
ifeq ($(BUILD_MODE), release)
CARGO_BUILD_ARGS += --release
COMPILEDIR := target/release
@@ -39,7 +43,7 @@ endif
CARGO ?= cargo
COMPILED_BINS := \
- $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN))
+ $(addprefix $(COMPILEDIR)/,$(USR_BIN) $(USR_SBIN) $(SERVICE_BIN) $(RESTORE_BIN))
export DEB_VERSION DEB_VERSION_UPSTREAM
@@ -151,6 +155,9 @@ install: $(COMPILED_BINS)
install -m4755 -o root -g root $(COMPILEDIR)/sg-tape-cmd $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/sg-tape-cmd
$(foreach i,$(SERVICE_BIN), \
install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/ ;)
+ install -dm755 $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore
+ $(foreach i,$(RESTORE_BIN), \
+ install -m755 $(COMPILEDIR)/$(i) $(DESTDIR)$(LIBEXECDIR)/proxmox-backup/file-restore/ ;)
$(MAKE) -C www install
$(MAKE) -C docs install
diff --git a/debian/control b/debian/control
index 57d47a85..f4d81732 100644
--- a/debian/control
+++ b/debian/control
@@ -15,6 +15,7 @@ Build-Depends: debhelper (>= 11),
librust-crossbeam-channel-0.5+default-dev,
librust-endian-trait-0.6+arrays-dev,
librust-endian-trait-0.6+default-dev,
+ librust-env-logger-0.7+default-dev,
librust-futures-0.3+default-dev,
librust-h2-0.3+default-dev,
librust-h2-0.3+stream-dev,
diff --git a/debian/proxmox-backup-client.install b/debian/proxmox-backup-client.install
index 74b568f1..b203f152 100644
--- a/debian/proxmox-backup-client.install
+++ b/debian/proxmox-backup-client.install
@@ -1,5 +1,6 @@
usr/bin/proxmox-backup-client
usr/bin/pxar
+usr/lib/x86_64-linux-gnu/proxmox-backup/file-restore/proxmox-restore-daemon
usr/share/man/man1/proxmox-backup-client.1
usr/share/man/man1/pxar.1
usr/share/zsh/vendor-completions/_proxmox-backup-client
diff --git a/src/api2/types/file_restore.rs b/src/api2/types/file_restore.rs
new file mode 100644
index 00000000..cd8df16a
--- /dev/null
+++ b/src/api2/types/file_restore.rs
@@ -0,0 +1,12 @@
+use serde::{Deserialize, Serialize};
+use proxmox::api::api;
+
+#[api()]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// General status information about a running VM file-restore daemon
+pub struct RestoreDaemonStatus {
+    /// VM uptime in seconds
+    pub uptime: i64,
+}
+
diff --git a/src/api2/types/mod.rs b/src/api2/types/mod.rs
index 4c663335..763b86fd 100644
--- a/src/api2/types/mod.rs
+++ b/src/api2/types/mod.rs
@@ -34,6 +34,9 @@ pub use userid::{PROXMOX_TOKEN_ID_SCHEMA, PROXMOX_TOKEN_NAME_SCHEMA, PROXMOX_GRO
mod tape;
pub use tape::*;
+mod file_restore;
+pub use file_restore::*;
+
// File names: may not contain slashes, may not start with "."
pub const FILENAME_FORMAT: ApiStringFormat = ApiStringFormat::VerifyFn(|name| {
if name.starts_with('.') {
diff --git a/src/bin/proxmox-restore-daemon.rs b/src/bin/proxmox-restore-daemon.rs
new file mode 100644
index 00000000..1ec90794
--- /dev/null
+++ b/src/bin/proxmox-restore-daemon.rs
@@ -0,0 +1,104 @@
+//! Daemon binary to run inside a micro-VM for secure single file restore of disk images
+use anyhow::{bail, Error};
+use log::error;
+
+use std::os::unix::{
+    io::{FromRawFd, RawFd},
+    net,
+};
+use std::path::Path;
+
+use tokio::sync::mpsc;
+use tokio_stream::wrappers::ReceiverStream;
+
+use proxmox::api::RpcEnvironmentType;
+use proxmox_backup::client::DEFAULT_VSOCK_PORT;
+use proxmox_backup::server::{rest::*, ApiConfig};
+
+mod proxmox_restore_daemon;
+use proxmox_restore_daemon::*;
+
+/// Maximum number of pending requests. If saturated, virtio-vsock returns ETIMEDOUT immediately.
+/// We should never have more than a few requests queued, so use a low number.
+pub const MAX_PENDING: usize = 32;
+
+/// Will be present in base initramfs
+pub const VM_DETECT_FILE: &str = "/restore-vm-marker";
+
+/// This is expected to be run by 'proxmox-file-restore' within a mini-VM
+fn main() -> Result<(), Error> {
+    if !Path::new(VM_DETECT_FILE).exists() {
+        bail!(concat!(
+            "This binary is not supposed to be run manually. ",
+            "Please use 'proxmox-file-restore' instead."
+        ));
+    }
+
+    // we don't have a real syslog (and no persistence), so use env_logger to print to a log file
+    // (via stdout to a serial terminal attached by QEMU)
+    env_logger::from_env(env_logger::Env::default().default_filter_or("info"))
+        .write_style(env_logger::WriteStyle::Never)
+        .init();
+
+    proxmox_backup::tools::runtime::main(run())
+}
+
+async fn run() -> Result<(), Error> {
+    let config = ApiConfig::new("", &ROUTER, RpcEnvironmentType::PUBLIC)?;
+    let rest_server = RestServer::new(config);
+
+    let vsock_fd = get_vsock_fd()?;
+    let connections = accept_vsock_connections(vsock_fd);
+    let receiver_stream = ReceiverStream::new(connections);
+    let acceptor = hyper::server::accept::from_stream(receiver_stream);
+
+    hyper::Server::builder(acceptor).serve(rest_server).await?;
+
+    bail!("hyper server exited");
+}
+
+fn accept_vsock_connections(
+    vsock_fd: RawFd,
+) -> mpsc::Receiver<Result<tokio::net::UnixStream, Error>> {
+    use nix::sys::socket::*;
+    let (sender, receiver) = mpsc::channel(MAX_PENDING);
+
+    tokio::spawn(async move {
+        loop {
+            let stream: Result<tokio::net::UnixStream, Error> = tokio::task::block_in_place(|| {
+                // we need to accept manually, as UnixListener aborts if socket type != AF_UNIX ...
+                let client_fd = accept(vsock_fd)?;
+                let stream = unsafe { net::UnixStream::from_raw_fd(client_fd) };
+                stream.set_nonblocking(true)?;
+                tokio::net::UnixStream::from_std(stream).map_err(|err| err.into())
+            });
+
+            match stream {
+                Ok(stream) => {
+                    if sender.send(Ok(stream)).await.is_err() {
+                        error!("connection accept channel was closed");
+                    }
+                }
+                Err(err) => {
+                    error!("error accepting vsock connection: {}", err);
+                }
+            }
+        }
+    });
+
+    receiver
+}
+
+fn get_vsock_fd() -> Result<RawFd, Error> {
+    use nix::sys::socket::*;
+    let sock_fd = socket(
+        AddressFamily::Vsock,
+        SockType::Stream,
+        SockFlag::empty(),
+        None,
+    )?;
+    let sock_addr = VsockAddr::new(libc::VMADDR_CID_ANY, DEFAULT_VSOCK_PORT as u32);
+    bind(sock_fd, &SockAddr::Vsock(sock_addr))?;
+    listen(sock_fd, MAX_PENDING)?;
+    Ok(sock_fd)
+}
diff --git a/src/bin/proxmox_restore_daemon/api.rs b/src/bin/proxmox_restore_daemon/api.rs
new file mode 100644
index 00000000..3c642aaf
--- /dev/null
+++ b/src/bin/proxmox_restore_daemon/api.rs
@@ -0,0 +1,45 @@
+//! File-restore API running inside the restore VM
+use anyhow::Error;
+use serde_json::Value;
+use std::fs;
+
+use proxmox::api::{api, ApiMethod, Permission, Router, RpcEnvironment, SubdirMap};
+use proxmox::list_subdirs_api_method;
+
+use proxmox_backup::api2::types::*;
+
+// NOTE: All API endpoints must have Permission::World, as the configs for authentication do not
+// exist within the restore VM. Safety is guaranteed since we use a low port, so only root on the
+// host can contact us - and there, the proxmox-backup-client already validates permissions.
+
+const SUBDIRS: SubdirMap = &[("status", &Router::new().get(&API_METHOD_STATUS))];
+
+pub const ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(SUBDIRS))
+    .subdirs(SUBDIRS);
+
+fn read_uptime() -> Result<f32, Error> {
+    let uptime = fs::read_to_string("/proc/uptime")?;
+    // unwrap the Option; if /proc/uptime is empty we have bigger problems
+    Ok(uptime.split_ascii_whitespace().next().unwrap().parse()?)
+}
+
+#[api(
+    access: {
+        description: "Permissions are handled outside restore VM.",
+        permission: &Permission::World,
+    },
+    returns: {
+        type: RestoreDaemonStatus,
+    }
+)]
+/// General status information
+fn status(
+    _param: Value,
+    _info: &ApiMethod,
+    _rpcenv: &mut dyn RpcEnvironment,
+) -> Result<RestoreDaemonStatus, Error> {
+    Ok(RestoreDaemonStatus {
+        uptime: read_uptime()? as i64,
+    })
+}
diff --git a/src/bin/proxmox_restore_daemon/mod.rs b/src/bin/proxmox_restore_daemon/mod.rs
new file mode 100644
index 00000000..d938a5bb
--- /dev/null
+++ b/src/bin/proxmox_restore_daemon/mod.rs
@@ -0,0 +1,3 @@
+//! File restore VM related functionality
+mod api;
+pub use api::*;
--
2.20.1