[pbs-devel] [RFC proxmox-backup 8/8] proxy: add sanity check task to scheduler
Christian Ebner
c.ebner at proxmox.com
Wed Dec 13 16:38:19 CET 2023
Execute the configured sanity check jobs according to their configured time
schedules.
Signed-off-by: Christian Ebner <c.ebner at proxmox.com>
---
src/bin/proxmox-backup-proxy.rs | 41 ++++++++++++++++++++++++++++++++-
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 9c49026b..8efa0655 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -44,7 +44,7 @@ use proxmox_time::CalendarEvent;
use pbs_api_types::{
Authid, DataStoreConfig, Operation, PruneJobConfig, SyncJobConfig, TapeBackupJobConfig,
- VerificationJobConfig,
+ VerificationJobConfig, SanityCheckJobConfig,
};
use proxmox_rest_server::daemon;
@@ -60,6 +60,7 @@ use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::api2::tape::backup::do_tape_backup_job;
use proxmox_backup::server::do_prune_job;
use proxmox_backup::server::do_verification_job;
+use proxmox_backup::server::do_sanity_check_job;
fn main() -> Result<(), Error> {
pbs_tools::setup_libc_malloc_opts();
@@ -454,6 +455,7 @@ async fn schedule_tasks() -> Result<(), Error> {
schedule_datastore_verify_jobs().await;
schedule_tape_backup_jobs().await;
schedule_task_log_rotate().await;
+ schedule_task_sanity_check_jobs().await;
Ok(())
}
@@ -825,6 +827,43 @@ async fn schedule_task_log_rotate() {
}
}
+/// Scheduler hook: walk all configured sanity check jobs and start those
+/// whose calendar-event schedule says they are due.
+///
+/// Config read/parse errors are logged to stderr and the affected job (or the
+/// whole pass) is skipped; this function never propagates an error to the
+/// scheduler loop.
+async fn schedule_task_sanity_check_jobs() {
+ // Load the sanity check job config; on failure, log and skip this pass.
+ let config = match pbs_config::sanity_check::config() {
+ Err(err) => {
+ eprintln!("unable to read sanity check job config - {err}");
+ return;
+ }
+ Ok((config, _digest)) => config,
+ };
+ // Each config section is (section_type, raw json value); the section type
+ // is ignored here and the value is deserialized into the typed job config.
+ for (job_id, (_, job_config)) in config.sections {
+ // Skip malformed sections instead of aborting the whole scheduling pass.
+ let job_config: SanityCheckJobConfig = match serde_json::from_value(job_config) {
+ Ok(c) => c,
+ Err(err) => {
+ eprintln!("sanity check job config from_value failed - {err}");
+ continue;
+ }
+ };
+
+ // Sanity check jobs always run as root (no per-job owner in the config).
+ let worker_type = "sanitycheckjob";
+ let auth_id = Authid::root_auth_id().clone();
+ // check_schedule() consults the job's last-run state and the calendar
+ // event to decide whether the job is due now.
+ if check_schedule(worker_type, &job_config.schedule, &job_id) {
+ // Job::new() takes the per-job lock; if another instance holds it,
+ // silently skip — the job is already running.
+ let job = match Job::new(worker_type, &job_id) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+ // Start failures are logged only; other due jobs still get scheduled.
+ if let Err(err) = do_sanity_check_job(
+ job,
+ job_config.options,
+ &auth_id,
+ Some(job_config.schedule),
+ ) {
+ eprintln!("unable to start sanity check job {job_id} - {err}");
+ }
+ };
+ // NOTE(review): the trailing `;` after the if-block above is redundant
+ // (clippy/rustfmt will flag it). Also, SanityCheckJobConfig in the use
+ // block at the top of the patch is not in alphabetical order — rustfmt
+ // would place it before SyncJobConfig. Both worth fixing before merge.
+ }
+}
+
+
async fn command_reopen_access_logfiles() -> Result<(), Error> {
// only care about the most recent daemon instance for each, proxy & api, as other older ones
// should not respond to new requests anyway, but only finish their current one and then exit.
--
2.39.2
More information about the pbs-devel
mailing list