[pbs-devel] [PATCH proxmox-backup] datastore/prune schedules: use JobState for tracking of schedules
Dominik Csapak
d.csapak at proxmox.com
Fri Sep 18 16:03:52 CEST 2020
like the sync jobs, so that if an admin configures a schedule, it
really starts the next time that time is reached, not immediately
Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
best viewed with '-w'
the patch for garbage collection is not yet done, since there are more
things to consider there: we already save a state in memory for that
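as a rough illustration of the intended behaviour, here is a minimal
standalone sketch of the due-check: the next run is computed from the last
run time persisted in the job state file, so a freshly created (or reset)
state file means the job only starts once the schedule actually triggers.
the helpers below are simplified stand-ins, not the actual proxmox-backup
jobstate / systemd-time API:

// Hypothetical stand-in for jobstate::last_run_time(): in the real code this
// reads the persisted start time of the last run from the job state file;
// a freshly created state file yields 0 (epoch).
fn last_run_time(_worker_type: &str, _store: &str) -> Result<i64, String> {
    Ok(0)
}

// Hypothetical stand-in for compute_next_event(): returns the first point in
// time after `last` that matches the configured calendar event.
fn compute_next_event(_event_str: &str, last: i64) -> Result<i64, String> {
    Ok(last + 7 * 24 * 3600) // pretend the schedule is "weekly"
}

fn prune_job_is_due(store: &str, event_str: &str, now: i64) -> bool {
    let last = match last_run_time("prune", store) {
        Ok(time) => time,
        Err(err) => {
            eprintln!("could not get last run time of prune {}: {}", store, err);
            return false;
        }
    };
    match compute_next_event(event_str, last) {
        // only due once the computed next event lies in the past
        Ok(next) => next <= now,
        Err(_) => false,
    }
}

fn main() {
    let now = 1_600_000_000; // some "current" unix timestamp
    if prune_job_is_due("store1", "weekly", now) {
        println!("prune is due: lock the job state file and start a worker");
    } else {
        println!("not due yet: skip until the schedule triggers");
    }
}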
src/api2/config/datastore.rs | 16 ++++++-
src/bin/proxmox-backup-proxy.rs | 83 +++++++++++++++++++--------------
2 files changed, 64 insertions(+), 35 deletions(-)
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
index e62ba5ce..870324a3 100644
--- a/src/api2/config/datastore.rs
+++ b/src/api2/config/datastore.rs
@@ -131,6 +131,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
datastore::save_config(&config)?;
+ crate::config::jobstate::create_state_file("prune", &datastore.name)?;
+
Ok(())
}
@@ -312,7 +314,11 @@ pub fn update_datastore(
}
if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
- if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }
+ let mut prune_schedule_changed = false;
+ if prune_schedule.is_some() {
+ prune_schedule_changed = true;
+ data.prune_schedule = prune_schedule;
+ }
if verify_schedule.is_some() { data.verify_schedule = verify_schedule; }
if keep_last.is_some() { data.keep_last = keep_last; }
@@ -326,6 +332,12 @@ pub fn update_datastore(
datastore::save_config(&config)?;
+ // we want to reset the statefile, to avoid an immediate prune in some cases
+ // (e.g. going from monthly to weekly in the second week of the month)
+ if prune_schedule_changed {
+ crate::config::jobstate::create_state_file("prune", &name)?;
+ }
+
Ok(())
}
@@ -365,6 +377,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Erro
datastore::save_config(&config)?;
+ crate::config::jobstate::remove_state_file("prune", &name)?;
+
Ok(())
}
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 1f349c8c..8a6dfe36 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -337,7 +337,10 @@ async fn schedule_datastore_prune() {
use proxmox_backup::backup::{
PruneOptions, DataStore, BackupGroup, compute_prune_info};
use proxmox_backup::server::{WorkerTask};
- use proxmox_backup::config::datastore::{self, DataStoreConfig};
+ use proxmox_backup::config::{
+ jobstate::{self, Job},
+ datastore::{self, DataStoreConfig}
+ };
use proxmox_backup::tools::systemd::time::{
parse_calendar_event, compute_next_event};
@@ -394,16 +397,10 @@ async fn schedule_datastore_prune() {
let worker_type = "prune";
- let last = match lookup_last_worker(worker_type, &store) {
- Ok(Some(upid)) => {
- if proxmox_backup::server::worker_is_active_local(&upid) {
- continue;
- }
- upid.starttime
- }
- Ok(None) => 0,
+ let last = match jobstate::last_run_time(worker_type, &store) {
+ Ok(time) => time,
Err(err) => {
- eprintln!("lookup_last_job_start failed: {}", err);
+ eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
continue;
}
};
@@ -421,6 +418,11 @@ async fn schedule_datastore_prune() {
if next > now { continue; }
+ let mut job = match Job::new(worker_type, &store) {
+ Ok(job) => job,
+ Err(_) => continue, // could not get lock
+ };
+
let store2 = store.clone();
if let Err(err) = WorkerTask::new_thread(
@@ -429,34 +431,47 @@ async fn schedule_datastore_prune() {
Userid::backup_userid().clone(),
false,
move |worker| {
- worker.log(format!("Starting datastore prune on store \"{}\"", store));
- worker.log(format!("task triggered by schedule '{}'", event_str));
- worker.log(format!("retention options: {}", prune_options.cli_options_string()));
-
- let base_path = datastore.base_path();
-
- let groups = BackupGroup::list_groups(&base_path)?;
- for group in groups {
- let list = group.list_backups(&base_path)?;
- let mut prune_info = compute_prune_info(list, &prune_options)?;
- prune_info.reverse(); // delete older snapshots first
-
- worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
- store, group.backup_type(), group.backup_id()));
-
- for (info, keep) in prune_info {
- worker.log(format!(
- "{} {}/{}/{}",
- if keep { "keep" } else { "remove" },
- group.backup_type(), group.backup_id(),
- info.backup_dir.backup_time_string()));
- if !keep {
- datastore.remove_backup_dir(&info.backup_dir, true)?;
+
+ job.start(&worker.upid().to_string())?;
+
+ let result = {
+
+ worker.log(format!("Starting datastore prune on store \"{}\"", store));
+ worker.log(format!("task triggered by schedule '{}'", event_str));
+ worker.log(format!("retention options: {}", prune_options.cli_options_string()));
+
+ let base_path = datastore.base_path();
+
+ let groups = BackupGroup::list_groups(&base_path)?;
+ for group in groups {
+ let list = group.list_backups(&base_path)?;
+ let mut prune_info = compute_prune_info(list, &prune_options)?;
+ prune_info.reverse(); // delete older snapshots first
+
+ worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+ store, group.backup_type(), group.backup_id()));
+
+ for (info, keep) in prune_info {
+ worker.log(format!(
+ "{} {}/{}/{}",
+ if keep { "keep" } else { "remove" },
+ group.backup_type(), group.backup_id(),
+ info.backup_dir.backup_time_string()));
+ if !keep {
+ datastore.remove_backup_dir(&info.backup_dir, true)?;
+ }
}
}
+ Ok(())
+ };
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
}
- Ok(())
+ result
}
) {
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
--
2.20.1