[pbs-devel] [PATCH proxmox-backup 10/10] proxmox-backup-proxy: add task archive rotation
Dominik Csapak
d.csapak at proxmox.com
Fri Sep 25 16:13:26 CEST 2020
This starts a task once a day at "00:00" that rotates the task log
archive if it is bigger than 500 KiB.
If we want, we can make the schedule/size limit/etc. configurable later,
but for now it is OK to use fixed values.
Signed-off-by: Dominik Csapak <d.csapak at proxmox.com>
---
src/bin/proxmox-backup-proxy.rs | 96 +++++++++++++++++++++++++++++++++
src/server/worker_task.rs | 22 ++++++++
2 files changed, 118 insertions(+)
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
index 3272fe72..67fbc541 100644
--- a/src/bin/proxmox-backup-proxy.rs
+++ b/src/bin/proxmox-backup-proxy.rs
@@ -198,6 +198,7 @@ async fn schedule_tasks() -> Result<(), Error> {
schedule_datastore_prune().await;
schedule_datastore_verification().await;
schedule_datastore_sync_jobs().await;
+ schedule_task_log_rotate().await;
Ok(())
}
@@ -655,6 +656,101 @@ async fn schedule_datastore_sync_jobs() {
}
}
+async fn schedule_task_log_rotate() {
+ use proxmox_backup::{
+ config::jobstate::{self, Job},
+ server::rotate_task_log_archive,
+ };
+ use proxmox_backup::server::WorkerTask;
+ use proxmox_backup::tools::systemd::time::{
+ parse_calendar_event, compute_next_event};
+
+ let worker_type = "logrotate";
+ let job_id = "task-archive";
+
+ let last = match jobstate::last_run_time(worker_type, job_id) {
+ Ok(time) => time,
+ Err(err) => {
+ eprintln!("could not get last run time of task log archive rotation: {}", err);
+ return;
+ }
+ };
+
+ // schedule daily at 00:00 like normal logrotate
+ let schedule = "00:00";
+
+ let event = match parse_calendar_event(schedule) {
+ Ok(event) => event,
+ Err(err) => {
+ // should not happen?
+ eprintln!("unable to parse schedule '{}' - {}", schedule, err);
+ return;
+ }
+ };
+
+ let next = match compute_next_event(&event, last, false) {
+ Ok(Some(next)) => next,
+ Ok(None) => return,
+ Err(err) => {
+ eprintln!("compute_next_event for '{}' failed - {}", schedule, err);
+ return;
+ }
+ };
+
+ let now = proxmox::tools::time::epoch_i64();
+
+ if next > now {
+ // if we never ran the rotation, schedule instantly
+ match jobstate::JobState::load(worker_type, job_id) {
+ Ok(state) => match state {
+ jobstate::JobState::Created { .. } => {},
+ _ => return,
+ },
+ _ => return,
+ }
+ }
+
+ let mut job = match Job::new(worker_type, job_id) {
+ Ok(job) => job,
+ Err(_) => return, // could not get lock
+ };
+
+ if let Err(err) = WorkerTask::new_thread(
+ worker_type,
+ Some(job_id.to_string()),
+ Userid::backup_userid().clone(),
+ false,
+ move |worker| {
+ job.start(&worker.upid().to_string())?;
+ worker.log(format!("starting task log rotation"));
+ // one entry has normally about ~100-150 bytes
+ let max_size = 500000; // at least 5000 entries
+ let max_files = 20; // at least 100000 entries
+ let result = try_block!({
+ let has_rotated = rotate_task_log_archive(max_size, true, Some(max_files))?;
+ if has_rotated {
+ worker.log(format!("task log archive was rotated"));
+ } else {
+ worker.log(format!("task log archive was not rotated"));
+ }
+
+ Ok(())
+ });
+
+ let status = worker.create_state(&result);
+
+ if let Err(err) = job.finish(status) {
+ eprintln!("could not finish job state for {}: {}", worker_type, err);
+ }
+
+ result
+ },
+ ) {
+ eprintln!("unable to start task log rotation: {}", err);
+ }
+
+}
+
async fn run_stat_generator() {
let mut count = 0;
diff --git a/src/server/worker_task.rs b/src/server/worker_task.rs
index 8838db38..c567e6be 100644
--- a/src/server/worker_task.rs
+++ b/src/server/worker_task.rs
@@ -1,5 +1,6 @@
use std::collections::{HashMap, VecDeque};
use std::fs::File;
+use std::path::Path;
use std::io::{Read, Write, BufRead, BufReader};
use std::panic::UnwindSafe;
use std::sync::atomic::{AtomicBool, Ordering};
@@ -343,6 +344,27 @@ fn lock_task_list_files(exclusive: bool) -> Result<std::fs::File, Error> {
Ok(lock)
}
+/// checks if the Task Archive is bigger than 'size_threshold' bytes, and
+/// rotates it if it is
+pub fn rotate_task_log_archive(size_threshold: u64, compress: bool, max_files: Option<usize>) -> Result<bool, Error> {
+ let _lock = lock_task_list_files(true)?;
+ let path = Path::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN);
+ let metadata = path.metadata()?;
+ if metadata.len() > size_threshold {
+ let mut logrotate = LogRotate::new(PROXMOX_BACKUP_ARCHIVE_TASK_FN, compress).ok_or_else(|| format_err!("could not get archive file names"))?;
+ let backup_user = crate::backup::backup_user()?;
+ logrotate.rotate(
+ CreateOptions::new()
+ .owner(backup_user.uid)
+ .group(backup_user.gid),
+ max_files,
+ )?;
+ Ok(true)
+ } else {
+ Ok(false)
+ }
+}
+
// atomically read/update the task list, update status of finished tasks
// new_upid is added to the list when specified.
fn update_active_workers(new_upid: Option<&UPID>) -> Result<(), Error> {
--
2.20.1
More information about the pbs-devel
mailing list