[pbs-devel] [PATCH v3 proxmox-backup 1/8] api: garbage collect job status
Lukas Wagner
l.wagner at proxmox.com
Thu Mar 21 11:23:10 CET 2024
On 2024-02-08 14:59, Stefan Lendl wrote:
> Adds an api endpoint on the datastore that reports the gc job status
> such as:
> - Schedule
> - State (of last run)
> - Duration (of last run)
> - Last Run
> - Next Run (if scheduled)
> - Pending Chunks (of last run)
> - Removed Chunks (of last run)
From a user's perspective I think it would make more sense to report
bytes, not chunks, especially since we deal with non-constant chunk
sizes.
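
Just as a rough, untested sketch of what I have in mind (the field names
are only a suggestion), the new struct could carry byte counters instead
of (or next to) the chunk counters:

    /// Bytes removed by the last gc run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub removed_bytes: Option<u64>,
    /// Bytes still pending removal after the last gc run
    #[serde(skip_serializing_if = "Option::is_none")]
    pub pending_bytes: Option<u64>,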
>
> Adds a dedicated endpoint admin/gc that reports gc job status for all
> datastores including the onces without a gc-schedule.
>
> Originally-by: Gabriel Goller <g.goller at proxmox.com>
> Signed-off-by: Stefan Lendl <s.lendl at proxmox.com>
> Tested-by: Gabriel Goller <g.goller at proxmox.com>
> Reviewd-by: Gabriel Goller <g.goller at proxmox.com>
^ typo, also applies to the other patches
> ---
> pbs-api-types/src/datastore.rs | 40 ++++++++++
> src/api2/admin/datastore.rs | 129 ++++++++++++++++++++++++++++++++-
> src/api2/admin/gc.rs | 57 +++++++++++++++
> src/api2/admin/mod.rs | 2 +
> 4 files changed, 225 insertions(+), 3 deletions(-)
> create mode 100644 src/api2/admin/gc.rs
>
> diff --git a/pbs-api-types/src/datastore.rs b/pbs-api-types/src/datastore.rs
> index cce9888b..ba3879c9 100644
> --- a/pbs-api-types/src/datastore.rs
> +++ b/pbs-api-types/src/datastore.rs
> @@ -1270,6 +1270,46 @@ pub struct GarbageCollectionStatus {
> pub still_bad: usize,
> }
>
> +#[api(
> + properties: {
> + "last-run-upid": {
> + optional: true,
> + type: UPID,
> + },
> + },
> +)]
> +#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
> +#[serde(rename_all = "kebab-case")]
> +/// Garbage Collection general info
> +pub struct GarbageCollectionJobStatus {
> + /// Datastore
> + pub store: String,
> + /// upid of the last run gc job
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub last_run_upid: Option<String>,
> + /// Number of removed chunks
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub removed_chunks: Option<usize>,
> + /// Number of pending chunks
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub pending_chunks: Option<usize>,
> + /// Schedule of the gc job
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub schedule: Option<String>,
> + /// Time of the next gc run
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub next_run: Option<i64>,
> + /// Endtime of the last gc run
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub last_run_endtime: Option<i64>,
> + /// State of the last gc run
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub last_run_state: Option<String>,
> + /// Duration of last gc run
> + #[serde(skip_serializing_if = "Option::is_none")]
> + pub duration: Option<i64>,
> +}
> +
> #[api(
> properties: {
> "gc-status": {
> diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
> index a95031e7..357cae0a 100644
> --- a/src/api2/admin/datastore.rs
> +++ b/src/api2/admin/datastore.rs
> @@ -27,18 +27,20 @@ use proxmox_sys::fs::{
> file_read_firstline, file_read_optional_string, replace_file, CreateOptions,
> };
> use proxmox_sys::{task_log, task_warn};
> +use proxmox_time::CalendarEvent;
>
> use pxar::accessor::aio::Accessor;
> use pxar::EntryKind;
>
> use pbs_api_types::{
> print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
> - Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
> + Counts, CryptMode, DataStoreConfig, DataStoreListItem, DataStoreStatus,
> + GarbageCollectionJobStatus, GarbageCollectionStatus, GroupListItem, JobScheduleStatus,
> KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
> SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
> BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
> MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
> - PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
> + PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID,
> UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
> };
> use pbs_client::pxar::{create_tar, create_zip};
> @@ -67,7 +69,7 @@ use crate::backup::{
> ListAccessibleBackupGroups, NS_PRIVS_OK,
> };
>
> -use crate::server::jobstate::Job;
> +use crate::server::jobstate::{compute_schedule_status, Job, JobState};
>
> const GROUP_NOTES_FILE_NAME: &str = "notes";
>
> @@ -1199,6 +1201,123 @@ pub fn garbage_collection_status(
> Ok(status)
> }
>
> +#[api(
> + input: {
> + properties: {
> + store: {
> + schema: DATASTORE_SCHEMA,
> + },
> + },
> + },
> + returns: {
> + type: GarbageCollectionJobStatus,
> + },
> + access: {
> + permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_AUDIT, false),
> + },
> +)]
> +/// Garbage collection status.
> +pub fn garbage_collection_job_status(
> + store: String,
> + _info: &ApiMethod,
> + _rpcenv: &mut dyn RpcEnvironment,
> +) -> Result<GarbageCollectionJobStatus, Error> {
> + let (config, _) = pbs_config::datastore::config()?;
> + let store_config: DataStoreConfig = config.lookup("datastore", &store)?;
> +
> + let mut info = GarbageCollectionJobStatus {
> + store: store.clone(),
> + schedule: store_config.gc_schedule,
> + ..Default::default()
> + };
> +
> + let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
> + let status_in_memory = datastore.last_gc_status();
> + let state_file = JobState::load("garbage_collection", &store)
> + .map_err(|err| {
> + log::error!(
> + "could not open statefile for {:?}: {}",
> + info.last_run_upid,
> + err
> + )
> + })
> + .ok();
> +
> + let mut selected_upid = None;
> + if status_in_memory.upid.is_some() {
> + selected_upid = status_in_memory.upid;
> + } else if let Some(JobState::Finished { upid, .. }) = &state_file {
> + selected_upid = Some(upid.to_owned());
> + }
> +
> + info.last_run_upid = selected_upid.clone();
> +
> + match selected_upid {
> + Some(upid) => {
> + info.removed_chunks = Some(status_in_memory.removed_chunks);
> + info.pending_chunks = Some(status_in_memory.pending_chunks);
> +
... seems like the gc-stats also contain the removed/pending bytes, so this should be
an easy change. :)
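
For the handler side, an untested sketch, assuming the struct gains the
removed_bytes/pending_bytes fields suggested above and reusing the byte
counters the in-memory gc status already carries:

    Some(upid) => {
        // report bytes instead of (or in addition to) chunk counts
        info.removed_bytes = Some(status_in_memory.removed_bytes);
        info.pending_bytes = Some(status_in_memory.pending_bytes);
        // ... endtime/state/duration handling based on `upid` stays
        //     as in the patch
    }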
--
- Lukas