add prune jobs api
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
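For orientation before the diff: the change moves the keep-* retention settings into a KeepOptions struct and wraps them, together with an optional namespace and max-depth, in PruneJobOptions; prune jobs are stored as PruneJobConfig entries and exposed via the new admin/prune and config/prune routers plus a prune-job subcommand of proxmox-backup-manager. Below is a minimal sketch (not part of the patch itself) of building such options the way the scheduler hunk further down does; field and method names are taken from the diff, while deriving Default for KeepOptions is an assumption here.

    use pbs_api_types::{KeepOptions, PruneJobOptions};

    // Keep 7 daily and 4 weekly snapshots; ns and max_depth stay unset,
    // i.e. root namespace at full depth.
    fn sample_prune_job_options() -> PruneJobOptions {
        PruneJobOptions {
            keep: KeepOptions {
                keep_daily: Some(7),
                keep_weekly: Some(4),
                // assumed: KeepOptions implements Default like PruneJobOptions
                ..Default::default()
            },
            ..Default::default()
        }
    }

A job whose options keep nothing is skipped via keeps_something(), as in schedule_datastore_prune_jobs() below.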
parent db4b8683cf
commit dba37e212b
@@ -264,14 +264,19 @@ pub const DATASTORE_TUNING_STRING_SCHEMA: Schema = StringSchema::new("Datastore
 pub struct DataStoreConfig {
     #[updater(skip)]
     pub name: String,

     #[updater(skip)]
     pub path: String,

     #[serde(skip_serializing_if = "Option::is_none")]
     pub comment: Option<String>,

     #[serde(skip_serializing_if = "Option::is_none")]
     pub gc_schedule: Option<String>,

     #[serde(skip_serializing_if = "Option::is_none")]
     pub prune_schedule: Option<String>,

     #[serde(skip_serializing_if = "Option::is_none")]
     pub keep_last: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
@@ -284,18 +289,23 @@ pub struct DataStoreConfig {
     pub keep_monthly: Option<u64>,
     #[serde(skip_serializing_if = "Option::is_none")]
     pub keep_yearly: Option<u64>,

     /// If enabled, all backups will be verified right after completion.
     #[serde(skip_serializing_if = "Option::is_none")]
     pub verify_new: Option<bool>,

     /// Send job email notification to this user
     #[serde(skip_serializing_if = "Option::is_none")]
     pub notify_user: Option<Userid>,

     /// Send notification only for job errors
     #[serde(skip_serializing_if = "Option::is_none")]
     pub notify: Option<String>,

     /// Datastore tuning options
     #[serde(skip_serializing_if = "Option::is_none")]
     pub tuning: Option<String>,

     /// Maintenance mode, type is either 'offline' or 'read-only', message should be enclosed in "
     #[serde(skip_serializing_if = "Option::is_none")]
     pub maintenance_mode: Option<String>,
@@ -3,7 +3,7 @@ use std::path::PathBuf;

 use anyhow::Error;

-use pbs_api_types::PruneOptions;
+use pbs_api_types::KeepOptions;

 use super::BackupInfo;

@@ -103,46 +103,10 @@ fn remove_incomplete_snapshots(mark: &mut HashMap<PathBuf, PruneMark>, list: &[B
     }
 }

-pub fn cli_options_string(options: &PruneOptions) -> String {
-    let mut opts = Vec::new();
-
-    if let Some(count) = options.keep_last {
-        if count > 0 {
-            opts.push(format!("--keep-last {}", count));
-        }
-    }
-    if let Some(count) = options.keep_hourly {
-        if count > 0 {
-            opts.push(format!("--keep-hourly {}", count));
-        }
-    }
-    if let Some(count) = options.keep_daily {
-        if count > 0 {
-            opts.push(format!("--keep-daily {}", count));
-        }
-    }
-    if let Some(count) = options.keep_weekly {
-        if count > 0 {
-            opts.push(format!("--keep-weekly {}", count));
-        }
-    }
-    if let Some(count) = options.keep_monthly {
-        if count > 0 {
-            opts.push(format!("--keep-monthly {}", count));
-        }
-    }
-    if let Some(count) = options.keep_yearly {
-        if count > 0 {
-            opts.push(format!("--keep-yearly {}", count));
-        }
-    }
-
-    opts.join(" ")
-}
-
+/// This filters incomplete and kept backups.
 pub fn compute_prune_info(
     mut list: Vec<BackupInfo>,
-    options: &PruneOptions,
+    options: &KeepOptions,
 ) -> Result<Vec<(BackupInfo, PruneMark)>, Error> {
     let mut mark = HashMap::new();

@@ -34,12 +34,12 @@ use pxar::EntryKind;
 use pbs_api_types::{
     print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
     Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
-    Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
-    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
-    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
-    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA,
-    VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
+    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
+    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
+    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;

@@ -888,10 +888,6 @@ pub fn verify(
 #[api(
     input: {
         properties: {
-            ns: {
-                type: BackupNamespace,
-                optional: true,
-            },
             group: {
                 type: pbs_api_types::BackupGroup,
                 flatten: true,
@@ -902,13 +898,17 @@ pub fn verify(
                 default: false,
                 description: "Just show what prune would do, but do not delete anything.",
             },
-            "prune-options": {
-                type: PruneOptions,
+            "keep-options": {
+                type: KeepOptions,
                 flatten: true,
             },
             store: {
                 schema: DATASTORE_SCHEMA,
             },
+            ns: {
+                type: BackupNamespace,
+                optional: true,
+            },
         },
     },
     returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
@@ -920,17 +920,16 @@ pub fn verify(
 )]
 /// Prune a group on the datastore
 pub fn prune(
-    ns: Option<BackupNamespace>,
     group: pbs_api_types::BackupGroup,
     dry_run: bool,
-    prune_options: PruneOptions,
+    keep_options: KeepOptions,
     store: String,
+    ns: Option<BackupNamespace>,
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let ns = ns.unwrap_or_default();

     let datastore = check_privs_and_load_store(
         &store,
         &ns,
@@ -948,11 +947,11 @@ pub fn prune(

     let list = group.list_backups()?;

-    let mut prune_info = compute_prune_info(list, &prune_options)?;
+    let mut prune_info = compute_prune_info(list, &keep_options)?;

     prune_info.reverse(); // delete older snapshots first

-    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
+    let keep_all = !keep_options.keeps_something();

     if dry_run {
         for (info, mark) in prune_info {
@@ -980,11 +979,13 @@ pub fn prune(
     if keep_all {
         task_log!(worker, "No prune selection - keeping all files.");
     } else {
-        task_log!(
-            worker,
-            "retention options: {}",
-            pbs_datastore::prune::cli_options_string(&prune_options)
-        );
+        let mut opts = Vec::new();
+        if !ns.is_root() {
+            opts.push(format!("--ns {ns}"));
+        }
+        crate::server::cli_keep_options(&mut opts, &keep_options);

+        task_log!(worker, "retention options: {}", opts.join(" "));
         task_log!(
             worker,
             "Starting prune on {} group \"{}\"",
@@ -1039,44 +1040,43 @@ pub fn prune(
                 description: "Just show what prune would do, but do not delete anything.",
             },
             "prune-options": {
-                type: PruneOptions,
+                type: PruneJobOptions,
                 flatten: true,
             },
             store: {
                 schema: DATASTORE_SCHEMA,
             },
-            ns: {
-                type: BackupNamespace,
-                optional: true,
-            },
-            "max-depth": {
-                schema: NS_MAX_DEPTH_SCHEMA,
-                optional: true,
-            },
         },
     },
     returns: {
         schema: UPID_SCHEMA,
     },
     access: {
-        permission: &Permission::Privilege(
-            &["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
     },
 )]
 /// Prune the datastore
 pub fn prune_datastore(
     dry_run: bool,
-    prune_options: PruneOptions,
+    prune_options: PruneJobOptions,
     store: String,
-    ns: Option<BackupNamespace>,
-    max_depth: Option<usize>,
     _param: Value,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
+    let user_info = CachedUserInfo::new()?;
+
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

+    user_info.check_privs(
+        &auth_id,
+        &prune_options.acl_path(&store),
+        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
+        true,
+    )?;
+
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
-    let ns = ns.unwrap_or_default();
+    let ns = prune_options.ns.clone().unwrap_or_default();
     let worker_id = format!("{}:{}", store, ns);

     let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

@@ -1087,15 +1087,7 @@ pub fn prune_datastore(
         auth_id.to_string(),
         to_stdout,
         move |worker| {
-            crate::server::prune_datastore(
-                worker,
-                auth_id,
-                prune_options,
-                datastore,
-                ns,
-                max_depth.unwrap_or(MAX_NAMESPACE_DEPTH), // canoot rely on schema default
-                dry_run,
-            )
+            crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
         },
     )?;

@@ -6,6 +6,7 @@ use proxmox_sys::sortable;

 pub mod datastore;
 pub mod namespace;
+pub mod prune;
 pub mod sync;
 pub mod traffic_control;
 pub mod verify;
@@ -13,6 +14,7 @@ pub mod verify;
 #[sortable]
 const SUBDIRS: SubdirMap = &sorted!([
     ("datastore", &datastore::ROUTER),
+    ("prune", &prune::ROUTER),
     ("sync", &sync::ROUTER),
     ("traffic-control", &traffic_control::ROUTER),
     ("verify", &verify::ROUTER),
@@ -0,0 +1,143 @@
+//! Datastore Prune Job Management
+
+use anyhow::{format_err, Error};
+use serde_json::Value;
+
+use proxmox_router::{
+    list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, SubdirMap,
+};
+use proxmox_schema::api;
+use proxmox_sys::sortable;
+
+use pbs_api_types::{
+    Authid, PruneJobConfig, PruneJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
+    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
+};
+use pbs_config::prune;
+use pbs_config::CachedUserInfo;
+
+use crate::server::{
+    do_prune_job,
+    jobstate::{compute_schedule_status, Job, JobState},
+};
+
+#[api(
+    input: {
+        properties: {
+            store: {
+                schema: DATASTORE_SCHEMA,
+                optional: true,
+            },
+        },
+    },
+    returns: {
+        description: "List configured jobs and their status (filtered by access)",
+        type: Array,
+        items: { type: PruneJobStatus },
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Audit or Datastore.Modify on datastore.",
+    },
+)]
+/// List all prune jobs
+pub fn list_prune_jobs(
+    store: Option<String>,
+    _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<PruneJobStatus>, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE;
+
+    let (config, digest) = prune::config()?;
+
+    let job_config_iter =
+        config
+            .convert_to_typed_array("prune")?
+            .into_iter()
+            .filter(|job: &PruneJobConfig| {
+                let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
+                if privs & required_privs == 0 {
+                    return false;
+                }
+
+                if let Some(store) = &store {
+                    &job.store == store
+                } else {
+                    true
+                }
+            });
+
+    let mut list = Vec::new();
+
+    for job in job_config_iter {
+        let last_state = JobState::load("prunejob", &job.id)
+            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
+
+        let mut status = compute_schedule_status(&last_state, Some(&job.schedule))?;
+        if job.disable {
+            status.next_run = None;
+        }
+
+        list.push(PruneJobStatus {
+            config: job,
+            status,
+        });
+    }
+
+    rpcenv["digest"] = hex::encode(&digest).into();
+
+    Ok(list)
+}
+
+#[api(
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            }
+        }
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Modify on job's datastore.",
+    },
+)]
+/// Runs a prune job manually.
+pub fn run_prune_job(
+    id: String,
+    _info: &ApiMethod,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<String, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let (config, _digest) = prune::config()?;
+    let prune_job: PruneJobConfig = config.lookup("prune", &id)?;
+
+    user_info.check_privs(
+        &auth_id,
+        &prune_job.acl_path(),
+        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
+        true,
+    )?;
+
+    let job = Job::new("prunejob", &id)?;
+
+    let upid_str = do_prune_job(job, prune_job.options, prune_job.store, &auth_id, None)?;
+
+    Ok(upid_str)
+}
+
+#[sortable]
+const PRUNE_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_PRUNE_JOB))];
+
+const PRUNE_INFO_ROUTER: Router = Router::new()
+    .get(&list_subdirs_api_method!(PRUNE_INFO_SUBDIRS))
+    .subdirs(PRUNE_INFO_SUBDIRS);
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_PRUNE_JOBS)
+    .match_all("id", &PRUNE_INFO_ROUTER);
@@ -10,6 +10,7 @@ pub mod changer;
 pub mod datastore;
 pub mod drive;
 pub mod media_pool;
+pub mod prune;
 pub mod remote;
 pub mod sync;
 pub mod tape_backup_job;
@@ -25,6 +26,7 @@ const SUBDIRS: SubdirMap = &sorted!([
     ("datastore", &datastore::ROUTER),
     ("drive", &drive::ROUTER),
     ("media-pool", &media_pool::ROUTER),
+    ("prune", &prune::ROUTER),
     ("remote", &remote::ROUTER),
     ("sync", &sync::ROUTER),
     ("tape-backup-job", &tape_backup_job::ROUTER),
@@ -0,0 +1,398 @@
+use anyhow::Error;
+use hex::FromHex;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+
+use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
+use proxmox_schema::{api, param_bail};
+
+use pbs_api_types::{
+    Authid, PruneJobConfig, PruneJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
+    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PROXMOX_CONFIG_DIGEST_SCHEMA,
+};
+use pbs_config::prune;
+
+use pbs_config::CachedUserInfo;
+
+#[api(
+    input: {
+        properties: {},
+    },
+    returns: {
+        description: "List configured prune schedules.",
+        type: Array,
+        items: { type: PruneJobConfig },
+    },
+    access: {
+        permission: &Permission::Anybody,
+        // FIXME: Audit on namespaces
+        description: "Requires Datastore.Audit.",
+    },
+)]
+/// List all scheduled prune jobs.
+pub fn list_prune_jobs(
+    _param: Value,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<Vec<PruneJobConfig>, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE;
+
+    let (config, digest) = prune::config()?;
+
+    let list = config.convert_to_typed_array("prune")?;
+
+    let list = list
+        .into_iter()
+        .filter(|job: &PruneJobConfig| {
+            let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
+            privs & required_privs != 0
+        })
+        .collect();
+
+    rpcenv["digest"] = hex::encode(&digest).into();
+
+    Ok(list)
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            config: {
+                type: PruneJobConfig,
+                flatten: true,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Modify on job's datastore.",
+    },
+)]
+/// Create a new prune job.
+pub fn create_prune_job(
+    config: PruneJobConfig,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    user_info.check_privs(
+        &auth_id,
+        &config.acl_path(),
+        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
+        true,
+    )?;
+
+    let _lock = prune::lock_config()?;
+
+    let (mut section_config, _digest) = prune::config()?;
+
+    if section_config.sections.get(&config.id).is_some() {
+        param_bail!("id", "job '{}' already exists.", config.id);
+    }
+
+    section_config.set_data(&config.id, "prune", &config)?;
+
+    prune::save_config(&section_config)?;
+
+    crate::server::jobstate::create_state_file("prunejob", &config.id)?;
+
+    Ok(())
+}
+
+#[api(
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+        },
+    },
+    returns: { type: PruneJobConfig },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Audit or Datastore.Verify on job's datastore.",
+    },
+)]
+/// Read a prune job configuration.
+pub fn read_prune_job(
+    id: String,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<PruneJobConfig, Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let (config, digest) = prune::config()?;
+
+    let prune_job: PruneJobConfig = config.lookup("prune", &id)?;
+
+    let required_privs = PRIV_DATASTORE_AUDIT;
+    user_info.check_privs(&auth_id, &prune_job.acl_path(), required_privs, true)?;
+
+    rpcenv["digest"] = hex::encode(&digest).into();
+
+    Ok(prune_job)
+}
+
+#[api]
+#[derive(Serialize, Deserialize)]
+#[serde(rename_all = "kebab-case")]
+/// Deletable property name
+pub enum DeletableProperty {
+    /// Delete the comment.
+    Comment,
+    /// Unset the disable flag.
+    Disable,
+    /// Reset the namespace to the root namespace.
+    Ns,
+    /// Reset the maximum depth to full recursion.
+    MaxDepth,
+    /// Delete number of last backups to keep.
+    KeepLast,
+    /// Delete number of hourly backups to keep.
+    KeepHourly,
+    /// Delete number of daily backups to keep.
+    KeepDaily,
+    /// Delete number of weekly backups to keep.
+    KeepWeekly,
+    /// Delete number of monthly backups to keep.
+    KeepMonthly,
+    /// Delete number of yearly backups to keep.
+    KeepYearly,
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+            update: {
+                type: PruneJobConfigUpdater,
+                flatten: true,
+            },
+            delete: {
+                description: "List of properties to delete.",
+                type: Array,
+                optional: true,
+                items: {
+                    type: DeletableProperty,
+                }
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Modify on job's datastore.",
+    },
+)]
+/// Update prune job config.
+#[allow(clippy::too_many_arguments)]
+pub fn update_prune_job(
+    id: String,
+    update: PruneJobConfigUpdater,
+    delete: Option<Vec<DeletableProperty>>,
+    digest: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let _lock = prune::lock_config()?;
+
+    // pass/compare digest
+    let (mut config, expected_digest) = prune::config()?;
+
+    if let Some(ref digest) = digest {
+        let digest = <[u8; 32]>::from_hex(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
+    let mut data: PruneJobConfig = config.lookup("prune", &id)?;
+
+    user_info.check_privs(
+        &auth_id,
+        &data.acl_path(),
+        PRIV_DATASTORE_PRUNE | PRIV_DATASTORE_MODIFY,
+        true,
+    )?;
+
+    if let Some(delete) = delete {
+        for delete_prop in delete {
+            match delete_prop {
+                DeletableProperty::Comment => {
+                    data.comment = None;
+                }
+                DeletableProperty::Disable => {
+                    data.disable = false;
+                }
+                DeletableProperty::Ns => {
+                    data.options.ns = None;
+                }
+                DeletableProperty::MaxDepth => {
+                    data.options.max_depth = None;
+                }
+                DeletableProperty::KeepLast => {
+                    data.options.keep.keep_last = None;
+                }
+                DeletableProperty::KeepHourly => {
+                    data.options.keep.keep_hourly = None;
+                }
+                DeletableProperty::KeepDaily => {
+                    data.options.keep.keep_daily = None;
+                }
+                DeletableProperty::KeepWeekly => {
+                    data.options.keep.keep_weekly = None;
+                }
+                DeletableProperty::KeepMonthly => {
+                    data.options.keep.keep_monthly = None;
+                }
+                DeletableProperty::KeepYearly => {
+                    data.options.keep.keep_yearly = None;
+                }
+            }
+        }
+    }
+
+    let mut recheck_privs = false;
+    if let Some(store) = update.store {
+        // check new store with possibly new ns:
+        recheck_privs = true;
+        data.store = store;
+    }
+
+    if let Some(ns) = update.options.ns {
+        recheck_privs = true;
+        data.options.ns = if ns.is_root() { None } else { Some(ns) };
+    }
+
+    if recheck_privs {
+        user_info.check_privs(
+            &auth_id,
+            &data.acl_path(),
+            PRIV_DATASTORE_PRUNE | PRIV_DATASTORE_MODIFY,
+            true,
+        )?;
+    }
+
+    let mut schedule_changed = false;
+    if let Some(schedule) = update.schedule {
+        schedule_changed = data.schedule != schedule;
+        data.schedule = schedule;
+    }
+
+    if let Some(max_depth) = update.options.max_depth {
+        if max_depth <= pbs_api_types::MAX_NAMESPACE_DEPTH {
+            data.options.max_depth = Some(max_depth);
+        }
+    }
+
+    if let Some(value) = update.disable {
+        data.disable = value;
+    }
+    if let Some(value) = update.comment {
+        data.comment = Some(value);
+    }
+    if let Some(value) = update.options.keep.keep_last {
+        data.options.keep.keep_last = Some(value);
+    }
+    if let Some(value) = update.options.keep.keep_hourly {
+        data.options.keep.keep_hourly = Some(value);
+    }
+    if let Some(value) = update.options.keep.keep_daily {
+        data.options.keep.keep_daily = Some(value);
+    }
+    if let Some(value) = update.options.keep.keep_weekly {
+        data.options.keep.keep_weekly = Some(value);
+    }
+    if let Some(value) = update.options.keep.keep_monthly {
+        data.options.keep.keep_monthly = Some(value);
+    }
+    if let Some(value) = update.options.keep.keep_yearly {
+        data.options.keep.keep_yearly = Some(value);
+    }
+
+    config.set_data(&id, "prune", &data)?;
+
+    prune::save_config(&config)?;
+
+    if schedule_changed {
+        crate::server::jobstate::update_job_last_run_time("prunejob", &id)?;
+    }
+
+    Ok(())
+}
+
+#[api(
+    protected: true,
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+            digest: {
+                optional: true,
+                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
+            },
+        },
+    },
+    access: {
+        permission: &Permission::Anybody,
+        description: "Requires Datastore.Verify on job's datastore.",
+    },
+)]
+/// Remove a prune job configuration
+pub fn delete_prune_job(
+    id: String,
+    digest: Option<String>,
+    rpcenv: &mut dyn RpcEnvironment,
+) -> Result<(), Error> {
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
+    let user_info = CachedUserInfo::new()?;
+
+    let _lock = prune::lock_config()?;
+
+    let (mut config, expected_digest) = prune::config()?;
+
+    let job: PruneJobConfig = config.lookup("prune", &id)?;
+
+    user_info.check_privs(
+        &auth_id,
+        &job.acl_path(),
+        PRIV_DATASTORE_PRUNE | PRIV_DATASTORE_MODIFY,
+        true,
+    )?;
+
+    if let Some(ref digest) = digest {
+        let digest = <[u8; 32]>::from_hex(digest)?;
+        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
+    }
+
+    if config.sections.remove(&id).is_none() {
+        http_bail!(NOT_FOUND, "job '{}' does not exist.", id);
+    }
+
+    prune::save_config(&config)?;
+
+    crate::server::jobstate::remove_state_file("prunejob", &id)?;
+
+    Ok(())
+}
+
+const ITEM_ROUTER: Router = Router::new()
+    .get(&API_METHOD_READ_PRUNE_JOB)
+    .put(&API_METHOD_UPDATE_PRUNE_JOB)
+    .delete(&API_METHOD_DELETE_PRUNE_JOB);
+
+pub const ROUTER: Router = Router::new()
+    .get(&API_METHOD_LIST_PRUNE_JOBS)
+    .post(&API_METHOD_CREATE_PRUNE_JOB)
+    .match_all("id", &ITEM_ROUTER);
@@ -430,6 +430,7 @@ async fn run() -> Result<(), Error> {
         .insert("subscription", subscription_commands())
         .insert("sync-job", sync_job_commands())
         .insert("verify-job", verify_job_commands())
+        .insert("prune-job", prune_job_commands())
        .insert("task", task_mgmt_cli())
         .insert(
             "pull",
@@ -47,8 +47,8 @@ use pbs_buildcfg::configdir;
 use proxmox_time::CalendarEvent;

 use pbs_api_types::{
-    Authid, DataStoreConfig, Operation, PruneOptions, SyncJobConfig, TapeBackupJobConfig,
-    VerificationJobConfig,
+    Authid, DataStoreConfig, KeepOptions, Operation, PruneJobConfig, PruneJobOptions,
+    SyncJobConfig, TapeBackupJobConfig, VerificationJobConfig,
 };

 use proxmox_rest_server::daemon;
@@ -558,6 +558,7 @@ async fn run_task_scheduler() {
 async fn schedule_tasks() -> Result<(), Error> {
     schedule_datastore_garbage_collection().await;
     schedule_datastore_prune().await;
+    schedule_datastore_prune_jobs().await;
     schedule_datastore_sync_jobs().await;
     schedule_datastore_verify_jobs().await;
     schedule_tape_backup_jobs().await;
@@ -690,16 +691,19 @@ async fn schedule_datastore_prune() {
             None => continue,
         };

-        let prune_options = PruneOptions {
-            keep_last: store_config.keep_last,
-            keep_hourly: store_config.keep_hourly,
-            keep_daily: store_config.keep_daily,
-            keep_weekly: store_config.keep_weekly,
-            keep_monthly: store_config.keep_monthly,
-            keep_yearly: store_config.keep_yearly,
+        let prune_options = PruneJobOptions {
+            keep: KeepOptions {
+                keep_last: store_config.keep_last,
+                keep_hourly: store_config.keep_hourly,
+                keep_daily: store_config.keep_daily,
+                keep_weekly: store_config.keep_weekly,
+                keep_monthly: store_config.keep_monthly,
+                keep_yearly: store_config.keep_yearly,
+            },
+            ..Default::default()
         };

-        if !pbs_datastore::prune::keeps_something(&prune_options) {
+        if !prune_options.keeps_something() {
             // no prune settings - keep all
             continue;
         }
@@ -721,6 +725,52 @@ async fn schedule_datastore_prune() {
     }
 }

+async fn schedule_datastore_prune_jobs() {
+    let config = match pbs_config::prune::config() {
+        Err(err) => {
+            eprintln!("unable to read prune job config - {}", err);
+            return;
+        }
+        Ok((config, _digest)) => config,
+    };
+    for (job_id, (_, job_config)) in config.sections {
+        let job_config: PruneJobConfig = match serde_json::from_value(job_config) {
+            Ok(c) => c,
+            Err(err) => {
+                eprintln!("prune job config from_value failed - {}", err);
+                continue;
+            }
+        };
+
+        if job_config.disable {
+            continue;
+        }
+
+        if !job_config.options.keeps_something() {
+            // no 'keep' values set, keep all
+            continue;
+        }
+
+        let worker_type = "prunejob";
+        let auth_id = Authid::root_auth_id().clone();
+        if check_schedule(worker_type, &job_config.schedule, &job_id) {
+            let job = match Job::new(worker_type, &job_id) {
+                Ok(job) => job,
+                Err(_) => continue, // could not get lock
+            };
+            if let Err(err) = do_prune_job(
+                job,
+                job_config.options,
+                job_config.store,
+                &auth_id,
+                Some(job_config.schedule),
+            ) {
+                eprintln!("unable to start datastore prune job {} - {}", &job_id, err);
+            }
+        };
+    }
+}
+
 async fn schedule_datastore_sync_jobs() {
     let config = match pbs_config::sync::config() {
         Err(err) => {
@@ -10,6 +10,8 @@ mod dns;
 pub use dns::*;
 mod network;
 pub use network::*;
+mod prune;
+pub use prune::*;
 mod remote;
 pub use remote::*;
 mod sync;
@@ -0,0 +1,157 @@
+use std::collections::HashMap;
+
+use anyhow::Error;
+use serde_json::Value;
+
+use proxmox_router::{cli::*, ApiHandler, RpcEnvironment};
+use proxmox_schema::api;
+
+use pbs_api_types::{PruneJobConfig, JOB_ID_SCHEMA};
+use pbs_config::prune;
+
+use proxmox_backup::api2;
+
+#[api(
+    input: {
+        properties: {
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+        }
+    }
+)]
+/// List all prune jobs
+fn list_prune_jobs(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
+    let output_format = get_output_format(&param);
+
+    let info = &api2::config::prune::API_METHOD_LIST_PRUNE_JOBS;
+    let mut data = match info.handler {
+        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
+        _ => unreachable!(),
+    };
+
+    let options = default_table_format_options()
+        .column(ColumnConfig::new("id"))
+        .column(ColumnConfig::new("disable"))
+        .column(ColumnConfig::new("store"))
+        .column(ColumnConfig::new("ns"))
+        .column(ColumnConfig::new("schedule"))
+        .column(ColumnConfig::new("max-depth"))
+        .column(ColumnConfig::new("keep-last"))
+        .column(ColumnConfig::new("keep-hourly"))
+        .column(ColumnConfig::new("keep-daily"))
+        .column(ColumnConfig::new("keep-weekly"))
+        .column(ColumnConfig::new("keep-monthly"))
+        .column(ColumnConfig::new("keep-yearly"));
+
+    format_and_print_result_full(&mut data, &info.returns, &output_format, &options);
+
+    Ok(Value::Null)
+}
+
+#[api(
+    input: {
+        properties: {
+            id: {
+                schema: JOB_ID_SCHEMA,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+        }
+    }
+)]
+/// Show prune job configuration
+fn show_prune_job(param: Value, rpcenv: &mut dyn RpcEnvironment) -> Result<Value, Error> {
+    let output_format = get_output_format(&param);
+
+    let info = &api2::config::prune::API_METHOD_READ_PRUNE_JOB;
+    let mut data = match info.handler {
+        ApiHandler::Sync(handler) => (handler)(param, info, rpcenv)?,
+        _ => unreachable!(),
+    };
+
+    let options = default_table_format_options();
+    format_and_print_result_full(&mut data, &info.returns, &output_format, &options);
+
+    Ok(Value::Null)
+}
+
+pub fn prune_job_commands() -> CommandLineInterface {
+    let cmd_def = CliCommandMap::new()
+        .insert("list", CliCommand::new(&API_METHOD_LIST_PRUNE_JOBS))
+        .insert(
+            "show",
+            CliCommand::new(&API_METHOD_SHOW_PRUNE_JOB)
+                .arg_param(&["id"])
+                .completion_cb("id", pbs_config::prune::complete_prune_job_id),
+        )
+        .insert(
+            "create",
+            CliCommand::new(&api2::config::prune::API_METHOD_CREATE_PRUNE_JOB)
+                .arg_param(&["id"])
+                .completion_cb("id", pbs_config::prune::complete_prune_job_id)
+                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
+                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
+                .completion_cb("ns", complete_prune_local_datastore_namespace),
+        )
+        .insert(
+            "update",
+            CliCommand::new(&api2::config::prune::API_METHOD_UPDATE_PRUNE_JOB)
+                .arg_param(&["id"])
+                .completion_cb("id", pbs_config::prune::complete_prune_job_id)
+                .completion_cb("schedule", pbs_config::datastore::complete_calendar_event)
+                .completion_cb("store", pbs_config::datastore::complete_datastore_name)
+                .completion_cb("ns", complete_prune_local_datastore_namespace),
+        )
+        .insert(
+            "remove",
+            CliCommand::new(&api2::config::prune::API_METHOD_DELETE_PRUNE_JOB)
+                .arg_param(&["id"])
+                .completion_cb("id", pbs_config::prune::complete_prune_job_id),
+        );
+
+    cmd_def.into()
+}
+
+// shell completion helper
+fn complete_prune_local_datastore_namespace(
+    _arg: &str,
+    param: &HashMap<String, String>,
+) -> Vec<String> {
+    let mut list = Vec::new();
+    let mut rpcenv = CliEnvironment::new();
+    rpcenv.set_auth_id(Some(String::from("root@pam")));
+
+    let mut job: Option<PruneJobConfig> = None;
+
+    let store = param.get("store").map(|r| r.to_owned()).or_else(|| {
+        if let Some(id) = param.get("id") {
+            job = get_prune_job(id).ok();
+            if let Some(ref job) = job {
+                return Some(job.store.clone());
+            }
+        }
+        None
+    });
+
+    if let Some(store) = store {
+        if let Ok(data) =
+            crate::api2::admin::namespace::list_namespaces(store, None, None, &mut rpcenv)
+        {
+            for item in data {
+                list.push(item.ns.name());
+            }
+        }
+    }
+
+    list
+}
+
+fn get_prune_job(id: &str) -> Result<PruneJobConfig, Error> {
+    let (config, _digest) = prune::config()?;
+
+    config.lookup("prune", id)
+}
@@ -5,7 +5,8 @@ use anyhow::Error;
 use proxmox_sys::{task_log, task_warn};

 use pbs_api_types::{
-    Authid, BackupNamespace, Operation, PruneOptions, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
+    print_store_and_ns, Authid, KeepOptions, Operation, PruneJobOptions, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_PRUNE,
 };
 use pbs_datastore::prune::compute_prune_info;
 use pbs_datastore::DataStore;
@@ -17,13 +18,14 @@ use crate::server::jobstate::Job;
 pub fn prune_datastore(
     worker: Arc<WorkerTask>,
     auth_id: Authid,
-    prune_options: PruneOptions,
+    prune_options: PruneJobOptions,
     datastore: Arc<DataStore>,
-    ns: BackupNamespace,
-    max_depth: usize,
     dry_run: bool,
 ) -> Result<(), Error> {
     let store = &datastore.name();
+    let max_depth = prune_options
+        .max_depth
+        .unwrap_or(pbs_api_types::MAX_NAMESPACE_DEPTH);
     let depth_str = if max_depth == pbs_api_types::MAX_NAMESPACE_DEPTH {
         " down to full depth".to_string()
     } else if max_depth > 0 {
@@ -31,23 +33,18 @@ pub fn prune_datastore(
     } else {
         "non-recursive".to_string()
     };
-    if ns.is_root() {
-        task_log!(
-            worker,
-            "Starting datastore prune on store '{store}', {depth_str}"
-        );
-    } else {
-        task_log!(
-            worker,
-            "Starting datastore prune on store '{store}' namespace '{ns}', {depth_str}"
-        );
-    }
+    let ns = prune_options.ns.clone().unwrap_or_default();
+    task_log!(
+        worker,
+        "Starting datastore prune on {}, {depth_str}",
+        print_store_and_ns(store, &ns),
+    );

     if dry_run {
         task_log!(worker, "(dry test run)");
     }

-    let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
+    let keep_all = !prune_options.keeps_something();

     if keep_all {
         task_log!(worker, "No prune selection - keeping all files.");
@@ -55,7 +52,7 @@ pub fn prune_datastore(
         task_log!(
             worker,
             "retention options: {}",
-            pbs_datastore::prune::cli_options_string(&prune_options)
+            cli_prune_options_string(&prune_options)
         );
     }

@@ -71,7 +68,7 @@ pub fn prune_datastore(
         let ns = group.backup_ns();
         let list = group.list_backups()?;

-        let mut prune_info = compute_prune_info(list, &prune_options)?;
+        let mut prune_info = compute_prune_info(list, &prune_options.keep)?;
         prune_info.reverse(); // delete older snapshots first

         task_log!(
@@ -104,9 +101,60 @@ pub fn prune_datastore(
     Ok(())
 }

+pub(crate) fn cli_prune_options_string(options: &PruneJobOptions) -> String {
+    let mut opts = Vec::new();
+
+    if let Some(ns) = &options.ns {
+        if !ns.is_root() {
+            opts.push(format!("--ns {}", ns));
+        }
+    }
+    if let Some(max_depth) = options.max_depth {
+        // FIXME: don't add if it's the default?
+        opts.push(format!("--max-depth {max_depth}"));
+    }
+
+    cli_keep_options(&mut opts, &options.keep);
+
+    opts.join(" ")
+}
+
+pub(crate) fn cli_keep_options(opts: &mut Vec<String>, options: &KeepOptions) {
+    if let Some(count) = options.keep_last {
+        if count > 0 {
+            opts.push(format!("--keep-last {}", count));
+        }
+    }
+    if let Some(count) = options.keep_hourly {
+        if count > 0 {
+            opts.push(format!("--keep-hourly {}", count));
+        }
+    }
+    if let Some(count) = options.keep_daily {
+        if count > 0 {
+            opts.push(format!("--keep-daily {}", count));
+        }
+    }
+    if let Some(count) = options.keep_weekly {
+        if count > 0 {
+            opts.push(format!("--keep-weekly {}", count));
+        }
+    }
+    if let Some(count) = options.keep_monthly {
+        if count > 0 {
+            opts.push(format!("--keep-monthly {}", count));
+        }
+    }
+    if let Some(count) = options.keep_yearly {
+        if count > 0 {
+            opts.push(format!("--keep-yearly {}", count));
+        }
+    }
+}
+
 pub fn do_prune_job(
     mut job: Job,
-    prune_options: PruneOptions,
+    prune_options: PruneJobOptions,
     store: String,
     auth_id: &Authid,
     schedule: Option<String>,
@@ -115,8 +163,12 @@ pub fn do_prune_job(

     let worker_type = job.jobtype().to_string();
     let auth_id = auth_id.clone();
-    // TODO include namespace info here once this becomes namespace-aware/configurable
-    let worker_id = format!("{store}");
+    let worker_id = match &prune_options.ns {
+        Some(ns) if ns.is_root() => format!("{store}"),
+        Some(ns) => format!("{store}:{ns}"),
+        None => format!("{store}"),
+    };

     let upid_str = WorkerTask::new_thread(
         &worker_type,
         Some(worker_id),
@@ -131,15 +183,7 @@ pub fn do_prune_job(
                 task_log!(worker, "task triggered by schedule '{}'", event_str);
            }

-            let result = prune_datastore(
-                worker.clone(),
-                auth_id,
-                prune_options,
-                datastore,
-                BackupNamespace::default(),
-                pbs_api_types::MAX_NAMESPACE_DEPTH,
-                false,
-            );
+            let result = prune_datastore(worker.clone(), auth_id, prune_options, datastore, false);

             let status = worker.create_state(&result);