add prune jobs api

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
This commit is contained in:
Wolfgang Bumiller
2022-05-19 11:02:01 +02:00
parent db4b8683cf
commit dba37e212b
12 changed files with 890 additions and 125 deletions

View File

@ -34,12 +34,12 @@ use pxar::EntryKind;
use pbs_api_types::{
print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA,
VERIFICATION_OUTDATED_AFTER_SCHEMA,
KeepOptions, Operation, PruneJobOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
@ -888,10 +888,6 @@ pub fn verify(
#[api(
input: {
properties: {
ns: {
type: BackupNamespace,
optional: true,
},
group: {
type: pbs_api_types::BackupGroup,
flatten: true,
@ -902,13 +898,17 @@ pub fn verify(
default: false,
description: "Just show what prune would do, but do not delete anything.",
},
"prune-options": {
type: PruneOptions,
"keep-options": {
type: KeepOptions,
flatten: true,
},
store: {
schema: DATASTORE_SCHEMA,
},
ns: {
type: BackupNamespace,
optional: true,
},
},
},
returns: pbs_api_types::ADMIN_DATASTORE_PRUNE_RETURN_TYPE,
@ -920,17 +920,16 @@ pub fn verify(
)]
/// Prune a group on the datastore
pub fn prune(
ns: Option<BackupNamespace>,
group: pbs_api_types::BackupGroup,
dry_run: bool,
prune_options: PruneOptions,
keep_options: KeepOptions,
store: String,
ns: Option<BackupNamespace>,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store,
&ns,
@ -948,11 +947,11 @@ pub fn prune(
let list = group.list_backups()?;
let mut prune_info = compute_prune_info(list, &prune_options)?;
let mut prune_info = compute_prune_info(list, &keep_options)?;
prune_info.reverse(); // delete older snapshots first
let keep_all = !pbs_datastore::prune::keeps_something(&prune_options);
let keep_all = !keep_options.keeps_something();
if dry_run {
for (info, mark) in prune_info {
@ -980,11 +979,13 @@ pub fn prune(
if keep_all {
task_log!(worker, "No prune selection - keeping all files.");
} else {
task_log!(
worker,
"retention options: {}",
pbs_datastore::prune::cli_options_string(&prune_options)
);
let mut opts = Vec::new();
if !ns.is_root() {
opts.push(format!("--ns {ns}"));
}
crate::server::cli_keep_options(&mut opts, &keep_options);
task_log!(worker, "retention options: {}", opts.join(" "));
task_log!(
worker,
"Starting prune on {} group \"{}\"",
@ -1039,44 +1040,43 @@ pub fn prune(
description: "Just show what prune would do, but do not delete anything.",
},
"prune-options": {
type: PruneOptions,
type: PruneJobOptions,
flatten: true,
},
store: {
schema: DATASTORE_SCHEMA,
},
ns: {
type: BackupNamespace,
optional: true,
},
"max-depth": {
schema: NS_MAX_DEPTH_SCHEMA,
optional: true,
},
},
},
returns: {
schema: UPID_SCHEMA,
},
access: {
permission: &Permission::Privilege(
&["datastore", "{store}"], PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, true),
permission: &Permission::Anybody,
description: "Requires Datastore.Modify or Datastore.Prune on the datastore/namespace.",
},
)]
/// Prune the datastore
pub fn prune_datastore(
dry_run: bool,
prune_options: PruneOptions,
prune_options: PruneJobOptions,
store: String,
ns: Option<BackupNamespace>,
max_depth: Option<usize>,
_param: Value,
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let user_info = CachedUserInfo::new()?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
user_info.check_privs(
&auth_id,
&prune_options.acl_path(&store),
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
true,
)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let ns = ns.unwrap_or_default();
let ns = prune_options.ns.clone().unwrap_or_default();
let worker_id = format!("{}:{}", store, ns);
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
@ -1087,15 +1087,7 @@ pub fn prune_datastore(
auth_id.to_string(),
to_stdout,
move |worker| {
crate::server::prune_datastore(
worker,
auth_id,
prune_options,
datastore,
ns,
max_depth.unwrap_or(MAX_NAMESPACE_DEPTH), // cannot rely on schema default
dry_run,
)
crate::server::prune_datastore(worker, auth_id, prune_options, datastore, dry_run)
},
)?;

View File

@ -6,6 +6,7 @@ use proxmox_sys::sortable;
pub mod datastore;
pub mod namespace;
pub mod prune;
pub mod sync;
pub mod traffic_control;
pub mod verify;
@ -13,6 +14,7 @@ pub mod verify;
#[sortable]
const SUBDIRS: SubdirMap = &sorted!([
("datastore", &datastore::ROUTER),
("prune", &prune::ROUTER),
("sync", &sync::ROUTER),
("traffic-control", &traffic_control::ROUTER),
("verify", &verify::ROUTER),

143
src/api2/admin/prune.rs Normal file
View File

@ -0,0 +1,143 @@
//! Datastore Prune Job Management
use anyhow::{format_err, Error};
use serde_json::Value;
use proxmox_router::{
list_subdirs_api_method, ApiMethod, Permission, Router, RpcEnvironment, SubdirMap,
};
use proxmox_schema::api;
use proxmox_sys::sortable;
use pbs_api_types::{
Authid, PruneJobConfig, PruneJobStatus, DATASTORE_SCHEMA, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
};
use pbs_config::prune;
use pbs_config::CachedUserInfo;
use crate::server::{
do_prune_job,
jobstate::{compute_schedule_status, Job, JobState},
};
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
                optional: true,
            },
        },
    },
    returns: {
        description: "List configured jobs and their status (filtered by access)",
        type: Array,
        items: { type: PruneJobStatus },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Audit or Datastore.Modify on datastore.",
    },
)]
/// List all prune jobs
pub fn list_prune_jobs(
    store: Option<String>,
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<PruneJobStatus>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // A job is visible if the caller holds any one of these privileges on the
    // job's ACL path.
    let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE;

    let (config, digest) = prune::config()?;

    let mut list = Vec::new();
    for job in config.convert_to_typed_array::<PruneJobConfig>("prune")? {
        // Skip entries the caller is not privileged to see.
        if user_info.lookup_privs(&auth_id, &job.acl_path()) & required_privs == 0 {
            continue;
        }

        // Optionally restrict the listing to one datastore.
        if let Some(store) = &store {
            if &job.store != store {
                continue;
            }
        }

        let last_state = JobState::load("prunejob", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let mut status = compute_schedule_status(&last_state, Some(&job.schedule))?;
        if job.disable {
            // Disabled jobs are never scheduled, so report no upcoming run.
            status.next_run = None;
        }

        list.push(PruneJobStatus {
            config: job,
            status,
        });
    }

    // Expose the config digest so clients can detect concurrent modification.
    rpcenv["digest"] = hex::encode(&digest).into();

    Ok(list)
}
#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            }
        }
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify on job's datastore.",
    },
)]
/// Runs a prune job manually.
pub fn run_prune_job(
    id: String,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // Look the job up first: the privilege check needs its ACL path.
    let (config, _digest) = prune::config()?;
    let job_config: PruneJobConfig = config.lookup("prune", &id)?;

    // Either Datastore.Modify or Datastore.Prune on the job's
    // datastore/namespace is sufficient to trigger a manual run.
    user_info.check_privs(
        &auth_id,
        &job_config.acl_path(),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    // Spawn the prune worker; the returned UPID string identifies the task.
    let job = Job::new("prunejob", &id)?;
    do_prune_job(job, job_config.options, job_config.store, &auth_id, None)
}
// Per-job subdirectories: POST …/{id}/run triggers a manual run.
#[sortable]
const PRUNE_INFO_SUBDIRS: SubdirMap = &[("run", &Router::new().post(&API_METHOD_RUN_PRUNE_JOB))];

// Router for a single job entry; GET lists the available subdirs.
const PRUNE_INFO_ROUTER: Router = Router::new()
    .get(&list_subdirs_api_method!(PRUNE_INFO_SUBDIRS))
    .subdirs(PRUNE_INFO_SUBDIRS);

// Top-level admin/prune router: GET lists jobs with status, {id} dispatches
// to the per-job router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_PRUNE_JOBS)
    .match_all("id", &PRUNE_INFO_ROUTER);

View File

@ -10,6 +10,7 @@ pub mod changer;
pub mod datastore;
pub mod drive;
pub mod media_pool;
pub mod prune;
pub mod remote;
pub mod sync;
pub mod tape_backup_job;
@ -25,6 +26,7 @@ const SUBDIRS: SubdirMap = &sorted!([
("datastore", &datastore::ROUTER),
("drive", &drive::ROUTER),
("media-pool", &media_pool::ROUTER),
("prune", &prune::ROUTER),
("remote", &remote::ROUTER),
("sync", &sync::ROUTER),
("tape-backup-job", &tape_backup_job::ROUTER),

398
src/api2/config/prune.rs Normal file
View File

@ -0,0 +1,398 @@
use anyhow::Error;
use hex::FromHex;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, PruneJobConfig, PruneJobConfigUpdater, JOB_ID_SCHEMA, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::prune;
use pbs_config::CachedUserInfo;
#[api(
    input: {
        properties: {},
    },
    returns: {
        description: "List configured prune schedules.",
        type: Array,
        items: { type: PruneJobConfig },
    },
    access: {
        permission: &Permission::Anybody,
        // FIXME: Audit on namespaces
        description: "Requires Datastore.Audit.",
    },
)]
/// List all scheduled prune jobs.
///
/// The result is filtered to jobs on whose ACL path the caller holds
/// Datastore.Audit, Datastore.Modify or Datastore.Prune.
pub fn list_prune_jobs(
    _param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<PruneJobConfig>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // Any one of these privileges makes a job visible.
    let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE;

    let (config, digest) = prune::config()?;

    let list = config.convert_to_typed_array("prune")?;
    let list = list
        .into_iter()
        .filter(|job: &PruneJobConfig| {
            let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
            // fixed: was written as `!= 00` (same value, but `0` is the intended literal)
            privs & required_privs != 0
        })
        .collect();

    // Expose the config digest so clients can do optimistic locking on updates.
    rpcenv["digest"] = hex::encode(&digest).into();

    Ok(list)
}
#[api(
    protected: true,
    input: {
        properties: {
            config: {
                type: PruneJobConfig,
                flatten: true,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify on job's datastore.",
    },
)]
/// Create a new prune job.
pub fn create_prune_job(
    config: PruneJobConfig,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // The caller needs Datastore.Modify or Datastore.Prune on the new job's
    // datastore/namespace.
    user_info.check_privs(
        &auth_id,
        &config.acl_path(),
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
        true,
    )?;

    // Hold the config lock for the whole read-modify-write cycle.
    let _lock = prune::lock_config()?;

    let (mut jobs_config, _digest) = prune::config()?;

    // Job ids must be unique across the prune job config.
    match jobs_config.sections.get(&config.id) {
        Some(_) => param_bail!("id", "job '{}' already exists.", config.id),
        None => jobs_config.set_data(&config.id, "prune", &config)?,
    }

    prune::save_config(&jobs_config)?;

    // Create the statefile for the new job.
    crate::server::jobstate::create_state_file("prunejob", &config.id)?;

    Ok(())
}
#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    returns: { type: PruneJobConfig },
    access: {
        permission: &Permission::Anybody,
        // fixed: the description used to also mention Datastore.Verify, but the
        // code below only ever checks Datastore.Audit.
        description: "Requires Datastore.Audit on job's datastore.",
    },
)]
/// Read a prune job configuration.
pub fn read_prune_job(
    id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<PruneJobConfig, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = prune::config()?;

    let prune_job: PruneJobConfig = config.lookup("prune", &id)?;

    // Reading requires Datastore.Audit on the job's datastore/namespace.
    let required_privs = PRIV_DATASTORE_AUDIT;
    user_info.check_privs(&auth_id, &prune_job.acl_path(), required_privs, true)?;

    // Return the digest so a subsequent update can detect concurrent changes.
    rpcenv["digest"] = hex::encode(&digest).into();

    Ok(prune_job)
}
#[api]
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
/// Deletable property name
// Serialized in kebab-case, so e.g. `KeepLast` appears as "keep-last" on the
// API wire format.
pub enum DeletableProperty {
    /// Delete the comment.
    Comment,
    /// Unset the disable flag.
    Disable,
    /// Reset the namespace to the root namespace.
    Ns,
    /// Reset the maximum depth to full recursion.
    MaxDepth,
    /// Delete number of last backups to keep.
    KeepLast,
    /// Delete number of hourly backups to keep.
    KeepHourly,
    /// Delete number of daily backups to keep.
    KeepDaily,
    /// Delete number of weekly backups to keep.
    KeepWeekly,
    /// Delete number of monthly backups to keep.
    KeepMonthly,
    /// Delete number of yearly backups to keep.
    KeepYearly,
}
#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            update: {
                type: PruneJobConfigUpdater,
                flatten: true,
            },
            delete: {
                description: "List of properties to delete.",
                type: Array,
                optional: true,
                items: {
                    type: DeletableProperty,
                }
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        description: "Requires Datastore.Modify on job's datastore.",
    },
)]
/// Update prune job config.
#[allow(clippy::too_many_arguments)]
pub fn update_prune_job(
    id: String,
    update: PruneJobConfigUpdater,
    delete: Option<Vec<DeletableProperty>>,
    digest: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // Hold the config lock for the whole read-modify-write cycle.
    let _lock = prune::lock_config()?;

    // pass/compare digest
    let (mut config, expected_digest) = prune::config()?;

    // If the client supplied a digest, refuse the update when the config
    // changed since the client read it (optimistic locking).
    if let Some(ref digest) = digest {
        let digest = <[u8; 32]>::from_hex(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    let mut data: PruneJobConfig = config.lookup("prune", &id)?;

    // Check privileges against the job's *current* ACL path first; a changed
    // store/ns is re-checked further below.
    user_info.check_privs(
        &auth_id,
        &data.acl_path(),
        PRIV_DATASTORE_PRUNE | PRIV_DATASTORE_MODIFY,
        true,
    )?;

    // Apply deletions before updates, so a property listed in `delete` can be
    // re-set by the same request.
    if let Some(delete) = delete {
        for delete_prop in delete {
            match delete_prop {
                DeletableProperty::Comment => {
                    data.comment = None;
                }
                DeletableProperty::Disable => {
                    data.disable = false;
                }
                DeletableProperty::Ns => {
                    data.options.ns = None;
                }
                DeletableProperty::MaxDepth => {
                    data.options.max_depth = None;
                }
                DeletableProperty::KeepLast => {
                    data.options.keep.keep_last = None;
                }
                DeletableProperty::KeepHourly => {
                    data.options.keep.keep_hourly = None;
                }
                DeletableProperty::KeepDaily => {
                    data.options.keep.keep_daily = None;
                }
                DeletableProperty::KeepWeekly => {
                    data.options.keep.keep_weekly = None;
                }
                DeletableProperty::KeepMonthly => {
                    data.options.keep.keep_monthly = None;
                }
                DeletableProperty::KeepYearly => {
                    data.options.keep.keep_yearly = None;
                }
            }
        }
    }

    // Changing the store or namespace changes the job's ACL path, so the
    // privileges must be re-validated against the *new* path.
    let mut recheck_privs = false;
    if let Some(store) = update.store {
        // check new store with possibly new ns:
        recheck_privs = true;
        data.store = store;
    }
    if let Some(ns) = update.options.ns {
        recheck_privs = true;
        // Root namespace is represented as `None` in the stored config.
        data.options.ns = if ns.is_root() { None } else { Some(ns) };
    }
    if recheck_privs {
        user_info.check_privs(
            &auth_id,
            &data.acl_path(),
            PRIV_DATASTORE_PRUNE | PRIV_DATASTORE_MODIFY,
            true,
        )?;
    }

    // Track schedule changes so the job's last-run time can be reset below.
    let mut schedule_changed = false;
    if let Some(schedule) = update.schedule {
        schedule_changed = data.schedule != schedule;
        data.schedule = schedule;
    }

    if let Some(max_depth) = update.options.max_depth {
        // NOTE(review): values above MAX_NAMESPACE_DEPTH are silently ignored
        // here — presumably the API schema rejects them earlier; confirm.
        if max_depth <= pbs_api_types::MAX_NAMESPACE_DEPTH {
            data.options.max_depth = Some(max_depth);
        }
    }

    if let Some(value) = update.disable {
        data.disable = value;
    }
    if let Some(value) = update.comment {
        data.comment = Some(value);
    }
    if let Some(value) = update.options.keep.keep_last {
        data.options.keep.keep_last = Some(value);
    }
    if let Some(value) = update.options.keep.keep_hourly {
        data.options.keep.keep_hourly = Some(value);
    }
    if let Some(value) = update.options.keep.keep_daily {
        data.options.keep.keep_daily = Some(value);
    }
    if let Some(value) = update.options.keep.keep_weekly {
        data.options.keep.keep_weekly = Some(value);
    }
    if let Some(value) = update.options.keep.keep_monthly {
        data.options.keep.keep_monthly = Some(value);
    }
    if let Some(value) = update.options.keep.keep_yearly {
        data.options.keep.keep_yearly = Some(value);
    }

    config.set_data(&id, "prune", &data)?;
    prune::save_config(&config)?;

    // A new schedule restarts the scheduling interval relative to "now".
    if schedule_changed {
        crate::server::jobstate::update_job_last_run_time("prunejob", &id)?;
    }

    Ok(())
}
#[api(
    protected: true,
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
            digest: {
                optional: true,
                schema: PROXMOX_CONFIG_DIGEST_SCHEMA,
            },
        },
    },
    access: {
        permission: &Permission::Anybody,
        // fixed: the description used to say "Requires Datastore.Verify", but
        // the code below checks Datastore.Prune/Datastore.Modify.
        description: "Requires Datastore.Modify on job's datastore.",
    },
)]
/// Remove a prune job configuration
pub fn delete_prune_job(
    id: String,
    digest: Option<String>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    // Hold the config lock for the whole read-modify-write cycle.
    let _lock = prune::lock_config()?;

    let (mut config, expected_digest) = prune::config()?;

    let job: PruneJobConfig = config.lookup("prune", &id)?;

    // Deleting requires Datastore.Prune or Datastore.Modify on the job's
    // datastore/namespace.
    user_info.check_privs(
        &auth_id,
        &job.acl_path(),
        PRIV_DATASTORE_PRUNE | PRIV_DATASTORE_MODIFY,
        true,
    )?;

    // Optimistic locking: refuse if the config changed since the client read it.
    if let Some(ref digest) = digest {
        let digest = <[u8; 32]>::from_hex(digest)?;
        crate::tools::detect_modified_configuration_file(&digest, &expected_digest)?;
    }

    if config.sections.remove(&id).is_none() {
        http_bail!(NOT_FOUND, "job '{}' does not exist.", id);
    }

    prune::save_config(&config)?;

    // Drop the job's statefile along with its configuration.
    crate::server::jobstate::remove_state_file("prunejob", &id)?;

    Ok(())
}
// Router for a single configured job: read, update, delete.
const ITEM_ROUTER: Router = Router::new()
    .get(&API_METHOD_READ_PRUNE_JOB)
    .put(&API_METHOD_UPDATE_PRUNE_JOB)
    .delete(&API_METHOD_DELETE_PRUNE_JOB);

// Top-level config/prune router: GET lists, POST creates, {id} dispatches to
// the per-job router above.
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_PRUNE_JOBS)
    .post(&API_METHOD_CREATE_PRUNE_JOB)
    .match_all("id", &ITEM_ROUTER);