proxy: move prune logic into new file

Signed-off-by: Hannes Laimer <h.laimer@proxmox.com>
This commit is contained in:
Hannes Laimer 2020-10-30 09:07:24 +01:00 committed by Dietmar Maurer
parent f8a682a873
commit b8d9079835
3 changed files with 100 additions and 62 deletions

View File

@ -49,6 +49,7 @@ use proxmox_backup::tools::{
use proxmox_backup::api2::pull::do_sync_job; use proxmox_backup::api2::pull::do_sync_job;
use proxmox_backup::server::do_verification_job; use proxmox_backup::server::do_verification_job;
use proxmox_backup::server::do_prune_job;
fn main() -> Result<(), Error> { fn main() -> Result<(), Error> {
proxmox_backup::tools::setup_safe_path_env(); proxmox_backup::tools::setup_safe_path_env();
@ -370,8 +371,6 @@ async fn schedule_datastore_prune() {
use proxmox_backup::{ use proxmox_backup::{
backup::{ backup::{
PruneOptions, PruneOptions,
BackupGroup,
compute_prune_info,
}, },
config::datastore::{ config::datastore::{
self, self,
@ -388,13 +387,6 @@ async fn schedule_datastore_prune() {
}; };
for (store, (_, store_config)) in config.sections { for (store, (_, store_config)) in config.sections {
let datastore = match DataStore::lookup_datastore(&store) {
Ok(datastore) => datastore,
Err(err) => {
eprintln!("lookup_datastore '{}' failed - {}", store, err);
continue;
}
};
let store_config: DataStoreConfig = match serde_json::from_value(store_config) { let store_config: DataStoreConfig = match serde_json::from_value(store_config) {
Ok(c) => c, Ok(c) => c,
@ -453,64 +445,16 @@ async fn schedule_datastore_prune() {
if next > now { continue; } if next > now { continue; }
let mut job = match Job::new(worker_type, &store) { let job = match Job::new(worker_type, &store) {
Ok(job) => job, Ok(job) => job,
Err(_) => continue, // could not get lock Err(_) => continue, // could not get lock
}; };
let store2 = store.clone(); let auth_id = Authid::backup_auth_id();
if let Err(err) = do_prune_job(job, prune_options, store.clone(), &auth_id, Some(event_str)) {
if let Err(err) = WorkerTask::new_thread( eprintln!("unable to start datastore prune job {} - {}", &store, err);
worker_type,
Some(store.clone()),
Authid::backup_auth_id().clone(),
false,
move |worker| {
job.start(&worker.upid().to_string())?;
let result = try_block!({
worker.log(format!("Starting datastore prune on store \"{}\"", store));
worker.log(format!("task triggered by schedule '{}'", event_str));
worker.log(format!("retention options: {}", prune_options.cli_options_string()));
let base_path = datastore.base_path();
let groups = BackupGroup::list_groups(&base_path)?;
for group in groups {
let list = group.list_backups(&base_path)?;
let mut prune_info = compute_prune_info(list, &prune_options)?;
prune_info.reverse(); // delete older snapshots first
worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
store, group.backup_type(), group.backup_id()));
for (info, keep) in prune_info {
worker.log(format!(
"{} {}/{}/{}",
if keep { "keep" } else { "remove" },
group.backup_type(), group.backup_id(),
info.backup_dir.backup_time_string()));
if !keep {
datastore.remove_backup_dir(&info.backup_dir, true)?;
}
}
}
Ok(())
});
let status = worker.create_state(&result);
if let Err(err) = job.finish(status) {
eprintln!("could not finish job state for {}: {}", worker_type, err);
}
result
}
) {
eprintln!("unable to start datastore prune on store {} - {}", store2, err);
} }
} }
} }

View File

@ -35,5 +35,8 @@ pub mod jobstate;
mod verify_job; mod verify_job;
pub use verify_job::*; pub use verify_job::*;
mod prune_job;
pub use prune_job::*;
mod email_notifications; mod email_notifications;
pub use email_notifications::*; pub use email_notifications::*;

src/server/prune_job.rs — new file, 91 lines added
View File

@ -0,0 +1,91 @@
use anyhow::Error;
use proxmox::try_block;
use crate::{
api2::types::*,
backup::{compute_prune_info, BackupGroup, DataStore, PruneOptions},
server::jobstate::Job,
server::WorkerTask,
task_log,
};
/// Runs a prune job for `store` in a newly spawned worker thread and
/// returns the worker's UPID string.
///
/// The datastore lookup happens up front, so an unknown store fails the
/// call before any worker is created. Inside the worker, the job is
/// marked started under the worker's UPID, every backup group in the
/// store is pruned according to `prune_options`, and the final job state
/// is persisted via `job.finish()`.
///
/// Parameters:
/// - `job`: job-state handle; per the caller-side `Job::new` error
///   handling ("could not get lock"), constructing it acquires the job
///   lock — NOTE(review): confirm this locking contract.
/// - `prune_options`: retention settings used by `compute_prune_info`.
/// - `store`: name of the datastore to prune.
/// - `auth_id`: identity the worker task runs as.
/// - `schedule`: optional schedule event string, logged when present
///   (i.e. when the job was triggered by the scheduler rather than
///   manually).
///
/// Errors: fails if the datastore lookup or worker-thread creation
/// fails; errors inside the worker are captured in the job state.
pub fn do_prune_job(
    mut job: Job,
    prune_options: PruneOptions,
    store: String,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {
    // Resolve the datastore before spawning the worker so a bad store
    // name is reported synchronously to the caller.
    let datastore = DataStore::lookup_datastore(&store)?;

    let worker_type = job.jobtype().to_string();

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job.jobname().to_string()),
        auth_id.clone(),
        false,
        move |worker| {
            // Record the worker's UPID in the job state so the run is
            // attributable to this task.
            job.start(&worker.upid().to_string())?;

            // try_block! collects the fallible prune work into `result`
            // so job state is finalized below even on error.
            let result = try_block!({
                task_log!(worker, "Starting datastore prune on store \"{}\"", store);

                // Only scheduled runs carry an event string worth logging.
                if let Some(event_str) = schedule {
                    task_log!(worker, "task triggered by schedule '{}'", event_str);
                }

                task_log!(
                    worker,
                    "retention options: {}",
                    prune_options.cli_options_string()
                );

                let base_path = datastore.base_path();

                let groups = BackupGroup::list_groups(&base_path)?;
                for group in groups {
                    let list = group.list_backups(&base_path)?;
                    let mut prune_info = compute_prune_info(list, &prune_options)?;
                    prune_info.reverse(); // delete older snapshots first

                    task_log!(
                        worker,
                        "Starting prune on store \"{}\" group \"{}/{}\"",
                        store,
                        group.backup_type(),
                        group.backup_id()
                    );

                    // Log every snapshot's keep/remove decision, then
                    // delete the ones not kept.
                    for (info, keep) in prune_info {
                        task_log!(
                            worker,
                            "{} {}/{}/{}",
                            if keep { "keep" } else { "remove" },
                            group.backup_type(),
                            group.backup_id(),
                            info.backup_dir.backup_time_string()
                        );

                        if !keep {
                            // Second argument presumably forces removal
                            // of the snapshot dir — TODO confirm against
                            // DataStore::remove_backup_dir.
                            datastore.remove_backup_dir(&info.backup_dir, true)?;
                        }
                    }
                }

                Ok(())
            });

            // Persist the outcome in the job state file. A failure to
            // write the state is only logged: the prune work itself is
            // already done (or failed) and `result` is returned as-is.
            let status = worker.create_state(&result);

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            result
        },
    )?;

    Ok(upid_str)
}