use std::sync::Arc;

use anyhow::Error;

use pbs_datastore::{task_log, task_warn};

use crate::{
    api2::types::*,
    backup::{compute_prune_info, BackupInfo, DataStore, PruneOptions},
    server::jobstate::Job,
    server::WorkerTask,
};
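
/// Worker task body of a prune job: applies `prune_options` to every backup
/// group of `datastore` and removes all snapshots that are not selected to be
/// kept. Failure to remove a single snapshot is logged as a warning and does
/// not abort the prune.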
pub fn prune_datastore(
    worker: Arc<WorkerTask>,
    prune_options: PruneOptions,
    store: &str,
    datastore: Arc<DataStore>,
) -> Result<(), Error> {
    task_log!(worker, "Starting datastore prune on store \"{}\"", store);
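
    // If no retention option is configured, nothing is selected for removal
    // and every snapshot is kept unconditionally.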
    let keep_all = !prune_options.keeps_something();

    if keep_all {
        task_log!(worker, "No prune selection - keeping all files.");
    } else {
        task_log!(
            worker,
            "retention options: {}",
            prune_options.cli_options_string()
        );
    }

    let base_path = datastore.base_path();

    let groups = BackupInfo::list_backup_groups(&base_path)?;
    for group in groups {
        let list = group.list_backups(&base_path)?;
        let mut prune_info = compute_prune_info(list, &prune_options)?;
        prune_info.reverse(); // delete older snapshots first

        task_log!(
            worker,
            "Starting prune on store \"{}\" group \"{}/{}\"",
            store,
            group.backup_type(),
            group.backup_id()
        );
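
        // `compute_prune_info` pairs each snapshot with a keep/remove verdict;
        // log each decision and remove the snapshots not selected for keeping.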
        for (info, mut keep) in prune_info {
            if keep_all {
                keep = true;
            }

            task_log!(
                worker,
                "{} {}/{}/{}",
                if keep { "keep" } else { "remove" },
                group.backup_type(),
                group.backup_id(),
                info.backup_dir.backup_time_string()
            );

            if !keep {
                if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
                    task_warn!(
                        worker,
                        "failed to remove dir {:?}: {}",
                        info.backup_dir.relative_path(),
                        err,
                    );
                }
            }
        }
    }

    Ok(())
}
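
/// Run the prune job `job` on `store`: look up the datastore, spawn a worker
/// task performing the prune, and record the outcome in the job state.
/// Returns the UPID string identifying the spawned worker task.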
pub fn do_prune_job(
    mut job: Job,
    prune_options: PruneOptions,
    store: String,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {
    let datastore = DataStore::lookup_datastore(&store)?;
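
    // Run the prune in a separate worker task; the closure below executes in
    // its own thread and takes ownership of the job handle.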
    let worker_type = job.jobtype().to_string();
    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job.jobname().to_string()),
        auth_id.clone(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;

            if let Some(event_str) = schedule {
                task_log!(worker, "task triggered by schedule '{}'", event_str);
            }

            let result = prune_datastore(worker.clone(), prune_options, &store, datastore);
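
            // Turn the prune result into a task state and persist it via the
            // job state handling, so the last run's outcome is recorded.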
            let status = worker.create_state(&result);

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            result
        },
    )?;

    Ok(upid_str)
}