datastore/prune schedules: use JobState for tracking of schedules

like the sync jobs, so that if an admin configures a schedule, it really
starts the next time that time is reached, not immediately

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
commit 9866de5e3d
parent 9d3f183ba9
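
For context (not part of the commit): the proxy's scheduler loop decides whether
a prune job is due by comparing the next calendar event after the last run time
against the current time. A minimal sketch of that decision, assuming the
helpers visible in the diff below (jobstate::last_run_time, parse_calendar_event,
compute_next_event) and simplifying their exact signatures:

    use anyhow::Error;
    use proxmox_backup::config::jobstate;
    use proxmox_backup::tools::systemd::time::{parse_calendar_event, compute_next_event};

    /// Sketch: is the prune job for `store` due at epoch `now`?
    /// `event_str` is a systemd calendar event, e.g. "daily".
    fn prune_job_is_due(store: &str, event_str: &str, now: i64) -> Result<bool, Error> {
        let event = parse_calendar_event(event_str)?;
        // With JobState tracking, `last` is the last run time or the time the
        // state file was (re)created -- so a freshly configured schedule counts
        // from "now" instead of from 0, which would fire immediately.
        let last = jobstate::last_run_time("prune", store)?;
        let next = compute_next_event(&event, last, false)?;
        Ok(next <= now)
    }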
diff --git a/src/api2/config/datastore.rs b/src/api2/config/datastore.rs
@@ -131,6 +131,8 @@ pub fn create_datastore(param: Value) -> Result<(), Error> {
 
     datastore::save_config(&config)?;
 
+    crate::config::jobstate::create_state_file("prune", &datastore.name)?;
+
     Ok(())
 }
 
@@ -312,7 +314,11 @@ pub fn update_datastore(
     }
 
     if gc_schedule.is_some() { data.gc_schedule = gc_schedule; }
-    if prune_schedule.is_some() { data.prune_schedule = prune_schedule; }
+    let mut prune_schedule_changed = false;
+    if prune_schedule.is_some() {
+        prune_schedule_changed = true;
+        data.prune_schedule = prune_schedule;
+    }
     if verify_schedule.is_some() { data.verify_schedule = verify_schedule; }
 
     if keep_last.is_some() { data.keep_last = keep_last; }
@@ -326,6 +332,12 @@ pub fn update_datastore(
 
     datastore::save_config(&config)?;
 
+    // we want to reset the statefile, to avoid an immediate sync in some cases
+    // (e.g. going from monthly to weekly in the second week of the month)
+    if prune_schedule_changed {
+        crate::config::jobstate::create_state_file("prune", &name)?;
+    }
+
     Ok(())
 }
 
@@ -365,6 +377,8 @@ pub fn delete_datastore(name: String, digest: Option<String>) -> Result<(), Error> {
 
     datastore::save_config(&config)?;
 
+    crate::config::jobstate::remove_state_file("prune", &name)?;
+
     Ok(())
 }
 
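The create_state_file call in update_datastore above deserves a note: recreating
the state file on a schedule change resets the reference time that
jobstate::last_run_time reports, so compute_next_event measures from the moment
of the change (switching from monthly to weekly in the second week of the month
no longer triggers an immediate run). A hypothetical, self-contained sketch of
that idea -- the real implementation lives in src/config/jobstate.rs and differs
in detail (path, format, locking):

    use std::fs;
    use std::io;
    use std::time::{SystemTime, UNIX_EPOCH};

    // Hypothetical sketch: (re)creating the state file records "now" as the
    // timestamp that a last_run_time() lookup returns until the job next runs.
    fn reset_job_state(jobtype: &str, jobname: &str) -> io::Result<()> {
        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock before epoch")
            .as_secs();
        let path = format!("/tmp/jobstate-{}-{}.json", jobtype, jobname); // illustrative path
        fs::write(path, format!("{{\"state\":\"created\",\"time\":{}}}", now))
    }
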
diff --git a/src/bin/proxmox-backup-proxy.rs b/src/bin/proxmox-backup-proxy.rs
@@ -337,7 +337,10 @@ async fn schedule_datastore_prune() {
     use proxmox_backup::backup::{
         PruneOptions, DataStore, BackupGroup, compute_prune_info};
     use proxmox_backup::server::{WorkerTask};
-    use proxmox_backup::config::datastore::{self, DataStoreConfig};
+    use proxmox_backup::config::{
+        jobstate::{self, Job},
+        datastore::{self, DataStoreConfig}
+    };
     use proxmox_backup::tools::systemd::time::{
         parse_calendar_event, compute_next_event};
 
@@ -394,16 +397,10 @@ async fn schedule_datastore_prune() {
 
         let worker_type = "prune";
 
-        let last = match lookup_last_worker(worker_type, &store) {
-            Ok(Some(upid)) => {
-                if proxmox_backup::server::worker_is_active_local(&upid) {
-                    continue;
-                }
-                upid.starttime
-            }
-            Ok(None) => 0,
+        let last = match jobstate::last_run_time(worker_type, &store) {
+            Ok(time) => time,
             Err(err) => {
-                eprintln!("lookup_last_job_start failed: {}", err);
+                eprintln!("could not get last run time of {} {}: {}", worker_type, store, err);
                 continue;
             }
         };
@@ -421,6 +418,11 @@ async fn schedule_datastore_prune() {
 
         if next > now { continue; }
 
+        let mut job = match Job::new(worker_type, &store) {
+            Ok(job) => job,
+            Err(_) => continue, // could not get lock
+        };
+
         let store2 = store.clone();
 
         if let Err(err) = WorkerTask::new_thread(
@@ -429,34 +431,47 @@ async fn schedule_datastore_prune() {
             Userid::backup_userid().clone(),
             false,
             move |worker| {
-                worker.log(format!("Starting datastore prune on store \"{}\"", store));
-                worker.log(format!("task triggered by schedule '{}'", event_str));
-                worker.log(format!("retention options: {}", prune_options.cli_options_string()));
+
+                job.start(&worker.upid().to_string())?;
+
+                let result = {
+                    worker.log(format!("Starting datastore prune on store \"{}\"", store));
+                    worker.log(format!("task triggered by schedule '{}'", event_str));
+                    worker.log(format!("retention options: {}", prune_options.cli_options_string()));
 
-                let base_path = datastore.base_path();
+                    let base_path = datastore.base_path();
 
-                let groups = BackupGroup::list_groups(&base_path)?;
-                for group in groups {
-                    let list = group.list_backups(&base_path)?;
-                    let mut prune_info = compute_prune_info(list, &prune_options)?;
-                    prune_info.reverse(); // delete older snapshots first
+                    let groups = BackupGroup::list_groups(&base_path)?;
+                    for group in groups {
+                        let list = group.list_backups(&base_path)?;
+                        let mut prune_info = compute_prune_info(list, &prune_options)?;
+                        prune_info.reverse(); // delete older snapshots first
 
-                    worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
-                        store, group.backup_type(), group.backup_id()));
+                        worker.log(format!("Starting prune on store \"{}\" group \"{}/{}\"",
+                            store, group.backup_type(), group.backup_id()));
 
-                    for (info, keep) in prune_info {
-                        worker.log(format!(
-                            "{} {}/{}/{}",
-                            if keep { "keep" } else { "remove" },
-                            group.backup_type(), group.backup_id(),
-                            info.backup_dir.backup_time_string()));
-                        if !keep {
-                            datastore.remove_backup_dir(&info.backup_dir, true)?;
+                        for (info, keep) in prune_info {
+                            worker.log(format!(
+                                "{} {}/{}/{}",
+                                if keep { "keep" } else { "remove" },
+                                group.backup_type(), group.backup_id(),
+                                info.backup_dir.backup_time_string()));
+                            if !keep {
+                                datastore.remove_backup_dir(&info.backup_dir, true)?;
+                            }
                         }
                     }
-                }
 
-                Ok(())
+                    Ok(())
+                };
+
+                let status = worker.create_state(&result);
+
+                if let Err(err) = job.finish(status) {
+                    eprintln!("could not finish job state for {}: {}", worker_type, err);
+                }
+
+                result
             }
         ) {
             eprintln!("unable to start datastore prune on store {} - {}", store2, err);
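
The worker closure above follows the same job lifecycle pattern as the sync
jobs: Job::new acquires the per-job lock (the scheduler skips this round if
another instance holds it), job.start records the worker's UPID, and job.finish
persists the final task state so the next scheduler iteration sees an accurate
last run time. Stripped of the prune-specific work, the pattern looks roughly
like this (a sketch only: `do_work` stands in for the actual prune loop, and
the worker/job types are those from the diff):

    let mut job = match Job::new(worker_type, &store) {
        Ok(job) => job,
        Err(_) => return Ok(()), // another instance holds the lock
    };

    job.start(&worker.upid().to_string())?;

    // run the actual work, keeping the outcome around for the job state
    let result = do_work(&worker);

    // translate the Result into a task state and persist it
    let status = worker.create_state(&result);
    if let Err(err) = job.finish(status) {
        eprintln!("could not finish job state for {}: {}", worker_type, err);
    }

    result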