api2/tape/backup: wait indefinitely for lock in scheduled backup jobs

so that a user can schedule multiple backup jobs onto a single
media pool without having to stagger their start times manually

this makes sense since we can back up multiple datastores onto
the same media-set, but can only specify one datastore per backup job

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
Dominik Csapak, 2021-03-19 08:53:18 +01:00 (committed by Dietmar Maurer)
parent 4abd4dbe38
commit 54fcb7f5d8

@@ -5,6 +5,7 @@ use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
 use proxmox::{
+    try_block,
     api::{
         api,
         RpcEnvironment,
@@ -177,8 +178,15 @@ pub fn do_tape_backup_job(
 
     let (drive_config, _digest) = config::drive::config()?;
 
-    // early check/lock before starting worker
-    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;
+    // for scheduled jobs we acquire the lock later in the worker
+    let drive_lock = if schedule.is_some() {
+        None
+    } else {
+        Some(lock_tape_device(&drive_config, &setup.drive)?)
+    };
+
+    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
+    let email = lookup_user_email(notify_user);
 
     let upid_str = WorkerTask::new_thread(
         &worker_type,
@@ -186,26 +194,37 @@ pub fn do_tape_backup_job(
         auth_id.clone(),
         false,
         move |worker| {
-            let _drive_lock = drive_lock; // keep lock guard
-
-            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
             job.start(&worker.upid().to_string())?;
+            let mut drive_lock = drive_lock;
 
-            task_log!(worker,"Starting tape backup job '{}'", job_id);
-            if let Some(event_str) = schedule {
-                task_log!(worker,"task triggered by schedule '{}'", event_str);
-            }
-
-            let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
-            let email = lookup_user_email(notify_user);
-
-            let (job_result, summary) = match backup_worker(
-                &worker,
-                datastore,
-                &pool_config,
-                &setup,
-                email.clone(),
-            ) {
+            let (job_result, summary) = match try_block!({
+                if schedule.is_some() {
+                    // for scheduled tape backup jobs, we wait indefinitely for the lock
+                    task_log!(worker, "waiting for drive lock...");
+                    loop {
+                        if let Ok(lock) = lock_tape_device(&drive_config, &setup.drive) {
+                            drive_lock = Some(lock);
+                            break;
+                        } // ignore errors
+
+                        worker.check_abort()?;
+                    }
+                }
+                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
+
+                task_log!(worker,"Starting tape backup job '{}'", job_id);
+                if let Some(event_str) = schedule {
+                    task_log!(worker,"task triggered by schedule '{}'", event_str);
+                }
+
+                backup_worker(
+                    &worker,
+                    datastore,
+                    &pool_config,
+                    &setup,
+                    email.clone(),
+                )
+            }) {
                 Ok(summary) => (Ok(()), summary),
                 Err(err) => (Err(err), Default::default()),
            };
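
For context, the change boils down to a poll-until-locked loop that still honors task aborts. The following is a minimal, self-contained sketch of that pattern, not the proxmox-backup code itself: try_lock_drive and abort_requested are hypothetical stand-ins for lock_tape_device and worker.check_abort, and the sleep exists only so this standalone example does not spin in a tight loop.

    use std::{thread, time::Duration};

    // placeholder for the real drive lock guard type
    struct DriveLock;

    // hypothetical stand-in for lock_tape_device(): fails while the drive is busy
    fn try_lock_drive() -> Result<DriveLock, String> {
        Err(String::from("drive is currently in use"))
    }

    // hypothetical stand-in for worker.check_abort()
    fn abort_requested() -> bool {
        false
    }

    // wait indefinitely for the drive lock, but still react to a task abort
    fn wait_for_drive_lock() -> Result<DriveLock, String> {
        loop {
            if let Ok(lock) = try_lock_drive() {
                return Ok(lock);
            } // lock errors are ignored and the attempt is simply retried

            if abort_requested() {
                return Err(String::from("task aborted while waiting for the drive lock"));
            }

            // small pause only for this sketch, to avoid a busy loop
            thread::sleep(Duration::from_secs(1));
        }
    }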