2020-12-18 14:32:12 +00:00
|
|
|
use std::path::Path;
|
2021-03-16 07:11:57 +00:00
|
|
|
use std::sync::{Mutex, Arc};
|
2020-12-18 14:32:12 +00:00
|
|
|
|
2021-02-19 08:02:13 +00:00
|
|
|
use anyhow::{bail, format_err, Error};
|
2020-12-18 14:32:12 +00:00
|
|
|
use serde_json::Value;
|
|
|
|
|
|
|
|
use proxmox::{
|
2021-03-19 07:53:18 +00:00
|
|
|
try_block,
|
2020-12-18 14:32:12 +00:00
|
|
|
api::{
|
|
|
|
api,
|
|
|
|
RpcEnvironment,
|
2020-12-30 18:01:39 +00:00
|
|
|
RpcEnvironmentType,
|
2020-12-18 14:32:12 +00:00
|
|
|
Router,
|
2021-03-03 11:44:39 +00:00
|
|
|
Permission,
|
2020-12-18 14:32:12 +00:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
use crate::{
|
2021-01-29 08:07:55 +00:00
|
|
|
task_log,
|
2021-03-09 08:43:06 +00:00
|
|
|
task_warn,
|
2021-02-15 06:55:13 +00:00
|
|
|
config::{
|
|
|
|
self,
|
2021-03-03 11:44:39 +00:00
|
|
|
cached_user_info::CachedUserInfo,
|
|
|
|
acl::{
|
2021-03-05 10:40:52 +00:00
|
|
|
PRIV_DATASTORE_READ,
|
2021-03-03 11:44:39 +00:00
|
|
|
PRIV_TAPE_AUDIT,
|
2021-03-05 10:40:52 +00:00
|
|
|
PRIV_TAPE_WRITE,
|
2021-03-03 11:44:39 +00:00
|
|
|
},
|
2021-02-19 08:02:13 +00:00
|
|
|
tape_job::{
|
|
|
|
TapeBackupJobConfig,
|
2021-02-24 17:12:11 +00:00
|
|
|
TapeBackupJobSetup,
|
2021-02-19 08:02:13 +00:00
|
|
|
TapeBackupJobStatus,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
server::{
|
2021-03-05 13:10:19 +00:00
|
|
|
lookup_user_email,
|
2021-03-19 07:53:17 +00:00
|
|
|
TapeBackupJobSummary,
|
2021-02-19 08:02:13 +00:00
|
|
|
jobstate::{
|
|
|
|
Job,
|
|
|
|
JobState,
|
|
|
|
compute_schedule_status,
|
|
|
|
},
|
2021-02-15 06:55:13 +00:00
|
|
|
},
|
2020-12-18 14:32:12 +00:00
|
|
|
backup::{
|
|
|
|
DataStore,
|
|
|
|
BackupDir,
|
|
|
|
BackupInfo,
|
2021-03-10 09:59:13 +00:00
|
|
|
StoreProgress,
|
2020-12-18 14:32:12 +00:00
|
|
|
},
|
|
|
|
api2::types::{
|
|
|
|
Authid,
|
|
|
|
UPID_SCHEMA,
|
2021-02-15 10:09:30 +00:00
|
|
|
JOB_ID_SCHEMA,
|
2020-12-18 14:32:12 +00:00
|
|
|
MediaPoolConfig,
|
2021-03-05 13:10:19 +00:00
|
|
|
Userid,
|
2020-12-18 14:32:12 +00:00
|
|
|
},
|
|
|
|
server::WorkerTask,
|
2021-01-29 08:07:55 +00:00
|
|
|
task::TaskState,
|
2020-12-18 14:32:12 +00:00
|
|
|
tape::{
|
|
|
|
TAPE_STATUS_DIR,
|
|
|
|
Inventory,
|
|
|
|
PoolWriter,
|
|
|
|
MediaPool,
|
|
|
|
SnapshotReader,
|
2021-02-05 09:50:21 +00:00
|
|
|
drive::{
|
|
|
|
media_changer,
|
|
|
|
lock_tape_device,
|
2021-02-18 14:40:28 +00:00
|
|
|
set_tape_device_state,
|
2021-02-05 09:50:21 +00:00
|
|
|
},
|
2021-01-21 16:12:01 +00:00
|
|
|
changer::update_changer_online_status,
|
2020-12-18 14:32:12 +00:00
|
|
|
},
|
|
|
|
};
|
|
|
|
|
2021-02-15 10:09:30 +00:00
|
|
|
// Sub-router for a single tape backup job (matched via the `id` parameter below);
// POST manually triggers the job run.
const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);
|
|
|
|
|
|
|
|
// Top-level router for this module:
// GET  -> list configured tape backup jobs with status
// POST -> run a one-shot tape backup (parameters given directly, not from job config)
// /{id}/... -> per-job sub-router (manual job run)
pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);
|
|
|
|
|
2021-03-05 10:40:52 +00:00
|
|
|
fn check_backup_permission(
|
|
|
|
auth_id: &Authid,
|
|
|
|
store: &str,
|
|
|
|
pool: &str,
|
|
|
|
drive: &str,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
|
|
|
|
let user_info = CachedUserInfo::new()?;
|
|
|
|
|
|
|
|
let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
|
|
|
|
if (privs & PRIV_DATASTORE_READ) == 0 {
|
|
|
|
bail!("no permissions on /datastore/{}", store);
|
|
|
|
}
|
|
|
|
|
|
|
|
let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
|
|
|
|
if (privs & PRIV_TAPE_WRITE) == 0 {
|
|
|
|
bail!("no permissions on /tape/drive/{}", drive);
|
|
|
|
}
|
|
|
|
|
|
|
|
let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
|
|
|
|
if (privs & PRIV_TAPE_WRITE) == 0 {
|
|
|
|
bail!("no permissions on /tape/pool/{}", pool);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-02-19 08:02:13 +00:00
|
|
|
#[api(
|
|
|
|
returns: {
|
|
|
|
description: "List configured thape backup jobs and their status",
|
|
|
|
type: Array,
|
|
|
|
items: { type: TapeBackupJobStatus },
|
|
|
|
},
|
2021-03-03 11:44:39 +00:00
|
|
|
access: {
|
|
|
|
description: "List configured tape jobs filtered by Tape.Audit privileges",
|
|
|
|
permission: &Permission::Anybody,
|
|
|
|
},
|
2021-02-19 08:02:13 +00:00
|
|
|
)]
|
|
|
|
/// List all tape backup jobs
|
|
|
|
pub fn list_tape_backup_jobs(
|
|
|
|
_param: Value,
|
|
|
|
mut rpcenv: &mut dyn RpcEnvironment,
|
|
|
|
) -> Result<Vec<TapeBackupJobStatus>, Error> {
|
2021-03-03 11:44:39 +00:00
|
|
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
|
|
|
let user_info = CachedUserInfo::new()?;
|
2021-02-19 08:02:13 +00:00
|
|
|
|
|
|
|
let (config, digest) = config::tape_job::config()?;
|
|
|
|
|
|
|
|
let job_list_iter = config
|
|
|
|
.convert_to_typed_array("backup")?
|
|
|
|
.into_iter()
|
|
|
|
.filter(|_job: &TapeBackupJobConfig| {
|
|
|
|
// fixme: check access permission
|
|
|
|
true
|
|
|
|
});
|
|
|
|
|
|
|
|
let mut list = Vec::new();
|
|
|
|
|
|
|
|
for job in job_list_iter {
|
2021-03-03 11:44:39 +00:00
|
|
|
let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
|
|
|
|
if (privs & PRIV_TAPE_AUDIT) == 0 {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2021-02-19 08:02:13 +00:00
|
|
|
let last_state = JobState::load("tape-backup-job", &job.id)
|
|
|
|
.map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;
|
|
|
|
|
|
|
|
let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;
|
|
|
|
|
|
|
|
list.push(TapeBackupJobStatus { config: job, status });
|
|
|
|
}
|
|
|
|
|
|
|
|
rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();
|
|
|
|
|
|
|
|
Ok(list)
|
|
|
|
}
|
|
|
|
|
2021-02-15 06:55:13 +00:00
|
|
|
/// Run a configured tape backup job in a background worker task.
///
/// Spawns a `WorkerTask` thread that performs the backup via
/// [`backup_worker`], records the job state, and (if a notification user
/// is configured) sends a status mail. Returns the UPID string of the
/// spawned worker.
///
/// Locking: for manual runs (`schedule == None`) the drive lock is
/// acquired *before* spawning the worker so the caller gets an immediate
/// error if the drive is busy. For scheduled runs the lock is acquired
/// inside the worker, waiting indefinitely (polling, abort-checked).
pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

    // worker task id: "<store>:<pool>:<drive>:<jobname>"
    let job_id = format!("{}:{}:{}:{}",
        setup.store,
        setup.pool,
        setup.drive,
        job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // for scheduled jobs we acquire the lock later in the worker
    let drive_lock = if schedule.is_some() {
        None
    } else {
        Some(lock_tape_device(&drive_config, &setup.drive)?)
    };

    // fall back to notifying root@pam when no notify user is configured
    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.clone(),
        false,
        move |worker| {
            job.start(&worker.upid().to_string())?;
            // move the (possibly empty) pre-acquired lock into the worker;
            // it is held for the whole backup and dropped when the closure ends
            let mut drive_lock = drive_lock;

            let (job_result, summary) = match try_block!({
                if schedule.is_some() {
                    // for scheduled tape backup jobs, we wait indefinitely for the lock
                    task_log!(worker, "waiting for drive lock...");
                    loop {
                        if let Ok(lock) = lock_tape_device(&drive_config, &setup.drive) {
                            drive_lock = Some(lock);
                            break;
                        } // ignore errors

                        // keep the wait loop abortable
                        worker.check_abort()?;
                    }
                }
                // publish our UPID as the drive's current task
                set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

                task_log!(worker,"Starting tape backup job '{}'", job_id);
                if let Some(event_str) = schedule {
                    task_log!(worker,"task triggered by schedule '{}'", event_str);
                }

                backup_worker(
                    &worker,
                    datastore,
                    &pool_config,
                    &setup,
                    email.clone(),
                )
            }) {
                Ok(summary) => (Ok(()), summary),
                Err(err) => (Err(err), Default::default()),
            };

            let status = worker.create_state(&job_result);

            // best-effort notification mail; failure is only logged
            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // persist the final job state (best effort)
            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            // clear the drive's task state again (best effort)
            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!(
                    "could not unset drive state for {}: {}",
                    setup.drive,
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}
|
|
|
|
|
2021-02-15 10:09:30 +00:00
|
|
|
#[api(
|
|
|
|
input: {
|
|
|
|
properties: {
|
|
|
|
id: {
|
|
|
|
schema: JOB_ID_SCHEMA,
|
|
|
|
},
|
|
|
|
},
|
|
|
|
},
|
2021-03-05 10:40:52 +00:00
|
|
|
access: {
|
|
|
|
// Note: parameters are from job config, so we need to test inside function body
|
|
|
|
description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
|
|
|
|
and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
|
|
|
|
permission: &Permission::Anybody,
|
|
|
|
},
|
2021-02-15 10:09:30 +00:00
|
|
|
)]
|
|
|
|
/// Runs a tape backup job manually.
|
|
|
|
pub fn run_tape_backup_job(
|
|
|
|
id: String,
|
|
|
|
rpcenv: &mut dyn RpcEnvironment,
|
|
|
|
) -> Result<String, Error> {
|
|
|
|
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
|
|
|
|
|
|
|
let (config, _digest) = config::tape_job::config()?;
|
|
|
|
let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;
|
|
|
|
|
2021-03-05 10:40:52 +00:00
|
|
|
check_backup_permission(
|
|
|
|
&auth_id,
|
|
|
|
&backup_job.setup.store,
|
|
|
|
&backup_job.setup.pool,
|
|
|
|
&backup_job.setup.drive,
|
|
|
|
)?;
|
|
|
|
|
2021-02-15 10:09:30 +00:00
|
|
|
let job = Job::new("tape-backup-job", &id)?;
|
|
|
|
|
2021-02-24 17:12:11 +00:00
|
|
|
let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;
|
2021-02-15 10:09:30 +00:00
|
|
|
|
|
|
|
Ok(upid_str)
|
|
|
|
}
|
|
|
|
|
2020-12-18 14:32:12 +00:00
|
|
|
#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are no uri parameter, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
///
/// One-shot variant of a tape backup job: takes the (flattened) job
/// setup directly, spawns a "tape-backup" worker task and returns its
/// UPID. Unlike `do_tape_backup_job` this always acquires the drive
/// lock up front, so a busy drive fails fast.
pub fn backup(
    setup: TapeBackupJobSetup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    // parameters come from the request body, so ACLs are checked here
    check_backup_permission(
        &auth_id,
        &setup.store,
        &setup.pool,
        &setup.drive,
    )?;

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    // worker task id: "<store>:<pool>:<drive>"
    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    // fall back to notifying root@pam when no notify user is configured
    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id,
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            // publish our UPID as the drive's current task
            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let (job_result, summary) = match backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
            ) {
                Ok(summary) => (Ok(()), summary),
                Err(err) => (Err(err), Default::default()),
            };

            // best-effort notification mail; failure is only logged
            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                    summary,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");
            job_result
        }
    )?;

    Ok(upid_str.into())
}
|
|
|
|
|
|
|
|
/// Perform the actual tape backup inside a worker task.
///
/// Walks all backup groups of `datastore` (oldest snapshots first),
/// writes each snapshot not already on the media set to tape via
/// [`backup_snapshot`], then appends the media catalog and optionally
/// exports/ejects the media. Returns a summary (snapshot list +
/// duration) on success; bails if any snapshot failed.
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
) -> Result<TapeBackupJobSummary, Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let start = std::time::Instant::now();
    let mut summary: TapeBackupJobSummary = Default::default();

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name, false)?;

    let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    let group_count = group_list.len();
    task_log!(worker, "found {} groups", group_count);

    let mut progress = StoreProgress::new(group_count as u64);

    // latest-only mode backs up just the newest snapshot of each group
    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(worker, "latest-only: true (only considering latest snapshots)");
    }

    let datastore_name = datastore.name();

    // a failed snapshot does not abort the run; we record it and bail at the end
    let mut errors = false;

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let mut snapshot_list = group.list_backups(&datastore.base_path())?;

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            progress.group_snapshots = 1;
            // pop() yields the newest snapshot (list sorted oldest first)
            if let Some(info) = snapshot_list.pop() {
                // skip snapshots already stored on the current media set
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }
                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                // skip snapshots already stored on the current media set
                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }
                let snapshot_name = info.backup_dir.to_string();
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                } else {
                    summary.snapshot_list.push(snapshot_name);
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        }
    }

    pool_writer.commit()?;

    task_log!(worker, "append media catalog");

    // append the catalog; if it does not fit, retry once on the next volume
    let uuid = pool_writer.load_writable_media(worker)?;
    let done = pool_writer.append_catalog_archive(worker)?;
    if !done {
        task_log!(worker, "catalog does not fit on tape, writing to next volume");
        pool_writer.set_media_status_full(&uuid)?;
        pool_writer.load_writable_media(worker)?;
        let done = pool_writer.append_catalog_archive(worker)?;
        if !done {
            bail!("write_catalog_archive failed on second media");
        }
    }

    // export takes precedence over eject if both are configured
    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    summary.duration = start.elapsed();

    Ok(summary)
}
|
|
|
|
|
|
|
|
// Try to update the media online status
//
// If the drive belongs to a media changer, query the changer for the
// currently online media labels and update the inventory accordingly.
// Returns the changer name, or `None` if the drive has no changer.
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = config::drive::config()?;

    // drives without an associated changer are simply skipped
    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}
|
|
|
|
|
|
|
|
/// Write a single snapshot to tape.
///
/// Streams the snapshot's chunks (read by a separate reader thread) into
/// chunk archives, switching to the next volume on logical end-of-media,
/// then appends the snapshot archive itself (retried once on a second
/// volume if it does not fit).
///
/// Returns `Ok(false)` if the snapshot could not be opened (e.g. vanished
/// meanwhile) — the caller treats this as a soft error and continues.
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<bool, Error> {

    task_log!(worker, "backup snapshot {}", snapshot);

    let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
        Ok(reader) => reader,
        Err(err) => {
            // ignore missing snapshots and continue
            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
            return Ok(false);
        }
    };

    // shared with the chunk reader thread below
    let snapshot_reader = Arc::new(Mutex::new(snapshot_reader));

    let (reader_thread, chunk_iter) = pool_writer.spawn_chunk_reader_thread(
        datastore.clone(),
        snapshot_reader.clone(),
    )?;

    let mut chunk_iter = chunk_iter.peekable();

    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        match chunk_iter.peek() {
            None => break,
            Some(Ok(_)) => { /* Ok */ },
            Some(Err(err)) => bail!("{}", err),
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;

        // logical end-of-media: mark this medium full, next loop iteration
        // loads a fresh writable medium
        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    if let Err(_) = reader_thread.join() {
        bail!("chunk reader thread failed");
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    // reader thread is done, so the lock cannot block here
    let snapshot_reader = snapshot_reader.lock().unwrap();

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(true)
}
|