// proxmox-backup/src/api2/tape/backup.rs

use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, format_err, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
        Permission,
    },
};

use crate::{
    task_log,
    task_warn,
    config::{
        self,
        cached_user_info::CachedUserInfo,
        acl::{
            PRIV_DATASTORE_READ,
            PRIV_TAPE_AUDIT,
            PRIV_TAPE_WRITE,
        },
        tape_job::{
            TapeBackupJobConfig,
            TapeBackupJobSetup,
            TapeBackupJobStatus,
        },
    },
    server::{
        lookup_user_email,
        jobstate::{
            Job,
            JobState,
            compute_schedule_status,
        },
    },
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
        StoreProgress,
    },
    api2::types::{
        Authid,
        UPID_SCHEMA,
        JOB_ID_SCHEMA,
        MediaPoolConfig,
        Userid,
    },
    server::WorkerTask,
    task::TaskState,
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        SnapshotReader,
        drive::{
            media_changer,
            lock_tape_device,
            set_tape_device_state,
        },
        changer::update_changer_online_status,
    },
};
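
// Routes served by this module, relative to its mount point (a summary
// of the Router definitions below):
//   GET  /      -> list_tape_backup_jobs
//   POST /      -> backup (ad-hoc backup run)
//   POST /{id}  -> run_tape_backup_job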
const TAPE_BACKUP_JOB_ROUTER: Router = Router::new()
    .post(&API_METHOD_RUN_TAPE_BACKUP_JOB);

pub const ROUTER: Router = Router::new()
    .get(&API_METHOD_LIST_TAPE_BACKUP_JOBS)
    .post(&API_METHOD_BACKUP)
    .match_all("id", &TAPE_BACKUP_JOB_ROUTER);
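
/// Check that `auth_id` may run a tape backup with the given datastore,
/// media pool and drive: requires Datastore.Read on /datastore/{store},
/// and Tape.Write on both /tape/drive/{drive} and /tape/pool/{pool}.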
fn check_backup_permission(
    auth_id: &Authid,
    store: &str,
    pool: &str,
    drive: &str,
) -> Result<(), Error> {

    let user_info = CachedUserInfo::new()?;

    let privs = user_info.lookup_privs(auth_id, &["datastore", store]);
    if (privs & PRIV_DATASTORE_READ) == 0 {
        bail!("no permissions on /datastore/{}", store);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "drive", drive]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/drive/{}", drive);
    }

    let privs = user_info.lookup_privs(auth_id, &["tape", "pool", pool]);
    if (privs & PRIV_TAPE_WRITE) == 0 {
        bail!("no permissions on /tape/pool/{}", pool);
    }

    Ok(())
}
#[api(
    returns: {
        description: "List configured tape backup jobs and their status",
        type: Array,
        items: { type: TapeBackupJobStatus },
    },
    access: {
        description: "List configured tape jobs filtered by Tape.Audit privileges",
        permission: &Permission::Anybody,
    },
)]
/// List all tape backup jobs
pub fn list_tape_backup_jobs(
    _param: Value,
    mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<TapeBackupJobStatus>, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let user_info = CachedUserInfo::new()?;

    let (config, digest) = config::tape_job::config()?;

    let job_list_iter = config
        .convert_to_typed_array("backup")?
        .into_iter()
        .filter(|_job: &TapeBackupJobConfig| {
            // fixme: check access permission
            true
        });

    let mut list = Vec::new();

    for job in job_list_iter {
        let privs = user_info.lookup_privs(&auth_id, &["tape", "job", &job.id]);
        if (privs & PRIV_TAPE_AUDIT) == 0 {
            continue;
        }

        let last_state = JobState::load("tape-backup-job", &job.id)
            .map_err(|err| format_err!("could not open statefile for {}: {}", &job.id, err))?;

        let status = compute_schedule_status(&last_state, job.schedule.as_deref())?;

        list.push(TapeBackupJobStatus { config: job, status });
    }

    rpcenv["digest"] = proxmox::tools::digest_to_hex(&digest).into();

    Ok(list)
}
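
/// Run a tape backup job inside a new worker task.
///
/// Takes ownership of the job state, locks the tape drive before the
/// worker starts, updates drive and job state while running, and sends
/// a notification mail once the worker finishes. Returns the UPID of
/// the spawned worker task.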
pub fn do_tape_backup_job(
    mut job: Job,
    setup: TapeBackupJobSetup,
    auth_id: &Authid,
    schedule: Option<String>,
) -> Result<String, Error> {

    let job_id = format!("{}:{}:{}:{}",
                         setup.store,
                         setup.pool,
                         setup.drive,
                         job.jobname());

    let worker_type = job.jobtype().to_string();

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let upid_str = WorkerTask::new_thread(
        &worker_type,
        Some(job_id.clone()),
        auth_id.clone(),
        false,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;
            job.start(&worker.upid().to_string())?;

            task_log!(worker, "Starting tape backup job '{}'", job_id);
            if let Some(event_str) = schedule {
                task_log!(worker, "task triggered by schedule '{}'", event_str);
            }

            let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
            let email = lookup_user_email(notify_user);

            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
            );

            let status = worker.create_state(&job_result);

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    Some(job.jobname()),
                    &setup,
                    &job_result,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            if let Err(err) = job.finish(status) {
                eprintln!(
                    "could not finish job state for {}: {}",
                    job.jobtype().to_string(),
                    err
                );
            }

            if let Err(err) = set_tape_device_state(&setup.drive, "") {
                eprintln!(
                    "could not unset drive state for {}: {}",
                    setup.drive,
                    err
                );
            }

            job_result
        }
    )?;

    Ok(upid_str)
}
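
// A minimal sketch of how a caller might trigger this job (hypothetical;
// the actual call sites are the API handler below and the job scheduler):
//
//     let job = Job::new("tape-backup-job", &job_config.id)?;
//     let upid = do_tape_backup_job(job, job_config.setup, &auth_id, None)?;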
#[api(
    input: {
        properties: {
            id: {
                schema: JOB_ID_SCHEMA,
            },
        },
    },
    access: {
        // Note: parameters are from job config, so we need to test inside function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Runs a tape backup job manually.
pub fn run_tape_backup_job(
    id: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let (config, _digest) = config::tape_job::config()?;
    let backup_job: TapeBackupJobConfig = config.lookup("backup", &id)?;

    check_backup_permission(
        &auth_id,
        &backup_job.setup.store,
        &backup_job.setup.pool,
        &backup_job.setup.drive,
    )?;

    let job = Job::new("tape-backup-job", &id)?;

    let upid_str = do_tape_backup_job(job, backup_job.setup, &auth_id, None)?;

    Ok(upid_str)
}
#[api(
    input: {
        properties: {
            setup: {
                type: TapeBackupJobSetup,
                flatten: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
    access: {
        // Note: parameters are not URI parameters, so we need to test inside the function body
        description: "The user needs Tape.Write privilege on /tape/pool/{pool} \
            and /tape/drive/{drive}, Datastore.Read privilege on /datastore/{store}.",
        permission: &Permission::Anybody,
    },
)]
/// Backup datastore to tape media pool
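///
/// This is the ad-hoc variant of a tape backup job: the full
/// `TapeBackupJobSetup` is passed (flattened) as request parameters
/// instead of referencing a configured job by id.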
pub fn backup(
    setup: TapeBackupJobSetup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    check_backup_permission(
        &auth_id,
        &setup.store,
        &setup.pool,
        &setup.drive,
    )?;

    let datastore = DataStore::lookup_datastore(&setup.store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &setup.pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &setup.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let job_id = format!("{}:{}:{}", setup.store, setup.pool, setup.drive);

    let notify_user = setup.notify_user.as_ref().unwrap_or_else(|| &Userid::root_userid());
    let email = lookup_user_email(notify_user);

    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(job_id),
        auth_id,
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard

            set_tape_device_state(&setup.drive, &worker.upid().to_string())?;

            let job_result = backup_worker(
                &worker,
                datastore,
                &pool_config,
                &setup,
                email.clone(),
            );

            if let Some(email) = email {
                if let Err(err) = crate::server::send_tape_backup_status(
                    &email,
                    None,
                    &setup,
                    &job_result,
                ) {
                    eprintln!("send tape backup notification failed: {}", err);
                }
            }

            // ignore errors
            let _ = set_tape_device_state(&setup.drive, "");

            job_result
        }
    )?;

    Ok(upid_str.into())
}
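
/// The actual backup loop, shared by ad-hoc backups and backup jobs.
///
/// Locks the media pool, refreshes the media online status, then walks
/// all backup groups of the datastore (only the latest snapshot of each
/// group when `latest-only` is set), writing every snapshot that is not
/// already contained in the media set. Finally commits the pool writer
/// and optionally exports or ejects the media.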
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    setup: &TapeBackupJobSetup,
    email: Option<String>,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);

    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(&setup.drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;

    let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    let group_count = group_list.len();
    task_log!(worker, "found {} groups", group_count);

    let mut progress = StoreProgress::new(group_count as u64);

    let latest_only = setup.latest_only.unwrap_or(false);

    if latest_only {
        task_log!(worker, "latest-only: true (only considering latest snapshots)");
    }

    let mut errors = false;

    for (group_number, group) in group_list.into_iter().enumerate() {
        progress.done_groups = group_number as u64;
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let mut snapshot_list = group.list_backups(&datastore.base_path())?;

        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        if latest_only {
            progress.group_snapshots = 1;
            if let Some(info) = snapshot_list.pop() {
                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                }
                progress.done_snapshots = 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        } else {
            progress.group_snapshots = snapshot_list.len() as u64;
            for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                    continue;
                }
                if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                    errors = true;
                }
                progress.done_snapshots = snapshot_number as u64 + 1;
                task_log!(
                    worker,
                    "percentage done: {}",
                    progress
                );
            }
        }
    }

    pool_writer.commit()?;

    if setup.export_media_set.unwrap_or(false) {
        pool_writer.export_media_set(worker)?;
    } else if setup.eject_media.unwrap_or(false) {
        pool_writer.eject_media(worker)?;
    }

    if errors {
        bail!("Tape backup finished with some errors. Please check the task log.");
    }

    Ok(())
}
// Try to update the media online status
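// (returns the changer name when the drive belongs to a changer, else None)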
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}
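
/// Backup a single snapshot to the current media set.
///
/// Writes the snapshot's chunk archives first, then the snapshot
/// archive itself. When the drive reports (logical) end of media, the
/// current medium is marked full and writing continues on the next
/// writable medium. Returns `Ok(false)` if the snapshot could not be
/// opened (e.g. it vanished in the meantime), `Ok(true)` on success.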
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<bool, Error> {

    task_log!(worker, "backup snapshot {}", snapshot);

    let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
        Ok(reader) => reader,
        Err(err) => {
            // ignore missing snapshots and continue
            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
            return Ok(false);
        }
    };

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();

    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;

        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }

    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(true)
}