proxmox-backup/src/api2/tape/backup.rs

use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
    },
};

use crate::{
    task_log,
    config,
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
    },
    api2::types::{
        Authid,
        DATASTORE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        DRIVE_NAME_SCHEMA,
        UPID_SCHEMA,
        MediaPoolConfig,
    },
    server::WorkerTask,
    task::TaskState,
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        SnapshotReader,
        drive::{
            media_changer,
            lock_tape_device,
        },
        changer::update_changer_online_status,
    },
};
#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            "eject-media": {
                description: "Eject media upon job completion.",
                type: bool,
                optional: true,
            },
            "export-media-set": {
                description: "Export media set upon job completion.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    store: String,
    pool: String,
    drive: String,
    eject_media: Option<bool>,
    export_media_set: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check/lock before starting worker
    let drive_lock = lock_tape_device(&drive_config, &drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let eject_media = eject_media.unwrap_or(false);
    let export_media_set = export_media_set.unwrap_or(false);
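
    // run the actual backup in a separate worker task and hand its UPID back to the caller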
    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(store),
        auth_id,
        to_stdout,
        move |worker| {
            let _drive_lock = drive_lock; // keep lock guard
            backup_worker(&worker, datastore, &drive, &pool_config, eject_media, export_media_set)?;
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_BACKUP);
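
// Walk all backup groups in the datastore and write every snapshot that is
// not yet part of the media set to tape, oldest snapshots first.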
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    drive: &str,
    pool_config: &MediaPoolConfig,
    eject_media: bool,
    export_media_set: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
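
    // hold the pool lock so no other job modifies this media pool concurrently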
    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

    task_log!(worker, "update media online status");
    let changer_name = update_media_online_status(drive)?;

    let pool = MediaPool::with_config(status_path, &pool_config, changer_name)?;

    let mut pool_writer = PoolWriter::new(pool, drive)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    for group in group_list {
        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        for info in snapshot_list {
            if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                continue;
            }
            task_log!(worker, "backup snapshot {}", info.backup_dir);
            backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
        }
    }

    pool_writer.commit()?;

    if export_media_set {
        pool_writer.export_media_set(worker)?;
    } else if eject_media {
        pool_writer.eject_media(worker)?;
    }

    Ok(())
}
// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<Option<String>, Error> {

    let (config, _digest) = config::drive::config()?;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;

        Ok(Some(changer_name))
    } else {
        Ok(None)
    }
}
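
/// Backup a single snapshot to the current media set, continuing on the
/// next writable medium when the current one runs full.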
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {

    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
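
    // write chunk archives until the snapshot's chunk iterator is exhausted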
    loop {
        worker.check_abort()?;

        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        worker.check_abort()?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
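
        // leom = logical end of media; mark the tape full so the next
        // iteration loads a fresh writable medium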
        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }
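
    // all chunk archives are written; now append the snapshot archive itself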
    worker.check_abort()?;

    let uuid = pool_writer.load_writable_media(worker)?;

    worker.check_abort()?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on this tape, so we try again on the next volume
        pool_writer.set_media_status_full(&uuid)?;

        worker.check_abort()?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);

    Ok(())
}