use std::path::Path;
use std::sync::Arc;

use anyhow::{bail, Error};
use serde_json::Value;

use proxmox::{
    api::{
        api,
        RpcEnvironment,
        RpcEnvironmentType,
        Router,
    },
};

use crate::{
    config::{
        self,
        drive::check_drive_exists,
    },
    backup::{
        DataStore,
        BackupDir,
        BackupInfo,
    },
    api2::types::{
        Authid,
        DATASTORE_SCHEMA,
        MEDIA_POOL_NAME_SCHEMA,
        UPID_SCHEMA,
        MediaPoolConfig,
    },
    server::WorkerTask,
    tape::{
        TAPE_STATUS_DIR,
        Inventory,
        PoolWriter,
        MediaPool,
        SnapshotReader,
        drive::media_changer,
        changer::update_changer_online_status,
    },
};

#[api(
    input: {
        properties: {
            store: {
                schema: DATASTORE_SCHEMA,
            },
            pool: {
                schema: MEDIA_POOL_NAME_SCHEMA,
            },
            "eject-media": {
                description: "Eject media upon job completion.",
                type: bool,
                optional: true,
            },
            "export-media-set": {
                description: "Export media set upon job completion.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Backup datastore to tape media pool
pub fn backup(
    store: String,
    pool: String,
    eject_media: Option<bool>,
    export_media_set: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let datastore = DataStore::lookup_datastore(&store)?;

    let (config, _digest) = config::media_pool::config()?;
    let pool_config: MediaPoolConfig = config.lookup("pool", &pool)?;

    let (drive_config, _digest) = config::drive::config()?;

    // early check before starting worker
    check_drive_exists(&drive_config, &pool_config.drive)?;

    let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;

    let eject_media = eject_media.unwrap_or(false);
    let export_media_set = export_media_set.unwrap_or(false);
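
    // All checks passed; run the actual backup in a worker task and
    // return the task's UPID so the caller can follow its progress.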
    let upid_str = WorkerTask::new_thread(
        "tape-backup",
        Some(store),
        auth_id,
        to_stdout,
        move |worker| {
            backup_worker(&worker, datastore, &pool_config, eject_media, export_media_set)?;
            Ok(())
        }
    )?;

    Ok(upid_str.into())
}

pub const ROUTER: Router = Router::new()
    .post(&API_METHOD_BACKUP);
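
// Worker task body: walk all backup groups in the datastore and write each
// snapshot that the pool writer does not already contain to tape, then
// commit and optionally export or eject the media.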
fn backup_worker(
    worker: &WorkerTask,
    datastore: Arc<DataStore>,
    pool_config: &MediaPoolConfig,
    eject_media: bool,
    export_media_set: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
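
    // hold the media pool lock while we write to the pool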
    let _lock = MediaPool::lock(status_path, &pool_config.name)?;

    worker.log("update media online status");
    let has_changer = update_media_online_status(&pool_config.drive)?;
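
    // without a changer we cannot query which media are online,
    // so the pool also has to consider offline media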
    let use_offline_media = !has_changer;

    let pool = MediaPool::with_config(status_path, &pool_config, use_offline_media)?;

    let mut pool_writer = PoolWriter::new(pool, &pool_config.drive)?;

    let mut group_list = BackupInfo::list_backup_groups(&datastore.base_path())?;

    group_list.sort_unstable();

    for group in group_list {
        let mut snapshot_list = group.list_backups(&datastore.base_path())?;
        BackupInfo::sort_list(&mut snapshot_list, true); // oldest first

        for info in snapshot_list {
            if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
                continue;
            }
            worker.log(format!("backup snapshot {}", info.backup_dir));
            backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)?;
        }
    }

    pool_writer.commit()?;

    if export_media_set {
        pool_writer.export_media_set(worker)?;
    } else if eject_media {
        pool_writer.eject_media(worker)?;
    }

    Ok(())
}

// Try to update the media online status
fn update_media_online_status(drive: &str) -> Result<bool, Error> {

    let (config, _digest) = config::drive::config()?;

    let mut has_changer = false;

    if let Ok(Some((mut changer, changer_name))) = media_changer(&config, drive) {

        has_changer = true;

        let label_text_list = changer.online_media_label_texts()?;

        let status_path = Path::new(TAPE_STATUS_DIR);
        let mut inventory = Inventory::load(status_path)?;

        update_changer_online_status(
            &config,
            &mut inventory,
            &changer_name,
            &label_text_list,
        )?;
    }

    Ok(has_changer)
}
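
/// Write a single snapshot to tape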
pub fn backup_snapshot(
    worker: &WorkerTask,
    pool_writer: &mut PoolWriter,
    datastore: Arc<DataStore>,
    snapshot: BackupDir,
) -> Result<(), Error> {

    worker.log(format!("start backup {}:{}", datastore.name(), snapshot));

    let snapshot_reader = SnapshotReader::new(datastore.clone(), snapshot.clone())?;

    let mut chunk_iter = snapshot_reader.chunk_iterator()?.peekable();
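
    // Write the snapshot's chunks as chunk archives. If the drive reports
    // logical end of media (leom), mark the current media as full so that
    // the next call to load_writable_media switches to new writable media.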
    loop {
        // test if we have remaining chunks
        if chunk_iter.peek().is_none() {
            break;
        }

        let uuid = pool_writer.load_writable_media(worker)?;

        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;

        if leom {
            pool_writer.set_media_status_full(&uuid)?;
        }
    }
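
    // All chunk archives are written; now store the snapshot archive itself.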
    let uuid = pool_writer.load_writable_media(worker)?;

    let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

    if !done {
        // does not fit on tape, so we try on next volume
        pool_writer.set_media_status_full(&uuid)?;

        pool_writer.load_writable_media(worker)?;
        let (done, _bytes) = pool_writer.append_snapshot_archive(worker, &snapshot_reader)?;

        if !done {
            bail!("write_snapshot_archive failed on second media");
        }
    }

    worker.log(format!("end backup {}:{}", datastore.name(), snapshot));

    Ok(())
}