tape: add restore code, implement catalog api/command

This commit is contained in:
Dietmar Maurer 2020-12-30 09:48:18 +01:00
parent 9e8c0d2e33
commit b017bbc441
4 changed files with 596 additions and 13 deletions

View File

@ -23,25 +23,29 @@ use crate::{
self,
drive::check_drive_exists,
},
api2::types::{
UPID_SCHEMA,
DRIVE_NAME_SCHEMA,
MEDIA_LABEL_SCHEMA,
MEDIA_POOL_NAME_SCHEMA,
Authid,
LinuxTapeDrive,
ScsiTapeChanger,
TapeDeviceInfo,
MediaIdFlat,
LabelUuidMap,
MamAttribute,
LinuxDriveAndMediaStatus,
api2::{
types::{
UPID_SCHEMA,
DRIVE_NAME_SCHEMA,
MEDIA_LABEL_SCHEMA,
MEDIA_POOL_NAME_SCHEMA,
Authid,
LinuxTapeDrive,
ScsiTapeChanger,
TapeDeviceInfo,
MediaIdFlat,
LabelUuidMap,
MamAttribute,
LinuxDriveAndMediaStatus,
},
tape::restore::restore_media,
},
server::WorkerTask,
tape::{
TAPE_STATUS_DIR,
TapeDriver,
MediaChange,
MediaPool,
Inventory,
MediaStateDatabase,
MediaCatalog,
@ -836,6 +840,106 @@ pub fn status(drive: String) -> Result<LinuxDriveAndMediaStatus, Error> {
handle.get_drive_and_media_status()
}
#[api(
    input: {
        properties: {
            drive: {
                schema: DRIVE_NAME_SCHEMA,
            },
            force: {
                description: "Force overriding existing index.",
                type: bool,
                optional: true,
            },
            verbose: {
                description: "Verbose mode - log all found chunks.",
                type: bool,
                optional: true,
            },
        },
    },
    returns: {
        schema: UPID_SCHEMA,
    },
)]
/// Scan media and record content
///
/// Spawns a worker task that rewinds the drive, reads the media label,
/// and rebuilds the media catalog from the on-tape content (via
/// `restore_media` with no restore target). Returns the worker UPID.
pub fn catalog_media(
    drive: String,
    force: Option<bool>,
    verbose: Option<bool>,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {

    let verbose = verbose.unwrap_or(false);
    let force = force.unwrap_or(false);

    let (config, _digest) = config::drive::config()?;

    check_drive_exists(&config, &drive)?; // early check before starting worker

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let upid_str = WorkerTask::new_thread(
        "catalog-media",
        Some(drive.clone()),
        auth_id,
        true,
        move |worker| {

            let mut drive = open_drive(&config, &drive)?;

            drive.rewind()?;

            // without a media label there is nothing we can catalog
            let media_id = match drive.read_label()? {
                Some(media_id) => {
                    worker.log(format!(
                        "found media label: {}",
                        serde_json::to_string_pretty(&serde_json::to_value(&media_id)?)?
                    ));
                    media_id
                },
                None => bail!("media is empty (no media label found)"),
            };

            let status_path = Path::new(TAPE_STATUS_DIR);

            // record the media in the inventory before touching the catalog
            let mut inventory = Inventory::load(status_path)?;
            inventory.store(media_id.clone())?;

            let pool = match media_id.media_set_label {
                None => {
                    worker.log("media is empty");
                    MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
                    return Ok(());
                }
                Some(ref set) => {
                    if set.uuid.as_ref() == [0u8;16] { // media is empty
                        worker.log("media is empty");
                        MediaCatalog::destroy(status_path, &media_id.label.uuid)?;
                        return Ok(());
                    }
                    set.pool.clone()
                }
            };

            // hold the pool lock while rebuilding the catalog
            let _lock = MediaPool::lock(status_path, &pool)?;

            // collapsed nested if (clippy::collapsible_if)
            if MediaCatalog::exists(status_path, &media_id.label.uuid) && !force {
                bail!("media catalog exists (please use --force to overwrite)");
            }

            // target == None => only (re)create the catalog, restore nothing
            restore_media(&worker, &mut drive, &media_id, None, verbose)?;

            Ok(())
        }
    )?;

    Ok(upid_str.into())
}
#[sortable]
pub const SUBDIRS: SubdirMap = &sorted!([
(
@ -843,6 +947,11 @@ pub const SUBDIRS: SubdirMap = &sorted!([
&Router::new()
.put(&API_METHOD_BARCODE_LABEL_MEDIA)
),
(
"catalog",
&Router::new()
.put(&API_METHOD_CATALOG_MEDIA)
),
(
"eject-media",
&Router::new()

View File

@ -6,6 +6,7 @@ pub mod drive;
pub mod changer;
pub mod media;
pub mod backup;
pub mod restore;
pub const SUBDIRS: SubdirMap = &[
("backup", &backup::ROUTER),

422
src/api2/tape/restore.rs Normal file
View File

@ -0,0 +1,422 @@
use std::path::Path;
use std::ffi::OsStr;
use std::convert::TryFrom;
use anyhow::{bail, format_err, Error};
use proxmox::{
tools::{
Uuid,
io::ReadExt,
fs::{
replace_file,
CreateOptions,
},
},
api::section_config::SectionConfigData,
};
use crate::{
tools::compute_file_csum,
api2::types::Authid,
backup::{
archive_type,
MANIFEST_BLOB_NAME,
CryptMode,
DataStore,
BackupDir,
DataBlob,
BackupManifest,
ArchiveType,
IndexFile,
DynamicIndexReader,
FixedIndexReader,
},
server::WorkerTask,
tape::{
TAPE_STATUS_DIR,
TapeRead,
MediaId,
MediaCatalog,
ChunkArchiveDecoder,
TapeDriver,
request_and_load_media,
file_formats::{
PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
MediaContentHeader,
},
},
};
/// Request and restore complete media without using existing catalog (create catalog instead)
pub fn request_and_restore_media(
    worker: &WorkerTask,
    media_id: &MediaId,
    drive_config: &SectionConfigData,
    drive_name: &str,
    datastore: &DataStore,
    authid: &Authid,
) -> Result<(), Error> {

    // a media without a media set cannot be restored - internal invariant
    let media_set_uuid = match media_id.media_set_label {
        Some(ref set) => &set.uuid,
        None => bail!("restore_media: no media set - internal error"),
    };

    let (mut drive, info) = request_and_load_media(&drive_config, &drive_name, &media_id.label)?;

    // cross-check the label of the loaded media against the expected media set
    if let Some(ref set) = info.media_set_label {
        if &set.uuid != media_set_uuid {
            bail!("wrong media set label on media {} ({} != {})",
                  media_id.label.changer_id, media_id.label.uuid,
                  media_set_uuid);
        }
    } else {
        bail!("missing media set label on media {} ({})",
              media_id.label.changer_id, media_id.label.uuid);
    }

    restore_media(worker, &mut drive, &info, Some((datastore, authid)), false)
}
/// Restore complete media content and catalog
///
/// Only create the catalog if target is None.
///
/// Iterates over all files on the media until EOT, handing each file
/// to `restore_archive`. The catalog is built in a temporary database
/// and finalized only after the whole media was read successfully.
pub fn restore_media(
    worker: &WorkerTask,
    drive: &mut Box<dyn TapeDriver>,
    media_id: &MediaId,
    target: Option<(&DataStore, &Authid)>,
    verbose: bool,
) -> Result<(), Error> {

    let status_path = Path::new(TAPE_STATUS_DIR);
    let mut catalog = MediaCatalog::create_temporary_database(status_path, media_id, false)?;

    loop {
        let current_file_number = drive.current_file_number()?;
        let reader = match drive.read_next_file()? {
            None => {
                worker.log(format!("detected EOT after {} files", current_file_number));
                break;
            }
            Some(reader) => reader,
        };

        // `target` is an Option of shared references and therefore Copy -
        // the per-iteration `.clone()` was redundant (clippy::clone_on_copy)
        restore_archive(worker, reader, current_file_number, target, &mut catalog, verbose)?;
    }

    MediaCatalog::finish_temporary_database(status_path, &media_id.label.uuid, true)?;

    Ok(())
}
/// Handle a single file from the tape stream.
///
/// Reads the `MediaContentHeader` and dispatches on its content magic:
/// snapshot archives and chunk archives are processed, label magics are
/// rejected. With `target == None` only the catalog is updated; with
/// `Some((datastore, authid))` the data is also restored.
fn restore_archive<'a>(
    worker: &WorkerTask,
    mut reader: Box<dyn 'a + TapeRead>,
    current_file_number: u64,
    target: Option<(&DataStore, &Authid)>,
    catalog: &mut MediaCatalog,
    verbose: bool,
) -> Result<(), Error> {
    // read the fixed-size header first; validity is checked via the magic below
    let header: MediaContentHeader = unsafe { reader.read_le_value()? };
    if header.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
        bail!("missing MediaContentHeader");
    }

    //println!("Found MediaContentHeader: {:?}", header);

    match header.content_magic {
        PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0 | PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0 => {
            // labels are read before restore starts - finding one here is an error
            bail!("unexpected content magic (label)");
        }
        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
            // the header payload is the snapshot path (must be utf8)
            let snapshot = reader.read_exact_allocated(header.size as usize)?;
            let snapshot = std::str::from_utf8(&snapshot)
                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
            worker.log(format!("Found snapshot archive: {} {}", current_file_number, snapshot));

            let backup_dir: BackupDir = snapshot.parse()?;

            if let Some((datastore, authid)) = target.as_ref() {

                let (owner, _group_lock) = datastore.create_locked_backup_group(backup_dir.group(), authid)?;
                if *authid != &owner { // only the owner is allowed to create additional snapshots
                    bail!("restore '{}' failed - owner check failed ({} != {})", snapshot, authid, owner);
                }

                let (rel_path, is_new, _snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
                let mut path = datastore.base_path();
                path.push(rel_path);

                if is_new {
                    worker.log(format!("restore snapshot {}", backup_dir));

                    match restore_snapshot_archive(reader, &path) {
                        Err(err) => {
                            // remove the partially restored snapshot before failing
                            std::fs::remove_dir_all(&path)?;
                            bail!("restore snapshot {} failed - {}", backup_dir, err);
                        }
                        Ok(false) => {
                            // incomplete/aborted stream - drop the partial snapshot
                            std::fs::remove_dir_all(&path)?;
                            worker.log(format!("skip incomplete snapshot {}", backup_dir));
                        }
                        Ok(true) => {
                            catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                            catalog.commit_if_large()?;
                        }
                    }
                    return Ok(());
                }
                // snapshot already exists on the datastore: fall through,
                // skip the data and only record the snapshot in the catalog
            }

            reader.skip_to_end()?; // read all data

            // only register snapshots whose stream was written completely
            if let Ok(false) = reader.is_incomplete() {
                catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
                catalog.commit_if_large()?;
            }
        }
        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
            worker.log(format!("Found chunk archive: {}", current_file_number));
            let datastore = target.as_ref().map(|t| t.0);

            // a `None` result means the archive had no end marker and is skipped
            if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
                for digest in chunks.iter() {
                    catalog.register_chunk(&digest)?;
                }
                worker.log(format!("register {} chunks", chunks.len()));
                catalog.end_chunk_archive()?;
                catalog.commit_if_large()?;
            }
        }
        _ => bail!("unknown content magic {:?}", header.content_magic),
    }

    catalog.commit()?;

    Ok(())
}
/// Read a chunk archive, optionally inserting the chunks into `datastore`.
///
/// Returns `Ok(Some(digests))` for a cleanly (or incompletely but
/// marked) terminated archive, `Ok(None)` when the stream aborted
/// without an end marker, and `Err` for a corrupt archive.
fn restore_chunk_archive<'a>(
    worker: &WorkerTask,
    reader: Box<dyn 'a + TapeRead>,
    datastore: Option<&DataStore>,
    verbose: bool,
) -> Result<Option<Vec<[u8;32]>>, Error> {

    let mut chunks = Vec::new();

    let mut decoder = ChunkArchiveDecoder::new(reader);

    let result: Result<_, Error> = proxmox::try_block!({
        loop {
            match decoder.next_chunk()? {
                Some((digest, blob)) => {
                    if let Some(datastore) = datastore {
                        let chunk_exists = datastore.cond_touch_chunk(&digest, false)?;
                        if !chunk_exists {
                            blob.verify_crc()?;
                            if blob.crypt_mode()? == CryptMode::None {
                                blob.decode(None, Some(&digest))?; // verify digest
                            }
                            if verbose {
                                worker.log(format!("Insert chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                            }
                            datastore.insert_chunk(&blob, &digest)?;
                        } else if verbose {
                            // collapsed `else { if … }` (clippy::collapsible_else_if)
                            worker.log(format!("Found existing chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                        }
                    } else if verbose {
                        // catalog-only mode (no datastore target)
                        worker.log(format!("Found chunk: {}", proxmox::tools::digest_to_hex(&digest)));
                    }
                    chunks.push(digest);
                }
                None => break,
            }
        }
        Ok(())
    });

    match result {
        Ok(()) => Ok(Some(chunks)),
        Err(err) => {
            let reader = decoder.reader();

            // check if this stream is marked incomplete
            if let Ok(true) = reader.is_incomplete() {
                return Ok(Some(chunks));
            }

            // check if this is an aborted stream without end marker
            if let Ok(false) = reader.has_end_marker() {
                // `log` accepts &str - the former format!() was redundant
                worker.log("missing stream end marker");
                return Ok(None);
            }

            // else the archive is corrupt
            Err(err)
        }
    }
}
/// Restore one snapshot archive from the pxar stream into `snapshot_path`.
///
/// Returns `Ok(true)` on success, `Ok(false)` for an incomplete or
/// aborted (but detectable) stream, and `Err` for a corrupt archive.
fn restore_snapshot_archive<'a>(
    reader: Box<dyn 'a + TapeRead>,
    snapshot_path: &Path,
) -> Result<bool, Error> {

    let mut decoder = pxar::decoder::sync::Decoder::from_std(reader)?;

    if let Err(err) = try_restore_snapshot_archive(&mut decoder, snapshot_path) {
        let reader = decoder.input();

        // check if this stream is marked incomplete
        if let Ok(true) = reader.is_incomplete() {
            return Ok(false);
        }

        // check if this is an aborted stream without end marker
        if let Ok(false) = reader.has_end_marker() {
            return Ok(false);
        }

        // else the archive is corrupt
        return Err(err);
    }

    Ok(true)
}
/// Extract all files of a snapshot archive into `snapshot_path`
/// and verify them against the contained manifest.
///
/// Each file is written to a "*.tmp" name first and renamed into
/// place; the manifest itself is renamed last ("commit"), so an
/// interrupted restore never leaves a snapshot that looks complete.
fn try_restore_snapshot_archive<R: pxar::decoder::SeqRead>(
    decoder: &mut pxar::decoder::sync::Decoder<R>,
    snapshot_path: &Path,
) -> Result<(), Error> {

    let _root = match decoder.next() {
        None => bail!("missing root entry"),
        Some(root) => {
            let root = root?;
            match root.kind() {
                pxar::EntryKind::Directory => { /* Ok */ }
                _ => bail!("wrong root entry type"),
            }
            root
        }
    };

    let root_path = Path::new("/");
    let manifest_file_name = OsStr::new(MANIFEST_BLOB_NAME);

    let mut manifest = None;

    loop {
        let entry = match decoder.next() {
            None => break,
            Some(entry) => entry?,
        };
        let entry_path = entry.path();

        // the snapshot archive must be flat: plain files directly below root
        match entry.kind() {
            pxar::EntryKind::File { .. } => { /* Ok */ }
            _ => bail!("wrong entry type for {:?}", entry_path),
        }
        match entry_path.parent() {
            None => bail!("wrong parent for {:?}", entry_path),
            Some(p) => {
                if p != root_path {
                    bail!("wrong parent for {:?}", entry_path);
                }
            }
        }

        let filename = entry.file_name();
        let mut contents = match decoder.contents() {
            None => bail!("missing file content"),
            Some(contents) => contents,
        };

        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&filename);

        let mut tmp_path = archive_path.clone();
        // NOTE(review): set_extension replaces an existing extension
        // ("foo.blob" -> "foo.tmp"), so two archives differing only in
        // extension would share a tmp name - confirm this cannot occur
        tmp_path.set_extension("tmp");

        if filename == manifest_file_name {
            // keep the manifest as tmp file; it is renamed into place at
            // the very end, after all other files were verified
            let blob = DataBlob::load_from_reader(&mut contents)?;
            let options = CreateOptions::new();
            replace_file(&tmp_path, blob.raw_data(), options)?;

            manifest = Some(BackupManifest::try_from(blob)?);
        } else {
            let mut tmpfile = std::fs::OpenOptions::new()
                .write(true)
                .create(true)
                .truncate(true) // discard stale data from an earlier, aborted restore
                .read(true)
                .open(&tmp_path)
                .map_err(|err| format_err!("restore {:?} failed - {}", tmp_path, err))?;

            std::io::copy(&mut contents, &mut tmpfile)?;

            if let Err(err) = std::fs::rename(&tmp_path, &archive_path) {
                bail!("Atomic rename file {:?} failed - {}", archive_path, err);
            }
        }
    }

    let manifest = match manifest {
        None => bail!("missing manifest"),
        Some(manifest) => manifest,
    };

    // verify checksum and size of every restored archive file
    for item in manifest.files() {
        let mut archive_path = snapshot_path.to_owned();
        archive_path.push(&item.filename);

        match archive_type(&item.filename)? {
            ArchiveType::DynamicIndex => {
                let index = DynamicIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::FixedIndex => {
                let index = FixedIndexReader::open(&archive_path)?;
                let (csum, size) = index.compute_csum();
                manifest.verify_file(&item.filename, &csum, size)?;
            }
            ArchiveType::Blob => {
                let mut tmpfile = std::fs::File::open(&archive_path)?;
                let (csum, size) = compute_file_csum(&mut tmpfile)?;
                manifest.verify_file(&item.filename, &csum, size)?;
            }
        }
    }

    // commit manifest
    let mut manifest_path = snapshot_path.to_owned();
    manifest_path.push(MANIFEST_BLOB_NAME);
    let mut tmp_manifest_path = manifest_path.clone();
    tmp_manifest_path.set_extension("tmp");

    if let Err(err) = std::fs::rename(&tmp_manifest_path, &manifest_path) {
        bail!("Atomic rename manifest {:?} failed - {}", manifest_path, err);
    }

    Ok(())
}

View File

@ -632,6 +632,52 @@ async fn backup(
Ok(())
}
#[api(
    input: {
        properties: {
            drive: {
                schema: DRIVE_NAME_SCHEMA,
                optional: true,
            },
            force: {
                description: "Force overriding existing index.",
                type: bool,
                optional: true,
            },
            verbose: {
                description: "Verbose mode - log all found chunks.",
                type: bool,
                optional: true,
            },
            "output-format": {
                schema: OUTPUT_FORMAT,
                optional: true,
            },
        },
    },
)]
/// Scan media and record content
async fn catalog_media(
    mut param: Value,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {

    // resolve the default drive when none was given on the command line
    let (config, _digest) = config::drive::config()?;
    param["drive"] = lookup_drive_name(&param, &config)?.into();

    // call the API endpoint directly; it is registered as a sync handler
    let info = &api2::tape::drive::API_METHOD_CATALOG_MEDIA;
    let handler = match info.handler {
        ApiHandler::Sync(handler) => handler,
        _ => unreachable!(),
    };
    let upid = (handler)(param, info, rpcenv)?;

    // the endpoint returns a worker UPID - follow the task until it finishes
    wait_for_local_worker(upid.as_str().unwrap()).await?;

    Ok(())
}
fn main() {
let cmd_def = CliCommandMap::new()
@ -688,6 +734,11 @@ fn main() {
CliCommand::new(&API_METHOD_READ_LABEL)
.completion_cb("drive", complete_drive_name)
)
.insert(
"catalog",
CliCommand::new(&API_METHOD_CATALOG_MEDIA)
.completion_cb("drive", complete_drive_name)
)
.insert(
"cartridge-memory",
CliCommand::new(&API_METHOD_CARTRIDGE_MEMORY)