tape: rust fmt

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Thomas Lamprecht 2022-04-10 17:49:03 +02:00
parent 429bc9d0a2
commit 4de1c42c20
29 changed files with 1183 additions and 1116 deletions

View File

@ -1,15 +1,14 @@
use anyhow::Error;
use serde_json::Value;
use ::serde::{Deserialize, Serialize};
use anyhow::Error;
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, TapeBackupJobConfig, TapeBackupJobConfigUpdater,
JOB_ID_SCHEMA, PROXMOX_CONFIG_DIGEST_SCHEMA,
PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
Authid, TapeBackupJobConfig, TapeBackupJobConfigUpdater, JOB_ID_SCHEMA, PRIV_TAPE_AUDIT,
PRIV_TAPE_MODIFY, PROXMOX_CONFIG_DIGEST_SCHEMA,
};
use pbs_config::CachedUserInfo;
@ -107,7 +106,6 @@ pub fn read_tape_backup_job(
id: String,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<TapeBackupJobConfig, Error> {
let (config, digest) = pbs_config::tape_job::config()?;
let job = config.lookup("backup", &id)?;
@ -188,29 +186,61 @@ pub fn update_tape_backup_job(
if let Some(delete) = delete {
for delete_prop in delete {
match delete_prop {
DeletableProperty::EjectMedia => { data.setup.eject_media = None; },
DeletableProperty::ExportMediaSet => { data.setup.export_media_set = None; },
DeletableProperty::LatestOnly => { data.setup.latest_only = None; },
DeletableProperty::NotifyUser => { data.setup.notify_user = None; },
DeletableProperty::Schedule => { data.schedule = None; },
DeletableProperty::Comment => { data.comment = None; },
DeletableProperty::GroupFilter => { data.setup.group_filter = None; },
DeletableProperty::EjectMedia => {
data.setup.eject_media = None;
}
DeletableProperty::ExportMediaSet => {
data.setup.export_media_set = None;
}
DeletableProperty::LatestOnly => {
data.setup.latest_only = None;
}
DeletableProperty::NotifyUser => {
data.setup.notify_user = None;
}
DeletableProperty::Schedule => {
data.schedule = None;
}
DeletableProperty::Comment => {
data.comment = None;
}
DeletableProperty::GroupFilter => {
data.setup.group_filter = None;
}
}
}
}
if let Some(store) = update.setup.store { data.setup.store = store; }
if let Some(pool) = update.setup.pool { data.setup.pool = pool; }
if let Some(drive) = update.setup.drive { data.setup.drive = drive; }
if let Some(store) = update.setup.store {
data.setup.store = store;
}
if let Some(pool) = update.setup.pool {
data.setup.pool = pool;
}
if let Some(drive) = update.setup.drive {
data.setup.drive = drive;
}
if update.setup.eject_media.is_some() { data.setup.eject_media = update.setup.eject_media; };
if update.setup.export_media_set.is_some() { data.setup.export_media_set = update.setup.export_media_set; }
if update.setup.latest_only.is_some() { data.setup.latest_only = update.setup.latest_only; }
if update.setup.notify_user.is_some() { data.setup.notify_user = update.setup.notify_user; }
if update.setup.group_filter.is_some() { data.setup.group_filter = update.setup.group_filter; }
if update.setup.eject_media.is_some() {
data.setup.eject_media = update.setup.eject_media;
};
if update.setup.export_media_set.is_some() {
data.setup.export_media_set = update.setup.export_media_set;
}
if update.setup.latest_only.is_some() {
data.setup.latest_only = update.setup.latest_only;
}
if update.setup.notify_user.is_some() {
data.setup.notify_user = update.setup.notify_user;
}
if update.setup.group_filter.is_some() {
data.setup.group_filter = update.setup.group_filter;
}
let schedule_changed = data.schedule != update.schedule;
if update.schedule.is_some() { data.schedule = update.schedule; }
if update.schedule.is_some() {
data.schedule = update.schedule;
}
if let Some(comment) = update.comment {
let comment = comment.trim();
@ -267,8 +297,10 @@ pub fn delete_tape_backup_job(
match config.lookup::<TapeBackupJobConfig>("backup", &id) {
Ok(_job) => {
config.sections.remove(&id);
},
Err(_) => { http_bail!(NOT_FOUND, "job '{}' does not exist.", id) },
}
Err(_) => {
http_bail!(NOT_FOUND, "job '{}' does not exist.", id)
}
};
pbs_config::tape_job::save_config(&config)?;

View File

@ -1,15 +1,13 @@
use anyhow::{format_err, bail, Error};
use serde_json::Value;
use anyhow::{bail, format_err, Error};
use hex::FromHex;
use serde_json::Value;
use proxmox_router::{http_bail, ApiMethod, Router, RpcEnvironment, Permission};
use proxmox_router::{http_bail, ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::{api, param_bail};
use pbs_api_types::{
Authid, Fingerprint, KeyInfo, Kdf,
TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
PROXMOX_CONFIG_DIGEST_SCHEMA, PASSWORD_HINT_SCHEMA,
PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
Authid, Fingerprint, Kdf, KeyInfo, PASSWORD_HINT_SCHEMA, PRIV_TAPE_AUDIT, PRIV_TAPE_MODIFY,
PROXMOX_CONFIG_DIGEST_SCHEMA, TAPE_ENCRYPTION_KEY_FINGERPRINT_SCHEMA,
};
use pbs_config::CachedUserInfo;
@ -17,12 +15,7 @@ use pbs_config::CachedUserInfo;
use pbs_config::key_config::KeyConfig;
use pbs_config::open_backup_lockfile;
use pbs_config::tape_encryption_keys::{
TAPE_KEYS_LOCKFILE,
load_keys,
load_key_configs,
save_keys,
save_key_configs,
insert_key,
insert_key, load_key_configs, load_keys, save_key_configs, save_keys, TAPE_KEYS_LOCKFILE,
};
#[api(
@ -44,7 +37,6 @@ pub fn list_keys(
_info: &ApiMethod,
mut rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<KeyInfo>, Error> {
let (key_map, digest) = load_key_configs()?;
let mut list = Vec::new();
@ -106,13 +98,15 @@ pub fn change_passphrase(
force: bool,
fingerprint: Fingerprint,
digest: Option<String>,
rpcenv: &mut dyn RpcEnvironment
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let kdf = kdf.unwrap_or_default();
if let Kdf::None = kdf {
param_bail!("kdf", format_err!("Please specify a key derivation function (none is not allowed here)."));
param_bail!(
"kdf",
format_err!("Please specify a key derivation function (none is not allowed here).")
);
}
let _lock = open_backup_lockfile(TAPE_KEYS_LOCKFILE, None, true)?;
@ -126,7 +120,11 @@ pub fn change_passphrase(
let key_config = match config_map.get(&fingerprint) {
Some(key_config) => key_config,
None => http_bail!(NOT_FOUND, "tape encryption key configuration '{}' does not exist.", fingerprint),
None => http_bail!(
NOT_FOUND,
"tape encryption key configuration '{}' does not exist.",
fingerprint
),
};
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@ -137,13 +135,23 @@ pub fn change_passphrase(
}
let (key, created, fingerprint) = match (force, &password) {
(true, Some(_)) => param_bail!("password", format_err!("password is not allowed when using force")),
(true, Some(_)) => param_bail!(
"password",
format_err!("password is not allowed when using force")
),
(false, None) => param_bail!("password", format_err!("missing parameter: password")),
(false, Some(pass)) => key_config.decrypt(&|| Ok(pass.as_bytes().to_vec()))?,
(true, None) => {
let key = load_keys()?.0.get(&fingerprint).ok_or_else(|| {
format_err!("failed to reset passphrase, could not find key '{}'", fingerprint)
})?.key;
let key = load_keys()?
.0
.get(&fingerprint)
.ok_or_else(|| {
format_err!(
"failed to reset passphrase, could not find key '{}'",
fingerprint
)
})?
.key;
(key, key_config.created, fingerprint)
}
@ -189,13 +197,15 @@ pub fn create_key(
kdf: Option<Kdf>,
password: String,
hint: String,
_rpcenv: &mut dyn RpcEnvironment
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Fingerprint, Error> {
let kdf = kdf.unwrap_or_default();
if let Kdf::None = kdf {
param_bail!("kdf", format_err!("Please specify a key derivation function (none is not allowed here)."));
param_bail!(
"kdf",
format_err!("Please specify a key derivation function (none is not allowed here).")
);
}
let (key, mut key_config) = KeyConfig::new(password.as_bytes(), kdf)?;
@ -208,7 +218,6 @@ pub fn create_key(
Ok(fingerprint)
}
#[api(
input: {
properties: {
@ -229,12 +238,15 @@ pub fn read_key(
fingerprint: Fingerprint,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<KeyInfo, Error> {
let (config_map, _digest) = load_key_configs()?;
let key_config = match config_map.get(&fingerprint) {
Some(key_config) => key_config,
None => http_bail!(NOT_FOUND, "tape encryption key '{}' does not exist.", fingerprint),
None => http_bail!(
NOT_FOUND,
"tape encryption key '{}' does not exist.",
fingerprint
),
};
if key_config.kdf.is_none() {
@ -280,8 +292,14 @@ pub fn delete_key(
}
match config_map.get(&fingerprint) {
Some(_) => { config_map.remove(&fingerprint); },
None => http_bail!(NOT_FOUND, "tape encryption key '{}' does not exist.", fingerprint),
Some(_) => {
config_map.remove(&fingerprint);
}
None => http_bail!(
NOT_FOUND,
"tape encryption key '{}' does not exist.",
fingerprint
),
}
save_key_configs(config_map)?;

View File

@ -9,15 +9,14 @@ use std::path::PathBuf;
use anyhow::{bail, Error};
use proxmox_sys::fs::{CreateOptions, replace_file, file_read_optional_string};
use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use pbs_api_types::{ScsiTapeChanger, LtoTapeDrive};
use pbs_api_types::{LtoTapeDrive, ScsiTapeChanger};
use pbs_tape::{sg_pt_changer, MtxStatus, ElementStatus};
use pbs_tape::{sg_pt_changer, ElementStatus, MtxStatus};
/// Interface to SCSI changer devices
pub trait ScsiMediaChange {
fn status(&mut self, use_cache: bool) -> Result<MtxStatus, Error>;
fn load_slot(&mut self, from_slot: u64, drivenum: u64) -> Result<MtxStatus, Error>;
@ -29,7 +28,6 @@ pub trait ScsiMediaChange {
/// Interface to the media changer device for a single drive
pub trait MediaChange {
/// Drive number inside changer
fn drive_number(&self) -> u64;
@ -55,9 +53,11 @@ pub trait MediaChange {
/// slots. Also, you cannot load cleaning units with this
/// interface.
fn load_media(&mut self, label_text: &str) -> Result<MtxStatus, Error> {
if label_text.starts_with("CLN") {
bail!("unable to load media '{}' (seems to be a cleaning unit)", label_text);
bail!(
"unable to load media '{}' (seems to be a cleaning unit)",
label_text
);
}
let mut status = self.status()?;
@ -69,15 +69,19 @@ pub trait MediaChange {
if let ElementStatus::VolumeTag(ref tag) = drive_status.status {
if *tag == label_text {
if i as u64 != self.drive_number() {
bail!("unable to load media '{}' - media in wrong drive ({} != {})",
label_text, i, self.drive_number());
bail!(
"unable to load media '{}' - media in wrong drive ({} != {})",
label_text,
i,
self.drive_number()
);
}
return Ok(status) // already loaded
return Ok(status); // already loaded
}
}
if i as u64 == self.drive_number() {
match drive_status.status {
ElementStatus::Empty => { /* OK */ },
ElementStatus::Empty => { /* OK */ }
_ => unload_drive = true,
}
}
@ -92,7 +96,10 @@ pub trait MediaChange {
if let ElementStatus::VolumeTag(ref tag) = slot_info.status {
if tag == label_text {
if slot_info.import_export {
bail!("unable to load media '{}' - inside import/export slot", label_text);
bail!(
"unable to load media '{}' - inside import/export slot",
label_text
);
}
slot = Some(i + 1);
break;
@ -127,9 +134,13 @@ pub trait MediaChange {
}
for slot_info in status.slots.iter() {
if slot_info.import_export { continue; }
if slot_info.import_export {
continue;
}
if let ElementStatus::VolumeTag(ref tag) = slot_info.status {
if tag.starts_with("CLN") { continue; }
if tag.starts_with("CLN") {
continue;
}
list.push(tag.clone());
}
}
@ -147,15 +158,19 @@ pub trait MediaChange {
// Unload drive first. Note: This also unloads a loaded cleaning tape
if let Some(drive_status) = status.drives.get(self.drive_number() as usize) {
match drive_status.status {
ElementStatus::Empty => { /* OK */ },
_ => { status = self.unload_to_free_slot(status)?; }
ElementStatus::Empty => { /* OK */ }
_ => {
status = self.unload_to_free_slot(status)?;
}
}
}
let mut cleaning_cartridge_slot = None;
for (i, slot_info) in status.slots.iter().enumerate() {
if slot_info.import_export { continue; }
if slot_info.import_export {
continue;
}
if let ElementStatus::VolumeTag(ref tag) = slot_info.status {
if tag.starts_with("CLN") {
cleaning_cartridge_slot = Some(i + 1);
@ -169,7 +184,6 @@ pub trait MediaChange {
Some(cleaning_cartridge_slot) => cleaning_cartridge_slot as u64,
};
self.load_media_from_slot(cleaning_cartridge_slot)?;
self.unload_media(Some(cleaning_cartridge_slot))
@ -197,7 +211,9 @@ pub trait MediaChange {
for (i, slot_info) in status.slots.iter().enumerate() {
if slot_info.import_export {
if to.is_some() { continue; }
if to.is_some() {
continue;
}
if let ElementStatus::Empty = slot_info.status {
to = Some(i as u64 + 1);
}
@ -234,7 +250,6 @@ pub trait MediaChange {
///
/// Note: This method consumes status - so please use returned status afterward.
fn unload_to_free_slot(&mut self, status: MtxStatus) -> Result<MtxStatus, Error> {
let drive_status = &status.drives[self.drive_number() as usize];
if let Some(slot) = drive_status.loaded_slot {
// check if original slot is empty/usable
@ -248,7 +263,10 @@ pub trait MediaChange {
if let Some(slot) = status.find_free_slot(false) {
self.unload_media(Some(slot))
} else {
bail!("drive '{}' unload failure - no free slot", self.drive_name());
bail!(
"drive '{}' unload failure - no free slot",
self.drive_name()
);
}
}
}
@ -256,7 +274,6 @@ pub trait MediaChange {
const USE_MTX: bool = false;
impl ScsiMediaChange for ScsiTapeChanger {
fn status(&mut self, use_cache: bool) -> Result<MtxStatus, Error> {
if use_cache {
if let Some(state) = load_changer_state_cache(&self.name)? {
@ -328,11 +345,7 @@ impl ScsiMediaChange for ScsiTapeChanger {
}
}
fn save_changer_state_cache(
changer: &str,
state: &MtxStatus,
) -> Result<(), Error> {
fn save_changer_state_cache(changer: &str, state: &MtxStatus) -> Result<(), Error> {
let mut path = PathBuf::from(crate::tape::CHANGER_STATE_DIR);
path.push(changer);
@ -377,7 +390,6 @@ pub struct MtxMediaChanger {
}
impl MtxMediaChanger {
pub fn with_drive_config(drive_config: &LtoTapeDrive) -> Result<Self, Error> {
let (config, _digest) = pbs_config::drive::config()?;
let changer_config: ScsiTapeChanger = match drive_config.changer {
@ -394,7 +406,6 @@ impl MtxMediaChanger {
}
impl MediaChange for MtxMediaChanger {
fn drive_number(&self) -> u64 {
self.drive_number
}

View File

@ -1,14 +1,10 @@
use anyhow::Error;
use proxmox_sys::command::run_command;
use pbs_api_types::ScsiTapeChanger;
use pbs_tape::MtxStatus;
use proxmox_sys::command::run_command;
use crate::{
tape::changer::{
mtx::parse_mtx_status,
},
};
use crate::tape::changer::mtx::parse_mtx_status;
/// Run 'mtx status' and return parsed result.
pub fn mtx_status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {
@ -27,12 +23,7 @@ pub fn mtx_status(config: &ScsiTapeChanger) -> Result<MtxStatus, Error> {
}
/// Run 'mtx load'
pub fn mtx_load(
path: &str,
slot: u64,
drivenum: u64,
) -> Result<(), Error> {
pub fn mtx_load(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> {
let mut command = std::process::Command::new("mtx");
command.args(&["-f", path, "load", &slot.to_string(), &drivenum.to_string()]);
run_command(command, None)?;
@ -41,28 +32,30 @@ pub fn mtx_load(
}
/// Run 'mtx unload'
pub fn mtx_unload(
path: &str,
slot: u64,
drivenum: u64,
) -> Result<(), Error> {
pub fn mtx_unload(path: &str, slot: u64, drivenum: u64) -> Result<(), Error> {
let mut command = std::process::Command::new("mtx");
command.args(&["-f", path, "unload", &slot.to_string(), &drivenum.to_string()]);
command.args(&[
"-f",
path,
"unload",
&slot.to_string(),
&drivenum.to_string(),
]);
run_command(command, None)?;
Ok(())
}
/// Run 'mtx transfer'
pub fn mtx_transfer(
path: &str,
from_slot: u64,
to_slot: u64,
) -> Result<(), Error> {
pub fn mtx_transfer(path: &str, from_slot: u64, to_slot: u64) -> Result<(), Error> {
let mut command = std::process::Command::new("mtx");
command.args(&["-f", path, "transfer", &from_slot.to_string(), &to_slot.to_string()]);
command.args(&[
"-f",
path,
"transfer",
&from_slot.to_string(),
&to_slot.to_string(),
]);
run_command(command, None)?;

View File

@ -1,15 +1,13 @@
use anyhow::Error;
use nom::bytes::complete::{take_while, tag};
use nom::bytes::complete::{tag, take_while};
use pbs_tape::{ElementStatus, MtxStatus, DriveStatus, StorageElementStatus};
use pbs_tape::{DriveStatus, ElementStatus, MtxStatus, StorageElementStatus};
use pbs_tools::nom::{
parse_complete, multispace0, multispace1, parse_u64,
parse_failure, parse_error, IResult,
multispace0, multispace1, parse_complete, parse_error, parse_failure, parse_u64, IResult,
};
// Recognizes one line
fn next_line(i: &str) -> IResult<&str, &str> {
let (i, line) = take_while(|c| (c != '\n'))(i)?;
@ -21,7 +19,6 @@ fn next_line(i: &str) -> IResult<&str, &str> {
}
fn parse_storage_changer(i: &str) -> IResult<&str, ()> {
let (i, _) = multispace0(i)?;
let (i, _) = tag("Storage Changer")(i)?;
let (i, _) = next_line(i)?; // skip
@ -30,7 +27,6 @@ fn parse_storage_changer(i: &str) -> IResult<&str, ()> {
}
fn parse_drive_status(i: &str, id: u64) -> IResult<&str, DriveStatus> {
let mut loaded_slot = None;
if let Some(empty) = i.strip_prefix("Empty") {
@ -94,7 +90,6 @@ fn parse_slot_status(i: &str) -> IResult<&str, ElementStatus> {
let (n, tag) = take_while(|c| !(c == ' ' || c == ':' || c == '\n'))(n)?;
let (n, _) = take_while(|c| c != '\n')(n)?; // skip to eol
return Ok((n, ElementStatus::VolumeTag(tag.to_string())));
}
let (n, _) = take_while(|c| c != '\n')(n)?; // skip
@ -105,7 +100,6 @@ fn parse_slot_status(i: &str) -> IResult<&str, ElementStatus> {
}
fn parse_data_transfer_element(i: &str) -> IResult<&str, (u64, DriveStatus)> {
let (i, _) = tag("Data Transfer Element")(i)?;
let (i, _) = multispace1(i)?;
let (i, id) = parse_u64(i)?;
@ -117,7 +111,6 @@ fn parse_data_transfer_element(i: &str) -> IResult<&str, (u64, DriveStatus)> {
}
fn parse_storage_element(i: &str) -> IResult<&str, (u64, bool, ElementStatus)> {
let (i, _) = multispace1(i)?;
let (i, _) = tag("Storage Element")(i)?;
let (i, _) = multispace1(i)?;
@ -132,7 +125,6 @@ fn parse_storage_element(i: &str) -> IResult<&str, (u64, bool, ElementStatus)> {
}
fn parse_status(i: &str) -> IResult<&str, MtxStatus> {
let (mut i, _) = parse_storage_changer(i)?;
let mut drives = Vec::new();
@ -158,14 +150,17 @@ fn parse_status(i: &str) -> IResult<&str, MtxStatus> {
slots.push(status);
}
let status = MtxStatus { drives, slots, transports: Vec::new() };
let status = MtxStatus {
drives,
slots,
transports: Vec::new(),
};
Ok((i, status))
}
/// Parses the output from 'mtx status'
pub fn parse_mtx_status(i: &str) -> Result<MtxStatus, Error> {
let status = parse_complete("mtx status", i, parse_status)?;
Ok(status)
@ -173,7 +168,6 @@ pub fn parse_mtx_status(i: &str) -> Result<MtxStatus, Error> {
#[test]
fn test_changer_status() -> Result<(), Error> {
let output = r###" Storage Changer /dev/tape/by-id/scsi-387408F60F0000:2 Drives, 24 Slots ( 4 Import/Export )
Data Transfer Element 0:Empty
Data Transfer Element 1:Empty

View File

@ -1,16 +1,16 @@
use std::path::Path;
use std::collections::{HashMap, HashSet};
use std::path::Path;
use anyhow::{bail, Error};
use proxmox_section_config::SectionConfigData;
use proxmox_uuid::Uuid;
use pbs_api_types::{VirtualTapeDrive, ScsiTapeChanger};
use pbs_api_types::{ScsiTapeChanger, VirtualTapeDrive};
use pbs_tape::{ElementStatus, MtxStatus};
use crate::tape::Inventory;
use crate::tape::changer::{MediaChange, ScsiMediaChange};
use crate::tape::Inventory;
/// Helper to update media online status
///
@ -23,13 +23,11 @@ pub struct OnlineStatusMap {
}
impl OnlineStatusMap {
/// Creates a new instance with one map entry for each configured
/// changer (or 'VirtualTapeDrive', which has an internal
/// changer). The map entry is set to 'None' to indicate that we
/// do not have information about the online status.
pub fn new(config: &SectionConfigData) -> Result<Self, Error> {
let mut map = HashMap::new();
let changers: Vec<ScsiTapeChanger> = config.convert_to_typed_array("changer")?;
@ -42,7 +40,10 @@ impl OnlineStatusMap {
map.insert(vtape.name.clone(), None);
}
Ok(Self { map, changer_map: HashMap::new() })
Ok(Self {
map,
changer_map: HashMap::new(),
})
}
/// Returns the associated changer name for a media.
@ -61,11 +62,14 @@ impl OnlineStatusMap {
}
/// Update the online set for the specified changer
pub fn update_online_status(&mut self, changer_name: &str, online_set: HashSet<Uuid>) -> Result<(), Error> {
pub fn update_online_status(
&mut self,
changer_name: &str,
online_set: HashSet<Uuid>,
) -> Result<(), Error> {
match self.map.get(changer_name) {
None => bail!("no such changer '{}' device", changer_name),
Some(None) => { /* Ok */ },
Some(None) => { /* Ok */ }
Some(Some(_)) => {
// do not allow updates to keep self.changer_map consistent
bail!("update_online_status '{}' called twice", changer_name);
@ -73,7 +77,8 @@ impl OnlineStatusMap {
}
for uuid in online_set.iter() {
self.changer_map.insert(uuid.clone(), changer_name.to_string());
self.changer_map
.insert(uuid.clone(), changer_name.to_string());
}
self.map.insert(changer_name.to_string(), Some(online_set));
@ -87,7 +92,6 @@ impl OnlineStatusMap {
/// Returns a HashSet containing all found media Uuid. This only
/// returns media found in Inventory.
pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> HashSet<Uuid> {
let mut online_set = HashSet::new();
for drive_status in status.drives.iter() {
@ -99,7 +103,9 @@ pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> Ha
}
for slot_info in status.slots.iter() {
if slot_info.import_export { continue; }
if slot_info.import_export {
continue;
}
if let ElementStatus::VolumeTag(ref label_text) = slot_info.status {
if let Some(media_id) = inventory.find_media_by_label_text(label_text) {
online_set.insert(media_id.label.uuid.clone());
@ -113,8 +119,10 @@ pub fn mtx_status_to_online_set(status: &MtxStatus, inventory: &Inventory) -> Ha
/// Update online media status
///
/// For a single 'changer', or else simply ask all changer devices.
pub fn update_online_status(state_path: &Path, changer: Option<&str>) -> Result<OnlineStatusMap, Error> {
pub fn update_online_status(
state_path: &Path,
changer: Option<&str>,
) -> Result<OnlineStatusMap, Error> {
let (config, _digest) = pbs_config::drive::config()?;
let mut inventory = Inventory::load(state_path)?;
@ -135,7 +143,10 @@ pub fn update_online_status(state_path: &Path, changer: Option<&str>) -> Result<
let status = match changer_config.status(false) {
Ok(status) => status,
Err(err) => {
eprintln!("unable to get changer '{}' status - {}", changer_config.name, err);
eprintln!(
"unable to get changer '{}' status - {}",
changer_config.name, err
);
continue;
}
};
@ -172,7 +183,10 @@ pub fn update_online_status(state_path: &Path, changer: Option<&str>) -> Result<
if let Some(changer) = changer {
if !found_changer {
bail!("update_online_status failed - no such changer '{}'", changer);
bail!(
"update_online_status failed - no such changer '{}'",
changer
);
}
}
@ -188,7 +202,6 @@ pub fn update_changer_online_status(
changer_name: &str,
label_text_list: &[String],
) -> Result<(), Error> {
let mut online_map = OnlineStatusMap::new(drive_config)?;
let mut online_set = HashSet::new();
for label_text in label_text_list.iter() {

View File

@ -11,30 +11,28 @@
//!
//! - inability to detect EOT (you just get EIO)
use std::convert::TryInto;
use std::fs::File;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::convert::TryInto;
use anyhow::{bail, format_err, Error};
use proxmox_uuid::Uuid;
use pbs_api_types::{
Fingerprint, MamAttribute, LtoDriveAndMediaStatus, LtoTapeDrive, Lp17VolumeStatistics,
Fingerprint, Lp17VolumeStatistics, LtoDriveAndMediaStatus, LtoTapeDrive, MamAttribute,
};
use pbs_config::key_config::KeyConfig;
use proxmox_sys::command::run_command;
use pbs_tape::{
TapeWrite, TapeRead, BlockReadError, MediaContentHeader,
sg_tape::{SgTape, TapeAlertFlags},
linux_list_drives::open_lto_tape_device,
sg_tape::{SgTape, TapeAlertFlags},
BlockReadError, MediaContentHeader, TapeRead, TapeWrite,
};
use proxmox_sys::command::run_command;
use crate::{
tape::{
use crate::tape::{
drive::TapeDriver,
file_formats::{PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, MediaSetLabel},
},
file_formats::{MediaSetLabel, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0},
};
/// Open a tape device
@ -46,7 +44,6 @@ use crate::{
/// - check block size
/// - for autoloader only, try to reload ejected tapes
pub fn open_lto_tape_drive(config: &LtoTapeDrive) -> Result<LtoTapeHandle, Error> {
proxmox_lang::try_block!({
let file = open_lto_tape_device(&config.path)?;
@ -64,7 +61,15 @@ pub fn open_lto_tape_drive(config: &LtoTapeDrive) -> Result<LtoTapeHandle, Error
handle.set_default_options()?;
Ok(handle)
}).map_err(|err: Error| format_err!("open drive '{}' ({}) failed - {}", config.name, config.path, err))
})
.map_err(|err: Error| {
format_err!(
"open drive '{}' ({}) failed - {}",
config.name,
config.path,
err
)
})
}
/// Lto Tape device handle
@ -73,7 +78,6 @@ pub struct LtoTapeHandle {
}
impl LtoTapeHandle {
/// Creates a new instance
pub fn new(file: File) -> Result<Self, Error> {
let sg_tape = SgTape::new(file)?;
@ -93,7 +97,8 @@ impl LtoTapeHandle {
block_length: Option<u32>,
buffer_mode: Option<bool>,
) -> Result<(), Error> {
self.sg_tape.set_drive_options(compression, block_length, buffer_mode)
self.sg_tape
.set_drive_options(compression, block_length, buffer_mode)
}
/// Write a single EOF mark without flushing buffers
@ -147,20 +152,20 @@ impl LtoTapeHandle {
/// Lock the drive door
pub fn lock(&mut self) -> Result<(), Error> {
self.sg_tape.set_medium_removal(false)
self.sg_tape
.set_medium_removal(false)
.map_err(|err| format_err!("lock door failed - {}", err))
}
/// Unlock the drive door
pub fn unlock(&mut self) -> Result<(), Error> {
self.sg_tape.set_medium_removal(true)
self.sg_tape
.set_medium_removal(true)
.map_err(|err| format_err!("unlock door failed - {}", err))
}
}
impl TapeDriver for LtoTapeHandle {
fn sync(&mut self) -> Result<(), Error> {
self.sg_tape.sync()?;
Ok(())
@ -172,7 +177,6 @@ impl TapeDriver for LtoTapeHandle {
}
fn move_to_last_file(&mut self) -> Result<(), Error> {
self.move_to_eom(false)?;
self.sg_tape.check_filemark()?;
@ -226,7 +230,6 @@ impl TapeDriver for LtoTapeHandle {
media_set_label: &MediaSetLabel,
key_config: Option<&KeyConfig>,
) -> Result<(), Error> {
let file_number = self.current_file_number()?;
if file_number != 1 {
self.rewind()?;
@ -235,12 +238,16 @@ impl TapeDriver for LtoTapeHandle {
let file_number = self.current_file_number()?;
if file_number != 1 {
bail!("write_media_set_label failed - got wrong file number ({} != 1)", file_number);
bail!(
"write_media_set_label failed - got wrong file number ({} != 1)",
file_number
);
}
self.set_encryption(None)?;
{ // limit handle scope
{
// limit handle scope
let mut handle = self.write_file()?;
let mut value = serde_json::to_value(media_set_label)?;
@ -257,7 +264,8 @@ impl TapeDriver for LtoTapeHandle {
let raw = serde_json::to_string_pretty(&value)?;
let header = MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, raw.len() as u32);
let header =
MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, raw.len() as u32);
handle.write_header(&header, raw.as_bytes())?;
handle.finish(false)?;
}
@ -285,15 +293,11 @@ impl TapeDriver for LtoTapeHandle {
&mut self,
key_fingerprint: Option<(Fingerprint, Uuid)>,
) -> Result<(), Error> {
if nix::unistd::Uid::effective().is_root() {
if let Some((ref key_fingerprint, ref uuid)) = key_fingerprint {
let (key_map, _digest) = pbs_config::tape_encryption_keys::load_keys()?;
match key_map.get(key_fingerprint) {
Some(item) => {
// derive specialized key for each media-set
let mut tape_key = [0u8; 32];
@ -305,7 +309,8 @@ impl TapeDriver for LtoTapeHandle {
&uuid_bytes,
10,
openssl::hash::MessageDigest::sha256(),
&mut tape_key)?;
&mut tape_key,
)?;
return self.sg_tape.set_encryption(Some(tape_key));
}
@ -318,10 +323,11 @@ impl TapeDriver for LtoTapeHandle {
let output = if let Some((fingerprint, uuid)) = key_fingerprint {
let fingerprint = fingerprint.signature();
run_sg_tape_cmd("encryption", &[
"--fingerprint", &fingerprint,
"--uuid", &uuid.to_string(),
], self.sg_tape.file_mut().as_raw_fd())?
run_sg_tape_cmd(
"encryption",
&["--fingerprint", &fingerprint, "--uuid", &uuid.to_string()],
self.sg_tape.file_mut().as_raw_fd(),
)?
} else {
run_sg_tape_cmd("encryption", &[], self.sg_tape.file_mut().as_raw_fd())?
};
@ -331,8 +337,8 @@ impl TapeDriver for LtoTapeHandle {
}
fn run_sg_tape_cmd(subcmd: &str, args: &[&str], fd: RawFd) -> Result<String, Error> {
let mut command = std::process::Command::new(
"/usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd");
let mut command =
std::process::Command::new("/usr/lib/x86_64-linux-gnu/proxmox-backup/sg-tape-cmd");
command.args(&[subcmd]);
command.args(&["--stdin"]);
command.args(args);

View File

@ -8,55 +8,40 @@ pub use lto::*;
use std::path::PathBuf;
use anyhow::{bail, format_err, Error};
use serde::Deserialize;
use serde_json::Value;
use nix::fcntl::OFlag;
use nix::sys::stat::Mode;
use serde::Deserialize;
use serde_json::Value;
use proxmox_sys::fs::{
lock_file,
atomic_open_or_create_file,
file_read_optional_string,
replace_file,
CreateOptions,
atomic_open_or_create_file, file_read_optional_string, lock_file, replace_file, CreateOptions,
};
use proxmox_io::ReadExt;
use proxmox_section_config::SectionConfigData;
use proxmox_uuid::Uuid;
use proxmox_sys::{task_log, WorkerTaskContext};
use proxmox_uuid::Uuid;
use pbs_api_types::{VirtualTapeDrive, LtoTapeDrive, Fingerprint};
use pbs_api_types::{Fingerprint, LtoTapeDrive, VirtualTapeDrive};
use pbs_config::key_config::KeyConfig;
use pbs_tape::{
TapeWrite, TapeRead, BlockReadError, MediaContentHeader,
sg_tape::TapeAlertFlags,
};
use pbs_tape::{sg_tape::TapeAlertFlags, BlockReadError, MediaContentHeader, TapeRead, TapeWrite};
use crate::{
server::send_load_media_email,
tape::{
MediaId,
drive::{
virtual_tape::open_virtual_tape_drive,
},
changer::{MediaChange, MtxMediaChanger},
drive::virtual_tape::open_virtual_tape_drive,
file_formats::{
PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
MediaLabel, MediaSetLabel, PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
MediaLabel,
MediaSetLabel,
},
changer::{
MediaChange,
MtxMediaChanger,
},
MediaId,
},
};
/// Tape driver interface
pub trait TapeDriver {
/// Flush all data to the tape
fn sync(&mut self) -> Result<(), Error>;
@ -90,14 +75,14 @@ pub trait TapeDriver {
/// Write label to tape (erase tape content)
fn label_tape(&mut self, label: &MediaLabel) -> Result<(), Error> {
self.set_encryption(None)?;
self.format_media(true)?; // this rewinds the tape
let raw = serde_json::to_string_pretty(&serde_json::to_value(&label)?)?;
let header = MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, raw.len() as u32);
let header =
MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, raw.len() as u32);
{
let mut writer = self.write_file()?;
@ -125,7 +110,6 @@ pub trait TapeDriver {
/// This tries to read both media labels (label and
/// media_set_label). Also returns the optional encryption key configuration.
fn read_label(&mut self) -> Result<(Option<MediaId>, Option<KeyConfig>), Error> {
self.rewind()?;
let label = {
@ -157,7 +141,10 @@ pub trait TapeDriver {
label
};
let mut media_id = MediaId { label, media_set_label: None };
let mut media_id = MediaId {
label,
media_set_label: None,
};
// try to read MediaSet label
let mut reader = match self.read_next_file() {
@ -238,10 +225,8 @@ pub fn media_changer(
config: &SectionConfigData,
drive: &str,
) -> Result<Option<(Box<dyn MediaChange>, String)>, Error> {
match config.sections.get(drive) {
Some((section_type_name, config)) => {
match section_type_name.as_ref() {
Some((section_type_name, config)) => match section_type_name.as_ref() {
"virtual" => {
let tape = VirtualTapeDrive::deserialize(config)?;
Ok(Some((Box::new(tape), drive.to_string())))
@ -258,8 +243,7 @@ pub fn media_changer(
}
}
ty => bail!("unknown drive type '{}' - internal error", ty),
}
}
},
None => {
bail!("no such drive '{}'", drive);
}
@ -274,27 +258,18 @@ pub fn required_media_changer(
drive: &str,
) -> Result<(Box<dyn MediaChange>, String), Error> {
match media_changer(config, drive) {
Ok(Some(result)) => {
Ok(result)
}
Ok(Some(result)) => Ok(result),
Ok(None) => {
bail!("drive '{}' has no associated changer device", drive);
},
Err(err) => {
Err(err)
}
Err(err) => Err(err),
}
}
/// Opens a tape drive (this fails if there is no media loaded)
pub fn open_drive(
config: &SectionConfigData,
drive: &str,
) -> Result<Box<dyn TapeDriver>, Error> {
pub fn open_drive(config: &SectionConfigData, drive: &str) -> Result<Box<dyn TapeDriver>, Error> {
match config.sections.get(drive) {
Some((section_type_name, config)) => {
match section_type_name.as_ref() {
Some((section_type_name, config)) => match section_type_name.as_ref() {
"virtual" => {
let tape = VirtualTapeDrive::deserialize(config)?;
let handle = open_virtual_tape_drive(&tape)?;
@ -306,8 +281,7 @@ pub fn open_drive(
Ok(Box::new(handle))
}
ty => bail!("unknown drive type '{}' - internal error", ty),
}
}
},
None => {
bail!("no such drive '{}'", drive);
}
@ -328,7 +302,7 @@ impl std::fmt::Display for TapeRequestError {
match self {
TapeRequestError::None => {
write!(f, "no error")
},
}
TapeRequestError::OpenFailed(reason) => {
write!(f, "tape open failed - {}", reason)
}
@ -336,7 +310,10 @@ impl std::fmt::Display for TapeRequestError {
write!(f, "wrong media label {}", label)
}
TapeRequestError::EmptyTape => {
write!(f, "found empty media without label (please label all tapes first)")
write!(
f,
"found empty media without label (please label all tapes first)"
)
}
TapeRequestError::ReadFailed(reason) => {
write!(f, "tape read failed - {}", reason)
@ -356,11 +333,7 @@ pub fn request_and_load_media(
drive: &str,
label: &MediaLabel,
notify_email: &Option<String>,
) -> Result<(
Box<dyn TapeDriver>,
MediaId,
), Error> {
) -> Result<(Box<dyn TapeDriver>, MediaId), Error> {
let check_label = |handle: &mut dyn TapeDriver, uuid: &proxmox_uuid::Uuid| {
if let Ok((Some(media_id), _)) = handle.read_label() {
task_log!(
@ -399,13 +372,18 @@ pub fn request_and_load_media(
let label_text = label.label_text.clone();
if drive_config.changer.is_some() {
task_log!(worker, "loading media '{}' into drive '{}'", label_text, drive);
task_log!(
worker,
"loading media '{}' into drive '{}'",
label_text,
drive
);
let mut changer = MtxMediaChanger::with_drive_config(&drive_config)?;
changer.load_media(&label_text)?;
let mut handle: Box<dyn TapeDriver> = Box::new(open_lto_tape_drive(&drive_config)?);
let mut handle: Box<dyn TapeDriver> =
Box::new(open_lto_tape_drive(&drive_config)?);
let media_id = check_label(handle.as_mut(), &label.uuid)?;
@ -415,8 +393,7 @@ pub fn request_and_load_media(
let mut last_error = TapeRequestError::None;
let update_and_log_request_error =
|old: &mut TapeRequestError, new: TapeRequestError| -> Result<(), Error>
{
|old: &mut TapeRequestError, new: TapeRequestError| -> Result<(), Error> {
if new != *old {
task_log!(worker, "{}", new);
task_log!(
@ -442,7 +419,8 @@ pub fn request_and_load_media(
worker.check_abort()?;
if last_error != TapeRequestError::None {
for _ in 0..50 { // delay 5 seconds
for _ in 0..50 {
// delay 5 seconds
worker.check_abort()?;
std::thread::sleep(std::time::Duration::from_millis(100));
}
@ -484,12 +462,8 @@ pub fn request_and_load_media(
);
TapeRequestError::WrongLabel(label_string)
}
Ok((None, _)) => {
TapeRequestError::EmptyTape
}
Err(err) => {
TapeRequestError::ReadFailed(err.to_string())
}
Ok((None, _)) => TapeRequestError::EmptyTape,
Err(err) => TapeRequestError::ReadFailed(err.to_string()),
};
update_and_log_request_error(&mut last_error, request_error)?;
@ -537,11 +511,7 @@ pub fn lock_tape_device(
/// Writes the given state for the specified drive
///
/// This function does not lock, so make sure the drive is locked
pub fn set_tape_device_state(
drive: &str,
state: &str,
) -> Result<(), Error> {
pub fn set_tape_device_state(drive: &str, state: &str) -> Result<(), Error> {
let mut path = PathBuf::from(crate::tape::DRIVE_STATE_DIR);
path.push(drive);
@ -571,19 +541,12 @@ pub fn get_tape_device_state(
}
}
fn tape_device_path(
config: &SectionConfigData,
drive: &str,
) -> Result<String, Error> {
fn tape_device_path(config: &SectionConfigData, drive: &str) -> Result<String, Error> {
match config.sections.get(drive) {
Some((section_type_name, config)) => {
let path = match section_type_name.as_ref() {
"virtual" => {
VirtualTapeDrive::deserialize(config)?.path
}
"lto" => {
LtoTapeDrive::deserialize(config)?.path
}
"virtual" => VirtualTapeDrive::deserialize(config)?.path,
"lto" => LtoTapeDrive::deserialize(config)?.path,
ty => bail!("unknown drive type '{}' - internal error", ty),
};
Ok(path)
@ -638,13 +601,12 @@ fn lock_device_path(device_path: &str) -> Result<DeviceLockGuard, TapeLockError>
// Same logic as lock_device_path, but uses a timeout of 0, making it
// non-blocking, and returning if the file is locked or not
fn test_device_path_lock(device_path: &str) -> Result<bool, Error> {
let mut file = open_device_lock(device_path)?;
let timeout = std::time::Duration::new(0, 0);
match lock_file(&mut file, true, Some(timeout)) {
// file was not locked, continue
Ok(()) => {},
Ok(()) => {}
// file was locked, return true
Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => return Ok(true),
Err(err) => bail!("{}", err),

View File

@ -4,40 +4,19 @@ use std::fs::File;
use std::io;
use anyhow::{bail, format_err, Error};
use serde::{Serialize, Deserialize};
use serde::{Deserialize, Serialize};
use proxmox_sys::{
fs::{replace_file, CreateOptions},
};
use proxmox_sys::fs::{replace_file, CreateOptions};
use pbs_config::key_config::KeyConfig;
use pbs_tape::{
TapeWrite,
TapeRead,
BlockedReader,
BlockedWriter,
BlockReadError,
MtxStatus,
DriveStatus,
ElementStatus,
StorageElementStatus,
MediaContentHeader,
EmulateTapeReader,
EmulateTapeWriter,
BlockReadError, BlockedReader, BlockedWriter, DriveStatus, ElementStatus, EmulateTapeReader,
EmulateTapeWriter, MediaContentHeader, MtxStatus, StorageElementStatus, TapeRead, TapeWrite,
};
use crate::{
tape::{
drive::{
VirtualTapeDrive,
TapeDriver,
MediaChange,
},
file_formats::{
MediaSetLabel,
PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
},
},
use crate::tape::{
drive::{MediaChange, TapeDriver, VirtualTapeDrive},
file_formats::{MediaSetLabel, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0},
};
/// This needs to lock the drive
@ -56,7 +35,15 @@ pub fn open_virtual_tape_drive(config: &VirtualTapeDrive) -> Result<VirtualTapeH
max_size: config.max_size.unwrap_or(64 * 1024 * 1024),
path: std::path::PathBuf::from(&config.path),
})
}).map_err(|err: Error| format_err!("open drive '{}' ({}) failed - {}", config.name, config.path, err))
})
.map_err(|err: Error| {
format_err!(
"open drive '{}' ({}) failed - {}",
config.name,
config.path,
err
)
})
}
#[derive(Serialize, Deserialize)]
@ -83,7 +70,6 @@ pub struct VirtualTapeHandle {
}
impl VirtualTapeHandle {
fn status_file_path(&self) -> std::path::PathBuf {
let mut path = self.path.clone();
path.push("drive-status.json");
@ -125,7 +111,7 @@ impl VirtualTapeHandle {
let mut index = self.load_tape_index(tape_name)?;
if index.files <= pos {
return Ok(index.files)
return Ok(index.files);
}
for i in pos..index.files {
@ -143,9 +129,7 @@ impl VirtualTapeHandle {
fn load_status(&self) -> Result<VirtualDriveStatus, Error> {
let path = self.status_file_path();
let default = serde_json::to_value(VirtualDriveStatus {
current_tape: None,
})?;
let default = serde_json::to_value(VirtualDriveStatus { current_tape: None })?;
let data = proxmox_sys::fs::file_get_json(&path, Some(default))?;
let status: VirtualDriveStatus = serde_json::from_value(data)?;
@ -183,9 +167,12 @@ impl VirtualTapeHandle {
fn forward_space_count_files(&mut self, count: usize) -> Result<(), Error> {
let mut status = self.load_status()?;
match status.current_tape {
Some(VirtualTapeStatus { ref name, ref mut pos }) => {
let index = self.load_tape_index(name)
Some(VirtualTapeStatus {
ref name,
ref mut pos,
}) => {
let index = self
.load_tape_index(name)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
let new_pos = *pos + count;
@ -210,7 +197,6 @@ impl VirtualTapeHandle {
let mut status = self.load_status()?;
match status.current_tape {
Some(VirtualTapeStatus { ref mut pos, .. }) => {
if count <= *pos {
*pos = *pos - count;
} else {
@ -225,28 +211,26 @@ impl VirtualTapeHandle {
None => bail!("drive is empty (no tape loaded)."),
}
}
}
impl TapeDriver for VirtualTapeHandle {
fn sync(&mut self) -> Result<(), Error> {
Ok(()) // do nothing for now
}
fn current_file_number(&mut self) -> Result<u64, Error> {
let status = self.load_status()
let status = self
.load_status()
.map_err(|err| format_err!("current_file_number failed: {}", err.to_string()))?;
match status.current_tape {
Some(VirtualTapeStatus { pos, .. }) => { Ok(pos as u64)},
Some(VirtualTapeStatus { pos, .. }) => Ok(pos as u64),
None => bail!("current_file_number failed: drive is empty (no tape loaded)."),
}
}
/// Move to last file
fn move_to_last_file(&mut self) -> Result<(), Error> {
self.move_to_eom(false)?;
if self.current_file_number()? == 0 {
@ -261,9 +245,12 @@ impl TapeDriver for VirtualTapeHandle {
fn move_to_file(&mut self, file: u64) -> Result<(), Error> {
let mut status = self.load_status()?;
match status.current_tape {
Some(VirtualTapeStatus { ref name, ref mut pos }) => {
let index = self.load_tape_index(name)
Some(VirtualTapeStatus {
ref name,
ref mut pos,
}) => {
let index = self
.load_tape_index(name)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
if file as usize > index.files {
@ -282,46 +269,55 @@ impl TapeDriver for VirtualTapeHandle {
}
fn read_next_file(&mut self) -> Result<Box<dyn TapeRead>, BlockReadError> {
let mut status = self.load_status()
.map_err(|err| BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())))?;
let mut status = self.load_status().map_err(|err| {
BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string()))
})?;
match status.current_tape {
Some(VirtualTapeStatus { ref name, ref mut pos }) => {
let index = self.load_tape_index(name)
.map_err(|err| BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())))?;
Some(VirtualTapeStatus {
ref name,
ref mut pos,
}) => {
let index = self.load_tape_index(name).map_err(|err| {
BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string()))
})?;
if *pos >= index.files {
return Err(BlockReadError::EndOfStream);
}
let path = self.tape_file_path(name, *pos);
let file = std::fs::OpenOptions::new()
.read(true)
.open(path)?;
let file = std::fs::OpenOptions::new().read(true).open(path)?;
*pos += 1;
self.store_status(&status)
.map_err(|err| BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string())))?;
self.store_status(&status).map_err(|err| {
BlockReadError::Error(io::Error::new(io::ErrorKind::Other, err.to_string()))
})?;
let reader = EmulateTapeReader::new(file);
let reader = BlockedReader::open(reader)?;
Ok(Box::new(reader))
}
None => {
return Err(BlockReadError::Error(proxmox_lang::io_format_err!("drive is empty (no tape loaded).")));
return Err(BlockReadError::Error(proxmox_lang::io_format_err!(
"drive is empty (no tape loaded)."
)));
}
}
}
fn write_file(&mut self) -> Result<Box<dyn TapeWrite>, io::Error> {
let mut status = self.load_status()
let mut status = self
.load_status()
.map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
match status.current_tape {
Some(VirtualTapeStatus { ref name, ref mut pos }) => {
let mut index = self.load_tape_index(name)
Some(VirtualTapeStatus {
ref name,
ref mut pos,
}) => {
let mut index = self
.load_tape_index(name)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
for i in *pos..index.files {
@ -333,7 +329,6 @@ impl TapeDriver for VirtualTapeHandle {
for i in 0..*pos {
let path = self.tape_file_path(name, i);
used_space += path.metadata()?.len() as usize;
}
index.files = *pos + 1;
@ -369,9 +364,12 @@ impl TapeDriver for VirtualTapeHandle {
fn move_to_eom(&mut self, _write_missing_eof: bool) -> Result<(), Error> {
let mut status = self.load_status()?;
match status.current_tape {
Some(VirtualTapeStatus { ref name, ref mut pos }) => {
let index = self.load_tape_index(name)
Some(VirtualTapeStatus {
ref name,
ref mut pos,
}) => {
let index = self
.load_tape_index(name)
.map_err(|err| io::Error::new(io::ErrorKind::Other, err.to_string()))?;
*pos = index.files;
@ -400,7 +398,10 @@ impl TapeDriver for VirtualTapeHandle {
fn format_media(&mut self, _fast: bool) -> Result<(), Error> {
let mut status = self.load_status()?;
match status.current_tape {
Some(VirtualTapeStatus { ref name, ref mut pos }) => {
Some(VirtualTapeStatus {
ref name,
ref mut pos,
}) => {
*pos = self.truncate_tape(name, 0)?;
self.store_status(&status)?;
Ok(())
@ -414,7 +415,6 @@ impl TapeDriver for VirtualTapeHandle {
media_set_label: &MediaSetLabel,
key_config: Option<&KeyConfig>,
) -> Result<(), Error> {
self.set_encryption(None)?;
if key_config.is_some() {
@ -423,7 +423,10 @@ impl TapeDriver for VirtualTapeHandle {
let mut status = self.load_status()?;
match status.current_tape {
Some(VirtualTapeStatus { ref name, ref mut pos }) => {
Some(VirtualTapeStatus {
ref name,
ref mut pos,
}) => {
*pos = self.truncate_tape(name, 1)?;
let pos = *pos;
self.store_status(&status)?;
@ -432,11 +435,17 @@ impl TapeDriver for VirtualTapeHandle {
bail!("media is empty (no label).");
}
if pos != 1 {
bail!("write_media_set_label: truncate failed - got wrong pos '{}'", pos);
bail!(
"write_media_set_label: truncate failed - got wrong pos '{}'",
pos
);
}
let raw = serde_json::to_string_pretty(&serde_json::to_value(media_set_label)?)?;
let header = MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, raw.len() as u32);
let header = MediaContentHeader::new(
PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
raw.len() as u32,
);
{
let mut writer = self.write_file()?;
@ -451,15 +460,12 @@ impl TapeDriver for VirtualTapeHandle {
}
fn eject_media(&mut self) -> Result<(), Error> {
let status = VirtualDriveStatus {
current_tape: None,
};
let status = VirtualDriveStatus { current_tape: None };
self.store_status(&status)
}
}
impl MediaChange for VirtualTapeHandle {
fn drive_number(&self) -> u64 {
0
}
@ -469,7 +475,6 @@ impl MediaChange for VirtualTapeHandle {
}
fn status(&mut self) -> Result<MtxStatus, Error> {
let drive_status = self.load_status()?;
let mut drives = Vec::new();
@ -505,7 +510,11 @@ impl MediaChange for VirtualTapeHandle {
});
}
Ok(MtxStatus { drives, slots, transports: Vec::new() })
Ok(MtxStatus {
drives,
slots,
transports: Vec::new(),
})
}
fn transfer_media(&mut self, _from: u64, _to: u64) -> Result<MtxStatus, Error> {
@ -568,7 +577,6 @@ impl MediaChange for VirtualTapeHandle {
}
impl MediaChange for VirtualTapeDrive {
fn drive_number(&self) -> u64 {
0
}

View File

@ -4,19 +4,9 @@ use std::io::Read;
use proxmox_sys::error::SysError;
use proxmox_uuid::Uuid;
use pbs_tape::{
PROXMOX_TAPE_BLOCK_SIZE,
TapeWrite, MediaContentHeader,
};
use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE};
use crate::{
tape::{
file_formats::{
PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
CatalogArchiveHeader,
},
},
};
use crate::tape::file_formats::{CatalogArchiveHeader, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0};
/// Write a media catalog to the tape
///
@ -32,17 +22,20 @@ pub fn tape_write_catalog<'a>(
seq_nr: usize,
file: &mut File,
) -> Result<Option<Uuid>, std::io::Error> {
let archive_header = CatalogArchiveHeader {
uuid: uuid.clone(),
media_set_uuid: media_set_uuid.clone(),
seq_nr: seq_nr as u64,
};
let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec();
let header_data = serde_json::to_string_pretty(&archive_header)?
.as_bytes()
.to_vec();
let header = MediaContentHeader::new(
PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, header_data.len() as u32);
PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
header_data.len() as u32,
);
let content_uuid: Uuid = header.uuid.into();
let leom = writer.write_header(&header, &header_data)?;
@ -54,7 +47,6 @@ pub fn tape_write_catalog<'a>(
let mut file_copy_buffer = proxmox_io::vec::undefined(PROXMOX_TAPE_BLOCK_SIZE);
let result: Result<(), std::io::Error> = proxmox_lang::try_block!({
let file_size = file.metadata()?.len();
let mut remaining = file_size;

View File

@ -7,16 +7,11 @@ use proxmox_io::ReadExt;
use proxmox_uuid::Uuid;
use pbs_datastore::DataBlob;
use pbs_tape::{
PROXMOX_TAPE_BLOCK_SIZE,
TapeWrite, MediaContentHeader,
};
use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE};
use crate::tape::file_formats::{
ChunkArchiveEntryHeader, ChunkArchiveHeader, PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0,
PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0,
ChunkArchiveHeader,
ChunkArchiveEntryHeader,
};
/// Writes chunk archives to tape.
@ -33,7 +28,6 @@ pub struct ChunkArchiveWriter<'a> {
}
impl<'a> ChunkArchiveWriter<'a> {
pub const MAGIC: [u8; 8] = PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1;
/// Creates a new instance
@ -42,9 +36,12 @@ impl <'a> ChunkArchiveWriter<'a> {
store: &str,
close_on_leom: bool,
) -> Result<(Self, Uuid), Error> {
let archive_header = ChunkArchiveHeader { store: store.to_string() };
let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec();
let archive_header = ChunkArchiveHeader {
store: store.to_string(),
};
let header_data = serde_json::to_string_pretty(&archive_header)?
.as_bytes()
.to_vec();
let header = MediaContentHeader::new(Self::MAGIC, header_data.len() as u32);
writer.write_header(&header, &header_data)?;
@ -69,8 +66,9 @@ impl <'a> ChunkArchiveWriter<'a> {
fn write_all(&mut self, data: &[u8]) -> Result<bool, std::io::Error> {
match self.writer {
Some(ref mut writer) => writer.write_all(data),
None => proxmox_lang::io_bail!(
"detected write after archive finished - internal error"),
None => {
proxmox_lang::io_bail!("detected write after archive finished - internal error")
}
}
}
@ -83,7 +81,6 @@ impl <'a> ChunkArchiveWriter<'a> {
digest: &[u8; 32],
blob: &DataBlob,
) -> Result<bool, std::io::Error> {
if self.writer.is_none() {
return Ok(false);
}
@ -95,9 +92,11 @@ impl <'a> ChunkArchiveWriter<'a> {
};
let head = head.to_le();
let data = unsafe { std::slice::from_raw_parts(
let data = unsafe {
std::slice::from_raw_parts(
&head as *const ChunkArchiveEntryHeader as *const u8,
std::mem::size_of::<ChunkArchiveEntryHeader>())
std::mem::size_of::<ChunkArchiveEntryHeader>(),
)
};
self.write_all(data)?;
@ -151,7 +150,6 @@ pub struct ChunkArchiveDecoder<R> {
}
impl<R: Read> ChunkArchiveDecoder<R> {
/// Creates a new instance
pub fn new(reader: R) -> Self {
Self { reader }
@ -164,7 +162,6 @@ impl <R: Read> ChunkArchiveDecoder<R> {
/// Returns the next chunk (if any).
pub fn next_chunk(&mut self) -> Result<Option<([u8; 32], DataBlob)>, Error> {
let mut header = ChunkArchiveEntryHeader {
magic: [0u8; 8],
digest: [0u8; 32],
@ -173,11 +170,12 @@ impl <R: Read> ChunkArchiveDecoder<R> {
let data = unsafe {
std::slice::from_raw_parts_mut(
(&mut header as *mut ChunkArchiveEntryHeader) as *mut u8,
std::mem::size_of::<ChunkArchiveEntryHeader>())
std::mem::size_of::<ChunkArchiveEntryHeader>(),
)
};
match self.reader.read_exact_or_eof(data) {
Ok(true) => {},
Ok(true) => {}
Ok(false) => {
// last chunk is allowed to be incomplete - simply report EOD
return Ok(None);

View File

@ -37,7 +37,8 @@ pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0: [u8; 8] = [62, 173, 167, 95, 4
pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1: [u8; 8] = [109, 49, 99, 109, 215, 2, 131, 191];
// openssl::sha::sha256(b"Proxmox Backup Chunk Archive Entry v1.0")[0..8]
pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] = [72, 87, 109, 242, 222, 66, 143, 220];
pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] =
[72, 87, 109, 242, 222, 66, 143, 220];
// openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.0")[0..8];
// only used in unreleased version - no longer supported
@ -46,7 +47,8 @@ pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0: [u8; 8] = [9, 182, 2, 31, 1
pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1: [u8; 8] = [218, 22, 21, 208, 17, 226, 154, 98];
// openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.0")[0..8];
pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] = [183, 207, 199, 37, 158, 153, 30, 115];
pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] =
[183, 207, 199, 37, 158, 153, 30, 115];
lazy_static::lazy_static! {
// Map content magic numbers to human readable names.
@ -65,10 +67,11 @@ lazy_static::lazy_static!{
/// Map content magic numbers to human readable names.
pub fn proxmox_tape_magic_to_text(magic: &[u8; 8]) -> Option<String> {
PROXMOX_TAPE_CONTENT_NAME.get(magic).map(|s| String::from(*s))
PROXMOX_TAPE_CONTENT_NAME
.get(magic)
.map(|s| String::from(*s))
}
#[derive(Deserialize, Serialize)]
/// Header for chunk archives
pub struct ChunkArchiveHeader {
@ -122,7 +125,6 @@ pub struct MediaLabel {
pub ctime: i64,
}
#[derive(Serialize, Deserialize, Clone, Debug)]
/// `MediaSet` Label
///
@ -143,7 +145,6 @@ pub struct MediaSetLabel {
}
impl MediaSetLabel {
pub fn with_data(
pool: &str,
uuid: Uuid,
@ -160,4 +161,3 @@ impl MediaSetLabel {
}
}
}

View File

@ -1,10 +1,10 @@
use std::io::{Read};
use std::io::Read;
use anyhow::{bail, Error};
use proxmox_io::ReadExt;
use pbs_tape::{TapeRead, MediaContentHeader};
use pbs_tape::{MediaContentHeader, TapeRead};
/// Read multi volume data streams written by `MultiVolumeWriter`
///
@ -17,17 +17,17 @@ pub struct MultiVolumeReader<'a> {
}
impl<'a> MultiVolumeReader<'a> {
/// Creates a new instance
pub fn new(
reader: Box<dyn TapeRead + 'a>,
header: MediaContentHeader,
next_reader_fn: Box<dyn 'a + FnMut() -> Result<Box<dyn TapeRead + 'a>, Error>>,
) -> Result<Self, Error> {
if header.part_number != 0 {
bail!("MultiVolumeReader::new - got wrong header part_number ({} != 0)",
header.part_number);
bail!(
"MultiVolumeReader::new - got wrong header part_number ({} != 0)",
header.part_number
);
}
Ok(Self {
@ -40,7 +40,6 @@ impl <'a> MultiVolumeReader<'a> {
}
impl<'a> Read for MultiVolumeReader<'a> {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
if self.complete {
return Ok(0);
@ -64,22 +63,25 @@ impl <'a> Read for MultiVolumeReader<'a> {
let expect_part_number = self.header.part_number + 1;
if part_header.part_number != expect_part_number {
proxmox_lang::io_bail!("got wrong part number ({} != {})",
part_header.part_number, expect_part_number);
proxmox_lang::io_bail!(
"got wrong part number ({} != {})",
part_header.part_number,
expect_part_number
);
}
self.header.part_number = expect_part_number;
Ok(())
}).map_err(|err| {
})
.map_err(|err| {
proxmox_lang::io_format_err!("multi-volume read content header failed: {}", err)
})?;
}
match self.reader {
None => unreachable!(),
Some(ref mut reader) => {
match reader.read(buf) {
Some(ref mut reader) => match reader.read(buf) {
Ok(0) => {
if reader.is_incomplete()? {
self.reader = None;
@ -91,9 +93,8 @@ impl <'a> Read for MultiVolumeReader<'a> {
}
}
Ok(n) => Ok(n),
Err(err) => Err(err)
}
}
Err(err) => Err(err),
},
}
}
}

View File

@ -2,7 +2,7 @@ use anyhow::Error;
use proxmox_uuid::Uuid;
use pbs_tape::{TapeWrite, MediaContentHeader};
use pbs_tape::{MediaContentHeader, TapeWrite};
/// Writes data streams using multiple volumes
///
@ -19,7 +19,6 @@ pub struct MultiVolumeWriter<'a> {
}
impl<'a> MultiVolumeWriter<'a> {
/// Creates a new instance
pub fn new(
writer: Box<dyn TapeWrite + 'a>,
@ -27,7 +26,6 @@ impl <'a> MultiVolumeWriter<'a> {
header_data: Vec<u8>,
next_writer_fn: Box<dyn 'a + FnMut() -> Result<Box<dyn TapeWrite + 'a>, Error>>,
) -> Self {
let header = MediaContentHeader::new(content_magic, header_data.len() as u32);
Self {
@ -49,16 +47,16 @@ impl <'a> MultiVolumeWriter<'a> {
}
impl<'a> TapeWrite for MultiVolumeWriter<'a> {
fn write_all(&mut self, buf: &[u8]) -> Result<bool, std::io::Error> {
if self.finished {
proxmox_lang::io_bail!("multi-volume writer already finished: internal error");
}
if self.got_leom {
if !self.wrote_header {
proxmox_lang::io_bail!("multi-volume writer: got LEOM before writing anything - internal error");
proxmox_lang::io_bail!(
"multi-volume writer: got LEOM before writing anything - internal error"
);
}
let mut writer = match self.writer.take() {
Some(writer) => writer,
@ -72,10 +70,9 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> {
if self.header.part_number == u8::MAX {
proxmox_lang::io_bail!("multi-volume writer: too many parts");
}
self.writer = Some(
(self.next_writer_fn)()
.map_err(|err| proxmox_lang::io_format_err!("multi-volume get next volume failed: {}", err))?
);
self.writer = Some((self.next_writer_fn)().map_err(|err| {
proxmox_lang::io_format_err!("multi-volume get next volume failed: {}", err)
})?);
self.got_leom = false;
self.wrote_header = false;
self.header.part_number += 1;
@ -92,7 +89,9 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> {
}
};
if leom { self.got_leom = true; }
if leom {
self.got_leom = true;
}
Ok(false)
}
@ -108,12 +107,14 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> {
fn finish(&mut self, incomplete: bool) -> Result<bool, std::io::Error> {
if incomplete {
proxmox_lang::io_bail!(
"incomplete flag makes no sense for multi-volume stream: internal error");
"incomplete flag makes no sense for multi-volume stream: internal error"
);
}
match self.writer.take() {
None if self.finished => proxmox_lang::io_bail!(
"multi-volume writer already finished: internal error"),
None if self.finished => {
proxmox_lang::io_bail!("multi-volume writer already finished: internal error")
}
None => Ok(false),
Some(ref mut writer) => {
self.finished = true;
@ -129,5 +130,4 @@ impl <'a> TapeWrite for MultiVolumeWriter<'a> {
fn logical_end_of_media(&self) -> bool {
self.got_leom
}
}
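The writer mirrors this on the output side: a write may succeed but report LEOM (logical end of media), after which the current volume is finished and the stream continues on the next one. A rough sketch of that hand-over, with a made-up `VolumeWrite` trait standing in for `pbs_tape::TapeWrite`:

use std::io;

// Hypothetical minimal stand-in for TapeWrite: write_all() returns true when
// the volume reports logical end of media (LEOM).
trait VolumeWrite {
    fn write_all(&mut self, buf: &[u8]) -> io::Result<bool>;
    fn finish(&mut self) -> io::Result<()>;
}

fn write_multi_volume(
    mut current: Box<dyn VolumeWrite>,
    mut next_writer: impl FnMut() -> io::Result<Box<dyn VolumeWrite>>,
    chunks: &[&[u8]],
) -> io::Result<()> {
    let mut part_number: u8 = 0;
    for chunk in chunks {
        let leom = current.write_all(chunk)?;
        if leom {
            // media (almost) full: close this part and continue on a new volume
            current.finish()?;
            part_number = part_number
                .checked_add(1)
                .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "too many parts"))?;
            current = next_writer()?;
        }
    }
    current.finish()
}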

View File

@ -5,17 +5,10 @@ use std::task::{Context, Poll};
use proxmox_sys::error::SysError;
use proxmox_uuid::Uuid;
use pbs_tape::{
PROXMOX_TAPE_BLOCK_SIZE,
TapeWrite, MediaContentHeader,
};
use pbs_datastore::SnapshotReader;
use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE};
use crate::tape::file_formats::{
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
SnapshotArchiveHeader,
};
use crate::tape::file_formats::{SnapshotArchiveHeader, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1};
/// Write a set of files as `pxar` archive to the tape
///
@ -29,17 +22,20 @@ pub fn tape_write_snapshot_archive<'a>(
writer: &mut (dyn TapeWrite + 'a),
snapshot_reader: &SnapshotReader,
) -> Result<Option<Uuid>, std::io::Error> {
let snapshot = snapshot_reader.snapshot().to_string();
let store = snapshot_reader.datastore_name().to_string();
let file_list = snapshot_reader.file_list();
let archive_header = SnapshotArchiveHeader { snapshot, store };
let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec();
let header_data = serde_json::to_string_pretty(&archive_header)?
.as_bytes()
.to_vec();
let header = MediaContentHeader::new(
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, header_data.len() as u32);
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
header_data.len() as u32,
);
let content_uuid = header.uuid.into();
let root_metadata = pxar::Metadata::dir_builder(0o0664).build();
@ -47,18 +43,20 @@ pub fn tape_write_snapshot_archive<'a>(
let mut file_copy_buffer = proxmox_io::vec::undefined(PROXMOX_TAPE_BLOCK_SIZE);
let result: Result<(), std::io::Error> = proxmox_lang::try_block!({
let leom = writer.write_header(&header, &header_data)?;
if leom {
return Err(std::io::Error::from_raw_os_error(nix::errno::Errno::ENOSPC as i32));
return Err(std::io::Error::from_raw_os_error(
nix::errno::Errno::ENOSPC as i32,
));
}
let mut encoder = pxar::encoder::sync::Encoder::new(PxarTapeWriter::new(writer), &root_metadata)?;
let mut encoder =
pxar::encoder::sync::Encoder::new(PxarTapeWriter::new(writer), &root_metadata)?;
for filename in file_list.iter() {
let mut file = snapshot_reader.open_file(filename)
.map_err(|err| proxmox_lang::io_format_err!("open file '{}' failed - {}", filename, err))?;
let mut file = snapshot_reader.open_file(filename).map_err(|err| {
proxmox_lang::io_format_err!("open file '{}' failed - {}", filename, err)
})?;
let metadata = file.metadata()?;
let file_size = metadata.len();
@ -77,7 +75,6 @@ pub fn tape_write_snapshot_archive<'a>(
}
out.write_all(&file_copy_buffer[..got])?;
remaining -= got as u64;
}
if remaining > 0 {
proxmox_lang::io_bail!("file '{}' shrunk while reading", filename);
@ -117,7 +114,6 @@ impl<'a, T: TapeWrite + ?Sized> PxarTapeWriter<'a, T> {
}
impl<'a, T: TapeWrite + ?Sized> pxar::encoder::SeqWrite for PxarTapeWriter<'a, T> {
fn poll_seq_write(
self: Pin<&mut Self>,
_cx: &mut Context,
@ -127,7 +123,9 @@ impl<'a, T: TapeWrite + ?Sized> pxar::encoder::SeqWrite for PxarTapeWriter<'a, T
Poll::Ready(match this.inner.write_all(buf) {
Ok(leom) => {
if leom {
Err(std::io::Error::from_raw_os_error(nix::errno::Errno::ENOSPC as i32))
Err(std::io::Error::from_raw_os_error(
nix::errno::Errno::ENOSPC as i32,
))
} else {
Ok(buf.len())
}
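One detail worth noting above: each file is streamed through a fixed-size copy buffer and the function insists that the advertised `file_size` is actually delivered, bailing out if the file shrank while being read. A standalone sketch of that invariant (an assumed helper, not the proxmox API; the "file grew" guard is an extra assumption here):

use std::io::{self, Read, Write};

// Copy exactly `expected_len` bytes from src to dst through a fixed buffer,
// erroring out if the source delivers more or fewer bytes than expected.
fn copy_exact_len<R: Read, W: Write>(
    mut src: R,
    mut dst: W,
    expected_len: u64,
    buf_size: usize,
) -> io::Result<()> {
    let mut buf = vec![0u8; buf_size];
    let mut remaining = expected_len;
    while remaining > 0 {
        let got = src.read(&mut buf)?;
        if got == 0 {
            break;
        }
        if got as u64 > remaining {
            return Err(io::Error::new(io::ErrorKind::Other, "file grew while reading"));
        }
        dst.write_all(&buf[..got])?;
        remaining -= got as u64;
    }
    if remaining > 0 {
        return Err(io::Error::new(io::ErrorKind::Other, "file shrunk while reading"));
    }
    Ok(())
}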

View File

@ -22,19 +22,19 @@
//! restore, to make sure it is not reused for backups.
//!
use std::collections::{HashMap, BTreeMap};
use std::collections::{BTreeMap, HashMap};
use std::path::{Path, PathBuf};
use std::time::Duration;
use anyhow::{bail, Error};
use serde::{Serialize, Deserialize};
use serde::{Deserialize, Serialize};
use serde_json::json;
use proxmox_sys::fs::{replace_file, file_get_json, CreateOptions};
use proxmox_sys::fs::{file_get_json, replace_file, CreateOptions};
use proxmox_uuid::Uuid;
use pbs_api_types::{MediaLocation, MediaSetPolicy, MediaStatus, RetentionPolicy};
use pbs_config::BackupLockGuard;
use pbs_api_types::{MediaSetPolicy, RetentionPolicy, MediaStatus, MediaLocation};
#[cfg(not(test))]
use pbs_config::open_backup_lockfile;
@ -48,18 +48,10 @@ fn open_backup_lockfile<P: AsRef<std::path::Path>>(
Ok(unsafe { pbs_config::create_mocked_lock() })
}
use crate::{
tape::{
TAPE_STATUS_DIR,
MediaSet,
MediaCatalog,
file_formats::{
MediaLabel,
MediaSetLabel,
},
use crate::tape::{
changer::OnlineStatusMap,
},
file_formats::{MediaLabel, MediaSetLabel},
MediaCatalog, MediaSet, TAPE_STATUS_DIR,
};
/// Unique Media Identifier
@ -72,7 +64,6 @@ pub struct MediaId {
pub media_set_label: Option<MediaSetLabel>,
}
#[derive(Serialize, Deserialize)]
struct MediaStateEntry {
id: MediaId,
@ -90,17 +81,15 @@ pub struct Inventory {
lockfile_path: PathBuf,
// helpers
media_set_start_times: HashMap<Uuid, i64>
media_set_start_times: HashMap<Uuid, i64>,
}
impl Inventory {
pub const MEDIA_INVENTORY_FILENAME: &'static str = "inventory.json";
pub const MEDIA_INVENTORY_LOCKFILE: &'static str = ".inventory.lck";
/// Create empty instance, no data loaded
pub fn new(base_path: &Path) -> Self {
let mut inventory_path = base_path.to_owned();
inventory_path.push(Self::MEDIA_INVENTORY_FILENAME);
@ -129,7 +118,6 @@ impl Inventory {
}
fn update_helpers(&mut self) {
// recompute media_set_start_times
let mut set_start_times = HashMap::new();
@ -153,7 +141,6 @@ impl Inventory {
}
fn load_media_db(path: &Path) -> Result<BTreeMap<Uuid, MediaStateEntry>, Error> {
let data = file_get_json(path, Some(json!([])))?;
let media_list: Vec<MediaStateEntry> = serde_json::from_value(data)?;
@ -188,11 +175,7 @@ impl Inventory {
}
/// Stores a single MediaID persistently
pub fn store(
&mut self,
mut media_id: MediaId,
clear_media_status: bool,
) -> Result<(), Error> {
pub fn store(&mut self, mut media_id: MediaId, clear_media_status: bool) -> Result<(), Error> {
let _lock = self.lock()?;
self.map = Self::load_media_db(&self.inventory_path)?;
@ -218,7 +201,11 @@ impl Inventory {
};
self.map.insert(uuid, entry);
} else {
let entry = MediaStateEntry { id: media_id, location: None, status: None };
let entry = MediaStateEntry {
id: media_id,
location: None,
status: None,
};
self.map.insert(uuid, entry);
}
@ -323,13 +310,16 @@ impl Inventory {
/// List media not assigned to any pool
pub fn list_unassigned_media(&self) -> Vec<MediaId> {
self.map.values().filter_map(|entry|
self.map
.values()
.filter_map(|entry| {
if entry.id.media_set_label.is_none() {
Some(entry.id.clone())
} else {
None
}
).collect()
})
.collect()
}
pub fn media_set_start_time(&self, media_set_uuid: &Uuid) -> Option<i64> {
@ -338,7 +328,6 @@ impl Inventory {
/// Lookup media set pool
pub fn lookup_media_set_pool(&self, media_set_uuid: &Uuid) -> Result<String, Error> {
let mut last_pool = None;
for entry in self.map.values() {
@ -363,19 +352,23 @@ impl Inventory {
match last_pool {
Some(pool) => Ok(pool.to_string()),
None => bail!("media set {} is incomplete - unable to lookup pool", media_set_uuid),
None => bail!(
"media set {} is incomplete - unable to lookup pool",
media_set_uuid
),
}
}
/// Compute a single media set
pub fn compute_media_set_members(&self, media_set_uuid: &Uuid) -> Result<MediaSet, Error> {
let mut set = MediaSet::with_data(media_set_uuid.clone(), Vec::new());
for entry in self.map.values() {
match entry.id.media_set_label {
None => continue,
Some(MediaSetLabel { seq_nr, ref uuid, .. }) => {
Some(MediaSetLabel {
seq_nr, ref uuid, ..
}) => {
if uuid != media_set_uuid {
continue;
}
@ -389,17 +382,17 @@ impl Inventory {
/// Compute all media sets
pub fn compute_media_set_list(&self) -> Result<HashMap<Uuid, MediaSet>, Error> {
let mut set_map: HashMap<Uuid, MediaSet> = HashMap::new();
for entry in self.map.values() {
match entry.id.media_set_label {
None => continue,
Some(MediaSetLabel { seq_nr, ref uuid, .. }) => {
let set = set_map.entry(uuid.clone()).or_insert_with(|| {
MediaSet::with_data(uuid.clone(), Vec::new())
});
Some(MediaSetLabel {
seq_nr, ref uuid, ..
}) => {
let set = set_map
.entry(uuid.clone())
.or_insert_with(|| MediaSet::with_data(uuid.clone(), Vec::new()));
set.insert_media(entry.id.label.uuid.clone(), seq_nr)?;
}
@ -411,10 +404,11 @@ impl Inventory {
/// Returns the latest media set for a pool
pub fn latest_media_set(&self, pool: &str) -> Option<Uuid> {
let mut last_set: Option<(Uuid, i64)> = None;
let set_list = self.map.values()
let set_list = self
.map
.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8; 16]);
@ -437,13 +431,19 @@ impl Inventory {
};
// consistency check - must be the only set with that ctime
let set_list = self.map.values()
let set_list = self
.map
.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.filter(|set| set.pool == pool && set.uuid.as_ref() != [0u8; 16]);
for set in set_list {
if set.uuid != uuid && set.ctime >= ctime { // should not happen
eprintln!("latest_media_set: found set with equal ctime ({}, {})", set.uuid, uuid);
if set.uuid != uuid && set.ctime >= ctime {
// should not happen
eprintln!(
"latest_media_set: found set with equal ctime ({}, {})",
set.uuid, uuid
);
return None;
}
}
@ -454,8 +454,9 @@ impl Inventory {
// Test if there is a media set (in the same pool) newer than this one.
// Return the ctime of the nearest media set
fn media_set_next_start_time(&self, media_set_uuid: &Uuid) -> Option<i64> {
let (pool, ctime) = match self.map.values()
let (pool, ctime) = match self
.map
.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.find_map(|set| {
if &set.uuid == media_set_uuid {
@ -468,7 +469,9 @@ impl Inventory {
None => return None,
};
let set_list = self.map.values()
let set_list = self
.map
.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.filter(|set| (&set.uuid != media_set_uuid) && (set.pool == pool));
@ -498,7 +501,6 @@ impl Inventory {
media_set_policy: &MediaSetPolicy,
retention_policy: &RetentionPolicy,
) -> i64 {
if let RetentionPolicy::KeepForever = retention_policy {
return i64::MAX;
}
@ -518,28 +520,22 @@ impl Inventory {
};
let max_use_time = match self.media_set_next_start_time(&set.uuid) {
Some(next_start_time) => {
match media_set_policy {
Some(next_start_time) => match media_set_policy {
MediaSetPolicy::AlwaysCreate => set_start_time,
_ => next_start_time,
}
}
None => {
match media_set_policy {
},
None => match media_set_policy {
MediaSetPolicy::ContinueCurrent => {
return i64::MAX;
}
MediaSetPolicy::AlwaysCreate => {
set_start_time
}
MediaSetPolicy::AlwaysCreate => set_start_time,
MediaSetPolicy::CreateAt(ref event) => {
match event.compute_next_event(set_start_time) {
Ok(Some(next)) => next,
Ok(None) | Err(_) => return i64::MAX,
}
}
}
}
},
};
match retention_policy {
@ -560,7 +556,6 @@ impl Inventory {
media_set_uuid: &Uuid,
template: Option<String>,
) -> Result<String, Error> {
if let Some(ctime) = self.media_set_start_time(media_set_uuid) {
let mut template = template.unwrap_or_else(|| String::from("%c"));
template = template.replace("%id%", &media_set_uuid.to_string());
@ -575,7 +570,6 @@ impl Inventory {
/// Generate and insert a new free tape (test helper)
pub fn generate_free_tape(&mut self, label_text: &str, ctime: i64) -> Uuid {
let label = MediaLabel {
label_text: label_text.to_string(),
uuid: Uuid::generate(),
@ -583,20 +577,21 @@ impl Inventory {
};
let uuid = label.uuid.clone();
self.store(MediaId { label, media_set_label: None }, false).unwrap();
self.store(
MediaId {
label,
media_set_label: None,
},
false,
)
.unwrap();
uuid
}
/// Generate and insert a new tape assigned to a specific pool
/// (test helper)
pub fn generate_assigned_tape(
&mut self,
label_text: &str,
pool: &str,
ctime: i64,
) -> Uuid {
pub fn generate_assigned_tape(&mut self, label_text: &str, pool: &str, ctime: i64) -> Uuid {
let label = MediaLabel {
label_text: label_text.to_string(),
uuid: Uuid::generate(),
@ -607,18 +602,20 @@ impl Inventory {
let set = MediaSetLabel::with_data(pool, [0u8; 16].into(), 0, ctime, None);
self.store(MediaId { label, media_set_label: Some(set) }, false).unwrap();
self.store(
MediaId {
label,
media_set_label: Some(set),
},
false,
)
.unwrap();
uuid
}
/// Generate and insert a used tape (test helper)
pub fn generate_used_tape(
&mut self,
label_text: &str,
set: MediaSetLabel,
ctime: i64,
) -> Uuid {
pub fn generate_used_tape(&mut self, label_text: &str, set: MediaSetLabel, ctime: i64) -> Uuid {
let label = MediaLabel {
label_text: label_text.to_string(),
uuid: Uuid::generate(),
@ -626,7 +623,14 @@ impl Inventory {
};
let uuid = label.uuid.clone();
self.store(MediaId { label, media_set_label: Some(set) }, false).unwrap();
self.store(
MediaId {
label,
media_set_label: Some(set),
},
false,
)
.unwrap();
uuid
}
@ -634,13 +638,11 @@ impl Inventory {
// Status/location handling
impl Inventory {
/// Returns status and location with reasonable defaults.
///
/// Default status is 'MediaStatus::Unknown'.
/// Default location is 'MediaLocation::Offline'.
pub fn status_and_location(&self, uuid: &Uuid) -> (MediaStatus, MediaLocation) {
match self.map.get(uuid) {
None => {
// no info stored - assume media is writable/offline
@ -689,7 +691,11 @@ impl Inventory {
}
// Lock database, reload database, set location, store database
fn set_media_location(&mut self, uuid: &Uuid, location: Option<MediaLocation>) -> Result<(), Error> {
fn set_media_location(
&mut self,
uuid: &Uuid,
location: Option<MediaLocation>,
) -> Result<(), Error> {
let _lock = self.lock()?;
self.map = Self::load_media_db(&self.inventory_path)?;
if let Some(entry) = self.map.get_mut(uuid) {
@ -742,7 +748,6 @@ impl Inventory {
Ok(())
}
}
/// Lock a media pool
@ -778,11 +783,7 @@ pub fn lock_media_set(
// shell completion helper
/// List of known media uuids
pub fn complete_media_uuid(
_arg: &str,
_param: &HashMap<String, String>,
) -> Vec<String> {
pub fn complete_media_uuid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let inventory = match Inventory::load(Path::new(TAPE_STATUS_DIR)) {
Ok(inventory) => inventory,
Err(_) => return Vec::new(),
@ -792,33 +793,32 @@ pub fn complete_media_uuid(
}
/// List of known media sets
pub fn complete_media_set_uuid(
_arg: &str,
_param: &HashMap<String, String>,
) -> Vec<String> {
pub fn complete_media_set_uuid(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let inventory = match Inventory::load(Path::new(TAPE_STATUS_DIR)) {
Ok(inventory) => inventory,
Err(_) => return Vec::new(),
};
inventory.map.values()
inventory
.map
.values()
.filter_map(|entry| entry.id.media_set_label.as_ref())
.map(|set| set.uuid.to_string()).collect()
.map(|set| set.uuid.to_string())
.collect()
}
/// List of known media labels (barcodes)
pub fn complete_media_label_text(
_arg: &str,
_param: &HashMap<String, String>,
) -> Vec<String> {
pub fn complete_media_label_text(_arg: &str, _param: &HashMap<String, String>) -> Vec<String> {
let inventory = match Inventory::load(Path::new(TAPE_STATUS_DIR)) {
Ok(inventory) => inventory,
Err(_) => return Vec::new(),
};
inventory.map.values().map(|entry| entry.id.label.label_text.clone()).collect()
inventory
.map
.values()
.map(|entry| entry.id.label.label_text.clone())
.collect()
}
pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap<String, String>) -> Vec<String> {
@ -833,11 +833,13 @@ pub fn complete_media_set_snapshots(_arg: &str, param: &HashMap<String, String>)
};
let mut res = Vec::new();
let media_ids = inventory.list_used_media().into_iter().filter(|media| {
match &media.media_set_label {
let media_ids =
inventory
.list_used_media()
.into_iter()
.filter(|media| match &media.media_set_label {
Some(label) => label.uuid == media_set_uuid,
None => false,
}
});
for media_id in media_ids {
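The inventory itself is plain JSON on disk: `load_media_db` reads an array of entries via `file_get_json` and keys them by uuid in a `BTreeMap`, and `store` writes the whole map back under the lock. A cut-down sketch of that load/store round trip, with simplified stand-in types (`Entry`, string uuids) instead of the real `MediaStateEntry`:

use std::collections::BTreeMap;

use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Clone)]
struct Entry {
    uuid: String, // stand-in for proxmox_uuid::Uuid
    label_text: String,
}

// inventory.json holds a JSON array; in memory the entries are keyed by uuid
fn load_entries(data: &str) -> Result<BTreeMap<String, Entry>, serde_json::Error> {
    let list: Vec<Entry> = serde_json::from_str(data)?;
    Ok(list.into_iter().map(|e| (e.uuid.clone(), e)).collect())
}

fn store_entries(map: &BTreeMap<String, Entry>) -> Result<String, serde_json::Error> {
    // persisted back as a plain array, like the real inventory file
    serde_json::to_string_pretty(&map.values().cloned().collect::<Vec<_>>())
}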

View File

@ -1,30 +1,21 @@
use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::fs::File;
use std::io::{Write, Read, BufReader, Seek, SeekFrom};
use std::io::{BufReader, Read, Seek, SeekFrom, Write};
use std::os::unix::io::AsRawFd;
use std::path::{PathBuf, Path};
use std::collections::{HashSet, HashMap};
use std::path::{Path, PathBuf};
use anyhow::{bail, format_err, Error};
use endian_trait::Endian;
use proxmox_sys::fs::read_subdir;
use pbs_datastore::backup_info::BackupDir;
use proxmox_sys::fs::read_subdir;
use proxmox_sys::fs::{
fchown,
create_path,
CreateOptions,
};
use proxmox_io::{WriteExt, ReadExt};
use proxmox_io::{ReadExt, WriteExt};
use proxmox_sys::fs::{create_path, fchown, CreateOptions};
use proxmox_uuid::Uuid;
use crate::{
tape::{
MediaId,
file_formats::MediaSetLabel,
},
};
use crate::tape::{file_formats::MediaSetLabel, MediaId};
pub struct DatastoreContent {
pub snapshot_index: HashMap<String, u64>, // snapshot => file_nr
@ -32,7 +23,6 @@ pub struct DatastoreContent {
}
impl DatastoreContent {
pub fn new() -> Self {
Self {
chunk_index: HashMap::new(),
@ -48,7 +38,6 @@ impl DatastoreContent {
///
/// We use a simple binary format to store data on disk.
pub struct MediaCatalog {
uuid: Uuid, // BackupMedia uuid
file: Option<File>,
@ -65,14 +54,14 @@ pub struct MediaCatalog {
}
impl MediaCatalog {
/// Magic number for media catalog files.
// openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.0")[0..8]
// Note: this version did not store datastore names (not supported anymore)
pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0: [u8; 8] = [221, 29, 164, 1, 59, 69, 19, 40];
// openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.1")[0..8]
pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] = [76, 142, 232, 193, 32, 168, 137, 113];
pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] =
[76, 142, 232, 193, 32, 168, 137, 113];
/// List media with catalogs
pub fn media_with_catalogs(base_path: &Path) -> Result<HashSet<Uuid>, Error> {
@ -81,7 +70,9 @@ impl MediaCatalog {
for entry in read_subdir(libc::AT_FDCWD, base_path)? {
let entry = entry?;
let name = unsafe { entry.file_name_utf8_unchecked() };
if !name.ends_with(".log") { continue; }
if !name.ends_with(".log") {
continue;
}
if let Ok(uuid) = Uuid::parse_str(&name[..(name.len() - 4)]) {
catalogs.insert(uuid);
}
@ -111,7 +102,6 @@ impl MediaCatalog {
/// Destroy the media catalog (remove all files)
pub fn destroy(base_path: &Path, uuid: &Uuid) -> Result<(), Error> {
let path = Self::catalog_path(base_path, uuid);
match std::fs::remove_file(path) {
@ -122,11 +112,7 @@ impl MediaCatalog {
}
/// Destroy the media catalog if media_set uuid does not match
pub fn destroy_unrelated_catalog(
base_path: &Path,
media_id: &MediaId,
) -> Result<(), Error> {
pub fn destroy_unrelated_catalog(base_path: &Path, media_id: &MediaId) -> Result<(), Error> {
let uuid = &media_id.label.uuid;
let path = Self::catalog_path(base_path, uuid);
@ -144,8 +130,8 @@ impl MediaCatalog {
let expected_media_set_id = match media_id.media_set_label {
None => {
std::fs::remove_file(path)?;
return Ok(())
},
return Ok(());
}
Some(ref set) => &set.uuid,
};
@ -197,13 +183,11 @@ impl MediaCatalog {
write: bool,
create: bool,
) -> Result<Self, Error> {
let uuid = &media_id.label.uuid;
let path = Self::catalog_path(base_path, uuid);
let me = proxmox_lang::try_block!({
Self::create_basedir(base_path)?;
let mut file = std::fs::OpenOptions::new()
@ -213,7 +197,11 @@ impl MediaCatalog {
.open(&path)?;
let backup_user = pbs_config::backup_user()?;
fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid))
fchown(
file.as_raw_fd(),
Some(backup_user.uid),
Some(backup_user.gid),
)
.map_err(|err| format_err!("fchown failed - {}", err))?;
let mut me = Self {
@ -234,26 +222,22 @@ impl MediaCatalog {
let (found_magic_number, _) = result?;
if !found_magic_number {
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
me.pending
.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
}
if write {
me.file = Some(file);
}
Ok(me)
}).map_err(|err: Error| {
format_err!("unable to open media catalog {:?} - {}", path, err)
})?;
})
.map_err(|err: Error| format_err!("unable to open media catalog {:?} - {}", path, err))?;
Ok(me)
}
/// Creates a temporary empty catalog file
pub fn create_temporary_database_file(
base_path: &Path,
uuid: &Uuid,
) -> Result<File, Error> {
pub fn create_temporary_database_file(base_path: &Path, uuid: &Uuid) -> Result<File, Error> {
Self::create_basedir(base_path)?;
let tmp_path = Self::tmp_catalog_path(base_path, uuid);
@ -271,7 +255,11 @@ impl MediaCatalog {
}
let backup_user = pbs_config::backup_user()?;
fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid))
fchown(
file.as_raw_fd(),
Some(backup_user.uid),
Some(backup_user.gid),
)
.map_err(|err| format_err!("fchown failed - {}", err))?;
Ok(file)
@ -285,13 +273,11 @@ impl MediaCatalog {
media_id: &MediaId,
log_to_stdout: bool,
) -> Result<Self, Error> {
let uuid = &media_id.label.uuid;
let tmp_path = Self::tmp_catalog_path(base_path, uuid);
let me = proxmox_lang::try_block!({
let file = Self::create_temporary_database_file(base_path, uuid)?;
let mut me = Self {
@ -306,7 +292,8 @@ impl MediaCatalog {
me.log_to_stdout = log_to_stdout;
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
me.pending
.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
me.register_label(&media_id.label.uuid, 0, 0)?;
@ -317,8 +304,13 @@ impl MediaCatalog {
me.commit()?;
Ok(me)
}).map_err(|err: Error| {
format_err!("unable to create temporary media catalog {:?} - {}", tmp_path, err)
})
.map_err(|err: Error| {
format_err!(
"unable to create temporary media catalog {:?} - {}",
tmp_path,
err
)
})?;
Ok(me)
@ -333,7 +325,6 @@ impl MediaCatalog {
uuid: &Uuid,
commit: bool,
) -> Result<(), Error> {
let tmp_path = Self::tmp_catalog_path(base_path, uuid);
if commit {
@ -365,7 +356,6 @@ impl MediaCatalog {
///
/// Fixme: this should be atomic ...
pub fn commit(&mut self) -> Result<(), Error> {
if self.pending.is_empty() {
return Ok(());
}
@ -410,7 +400,6 @@ impl MediaCatalog {
media_id: &MediaId,
log_to_stdout: bool,
) -> Result<Self, Error> {
let uuid = &media_id.label.uuid;
let me = Self::create_temporary_database(base_path, media_id, log_to_stdout)?;
@ -453,9 +442,11 @@ impl MediaCatalog {
}
fn check_register_label(&self, file_number: u64, uuid: &Uuid) -> Result<(), Error> {
if file_number >= 2 {
bail!("register label failed: got wrong file number ({} >= 2)", file_number);
bail!(
"register label failed: got wrong file number ({} >= 2)",
file_number
);
}
if file_number == 0 && uuid != &self.uuid {
@ -472,8 +463,11 @@ impl MediaCatalog {
};
if file_number != expected_file_number {
bail!("register label failed: got unexpected file number ({} < {})",
file_number, expected_file_number);
bail!(
"register label failed: got unexpected file number ({} < {})",
file_number,
expected_file_number
);
}
Ok(())
}
@ -485,7 +479,6 @@ impl MediaCatalog {
seq_nr: u64, // only used for media set labels
file_number: u64,
) -> Result<(), Error> {
self.check_register_label(file_number, uuid)?;
if file_number == 0 && seq_nr != 0 {
@ -504,7 +497,9 @@ impl MediaCatalog {
self.pending.push(b'L');
unsafe { self.pending.write_le_value(entry)?; }
unsafe {
self.pending.write_le_value(entry)?;
}
self.last_entry = Some((uuid.clone(), file_number));
@ -530,11 +525,7 @@ impl MediaCatalog {
/// Register a chunk
///
/// Only valid after start_chunk_archive.
fn register_chunk(
&mut self,
digest: &[u8;32],
) -> Result<(), Error> {
fn register_chunk(&mut self, digest: &[u8; 32]) -> Result<(), Error> {
let (file_number, store) = match self.current_archive {
None => bail!("register_chunk failed: no archive started"),
Some((_, file_number, ref store)) => (file_number, store),
@ -558,13 +549,15 @@ impl MediaCatalog {
}
fn check_start_chunk_archive(&self, file_number: u64) -> Result<(), Error> {
if self.current_archive.is_some() {
bail!("start_chunk_archive failed: already started");
}
if file_number < 2 {
bail!("start_chunk_archive failed: got wrong file number ({} < 2)", file_number);
bail!(
"start_chunk_archive failed: got wrong file number ({} < 2)",
file_number
);
}
let expect_min_file_number = match self.last_entry {
@ -573,8 +566,11 @@ impl MediaCatalog {
};
if file_number < expect_min_file_number {
bail!("start_chunk_archive: got unexpected file number ({} < {})",
file_number, expect_min_file_number);
bail!(
"start_chunk_archive: got unexpected file number ({} < {})",
file_number,
expect_min_file_number
);
}
Ok(())
@ -587,7 +583,6 @@ impl MediaCatalog {
file_number: u64,
store: &str,
) -> Result<(), Error> {
self.check_start_chunk_archive(file_number)?;
let entry = ChunkArchiveStart {
@ -602,10 +597,14 @@ impl MediaCatalog {
self.pending.push(b'A');
unsafe { self.pending.write_le_value(entry)?; }
unsafe {
self.pending.write_le_value(entry)?;
}
self.pending.extend(store.as_bytes());
self.content.entry(store.to_string()).or_insert(DatastoreContent::new());
self.content
.entry(store.to_string())
.or_insert(DatastoreContent::new());
self.current_archive = Some((uuid, file_number, store.to_string()));
@ -613,7 +612,6 @@ impl MediaCatalog {
}
fn check_end_chunk_archive(&self, uuid: &Uuid, file_number: u64) -> Result<(), Error> {
match self.current_archive {
None => bail!("end_chunk archive failed: not started"),
Some((ref expected_uuid, expected_file_number, ..)) => {
@ -621,8 +619,11 @@ impl MediaCatalog {
bail!("end_chunk_archive failed: got unexpected uuid");
}
if file_number != expected_file_number {
bail!("end_chunk_archive failed: got unexpected file number ({} != {})",
file_number, expected_file_number);
bail!(
"end_chunk_archive failed: got unexpected file number ({} != {})",
file_number,
expected_file_number
);
}
}
}
@ -631,11 +632,9 @@ impl MediaCatalog {
/// End a chunk archive section
fn end_chunk_archive(&mut self) -> Result<(), Error> {
match self.current_archive.take() {
None => bail!("end_chunk_archive failed: not started"),
Some((uuid, file_number, ..)) => {
let entry = ChunkArchiveEnd {
file_number,
uuid: *uuid.as_bytes(),
@ -647,7 +646,9 @@ impl MediaCatalog {
self.pending.push(b'E');
unsafe { self.pending.write_le_value(entry)?; }
unsafe {
self.pending.write_le_value(entry)?;
}
self.last_entry = Some((uuid, file_number));
}
@ -657,13 +658,15 @@ impl MediaCatalog {
}
fn check_register_snapshot(&self, file_number: u64, snapshot: &str) -> Result<(), Error> {
if self.current_archive.is_some() {
bail!("register_snapshot failed: inside chunk_archive");
}
if file_number < 2 {
bail!("register_snapshot failed: got wrong file number ({} < 2)", file_number);
bail!(
"register_snapshot failed: got wrong file number ({} < 2)",
file_number
);
}
let expect_min_file_number = match self.last_entry {
@ -672,12 +675,19 @@ impl MediaCatalog {
};
if file_number < expect_min_file_number {
bail!("register_snapshot failed: got unexpected file number ({} < {})",
file_number, expect_min_file_number);
bail!(
"register_snapshot failed: got unexpected file number ({} < {})",
file_number,
expect_min_file_number
);
}
if let Err(err) = snapshot.parse::<BackupDir>() {
bail!("register_snapshot failed: unable to parse snapshot '{}' - {}", snapshot, err);
bail!(
"register_snapshot failed: unable to parse snapshot '{}' - {}",
snapshot,
err
);
}
Ok(())
@ -691,7 +701,6 @@ impl MediaCatalog {
store: &str,
snapshot: &str,
) -> Result<(), Error> {
self.check_register_snapshot(file_number, snapshot)?;
let entry = SnapshotEntry {
@ -702,20 +711,32 @@ impl MediaCatalog {
};
if self.log_to_stdout {
println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, snapshot);
println!(
"S|{}|{}|{}:{}",
file_number,
uuid.to_string(),
store,
snapshot
);
}
self.pending.push(b'S');
unsafe { self.pending.write_le_value(entry)?; }
unsafe {
self.pending.write_le_value(entry)?;
}
self.pending.extend(store.as_bytes());
self.pending.push(b':');
self.pending.extend(snapshot.as_bytes());
let content = self.content.entry(store.to_string())
let content = self
.content
.entry(store.to_string())
.or_insert(DatastoreContent::new());
content.snapshot_index.insert(snapshot.to_string(), file_number);
content
.snapshot_index
.insert(snapshot.to_string(), file_number);
self.last_entry = Some((uuid, file_number));
@ -726,7 +747,6 @@ impl MediaCatalog {
pub fn parse_catalog_header<R: Read>(
reader: &mut R,
) -> Result<(bool, Option<Uuid>, Option<Uuid>), Error> {
// read/check magic number
let mut magic = [0u8; 8];
if !reader.read_exact_or_eof(&mut magic)? {
@ -774,7 +794,6 @@ impl MediaCatalog {
file: &mut File,
media_set_label: Option<&MediaSetLabel>,
) -> Result<(bool, Option<Uuid>), Error> {
let mut file = BufReader::new(file);
let mut found_magic_number = false;
let mut media_set_uuid = None;
@ -782,10 +801,14 @@ impl MediaCatalog {
loop {
let pos = file.seek(SeekFrom::Current(0))?; // get current pos
if pos == 0 { // read/check magic number
if pos == 0 {
// read/check magic number
let mut magic = [0u8; 8];
match file.read_exact_or_eof(&mut magic) {
Ok(false) => { /* EOF */ break; }
Ok(false) => {
/* EOF */
break;
}
Ok(true) => { /* OK */ }
Err(err) => bail!("read failed - {}", err),
}
@ -802,7 +825,10 @@ impl MediaCatalog {
let mut entry_type = [0u8; 1];
match file.read_exact_or_eof(&mut entry_type) {
Ok(false) => { /* EOF */ break; }
Ok(false) => {
/* EOF */
break;
}
Ok(true) => { /* OK */ }
Err(err) => bail!("read failed - {}", err),
}
@ -833,7 +859,8 @@ impl MediaCatalog {
self.check_start_chunk_archive(file_number)?;
self.content.entry(store.to_string())
self.content
.entry(store.to_string())
.or_insert(DatastoreContent::new());
self.current_archive = Some((uuid, file_number, store.to_string()));
@ -867,10 +894,14 @@ impl MediaCatalog {
self.check_register_snapshot(file_number, snapshot)?;
let content = self.content.entry(store.to_string())
let content = self
.content
.entry(store.to_string())
.or_insert(DatastoreContent::new());
content.snapshot_index.insert(snapshot.to_string(), file_number);
content
.snapshot_index
.insert(snapshot.to_string(), file_number);
self.last_entry = Some((uuid, file_number));
}
@ -899,7 +930,6 @@ impl MediaCatalog {
bail!("unknown entry type '{}'", entry_type[0]);
}
}
}
Ok((found_magic_number, media_set_uuid))
@ -914,7 +944,6 @@ pub struct MediaSetCatalog {
}
impl MediaSetCatalog {
/// Creates a new instance
pub fn new() -> Self {
Self {
@ -924,7 +953,6 @@ impl MediaSetCatalog {
/// Add a catalog
pub fn append_catalog(&mut self, catalog: MediaCatalog) -> Result<(), Error> {
if self.catalog_list.get(&catalog.uuid).is_some() {
bail!("MediaSetCatalog already contains media '{}'", catalog.uuid);
}
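The two catalog magic constants above are documented right in their comments: the first eight bytes of a SHA-256 over a fixed version string. A quick way to reproduce them, assuming the `openssl` crate that proxmox-backup already depends on:

// First 8 bytes of SHA-256 over the catalog version string.
fn catalog_magic(version_string: &str) -> [u8; 8] {
    let digest = openssl::sha::sha256(version_string.as_bytes());
    let mut magic = [0u8; 8];
    magic.copy_from_slice(&digest[0..8]);
    magic
}

// per the comments above:
// catalog_magic("Proxmox Backup Media Catalog v1.1")
//     == [76, 142, 232, 193, 32, 168, 137, 113]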

View File

@ -1,7 +1,7 @@
use std::path::Path;
use std::io::{BufRead, BufReader};
use std::path::Path;
use anyhow::{format_err, bail, Error};
use anyhow::{bail, format_err, Error};
use proxmox_sys::fs::CreateOptions;
@ -15,7 +15,6 @@ pub fn media_catalog_snapshot_list(
base_path: &Path,
media_id: &MediaId,
) -> Result<Vec<(String, String)>, Error> {
let uuid = &media_id.label.uuid;
let mut cache_path = base_path.to_owned();
@ -29,7 +28,10 @@ pub fn media_catalog_snapshot_list(
Err(err) => bail!("unable to stat media catalog {:?} - {}", catalog_path, err),
};
let cache_id = format!("{:016X}-{:016X}-{:016X}", stat.st_ino, stat.st_size as u64, stat.st_mtime as u64);
let cache_id = format!(
"{:016X}-{:016X}-{:016X}",
stat.st_ino, stat.st_size as u64, stat.st_mtime as u64
);
match std::fs::OpenOptions::new().read(true).open(&cache_path) {
Ok(file) => {
@ -38,7 +40,8 @@ pub fn media_catalog_snapshot_list(
let mut lines = file.lines();
match lines.next() {
Some(Ok(id)) => {
if id != cache_id { // cache is outdated - rewrite
if id != cache_id {
// cache is outdated - rewrite
return write_snapshot_cache(base_path, media_id, &cache_path, &cache_id);
}
}
@ -72,7 +75,6 @@ fn write_snapshot_cache(
cache_path: &Path,
cache_id: &str,
) -> Result<Vec<(String, String)>, Error> {
// open normal catalog and write cache
let catalog = MediaCatalog::open(base_path, media_id, false, false)?;
@ -98,12 +100,7 @@ fn write_snapshot_cache(
.owner(backup_user.uid)
.group(backup_user.gid);
proxmox_sys::fs::replace_file(
cache_path,
data.as_bytes(),
options,
false,
)?;
proxmox_sys::fs::replace_file(cache_path, data.as_bytes(), options, false)?;
Ok(list)
}
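The cache key above is simply inode, size and mtime of the catalog file, so a rewritten or grown catalog invalidates the cached snapshot list. The same id can be built with std alone (sketch, unix-only):

use std::os::unix::fs::MetadataExt;
use std::path::Path;

// Identify a catalog file by inode, size and mtime - if any of them change,
// the cached snapshot list is considered outdated and gets rewritten.
fn cache_id(path: &Path) -> std::io::Result<String> {
    let meta = std::fs::metadata(path)?;
    Ok(format!(
        "{:016X}-{:016X}-{:016X}",
        meta.ino(),
        meta.size(),
        meta.mtime() as u64
    ))
}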

View File

@ -7,7 +7,7 @@
//!
//!
use std::path::{PathBuf, Path};
use std::path::{Path, PathBuf};
use anyhow::{bail, Error};
use serde::{Deserialize, Serialize};
@ -15,28 +15,18 @@ use serde::{Deserialize, Serialize};
use proxmox_uuid::Uuid;
use pbs_api_types::{
Fingerprint, MediaStatus, MediaLocation, MediaSetPolicy, RetentionPolicy,
MediaPoolConfig,
Fingerprint, MediaLocation, MediaPoolConfig, MediaSetPolicy, MediaStatus, RetentionPolicy,
};
use pbs_config::BackupLockGuard;
use crate::tape::{
MediaId,
file_formats::{MediaLabel, MediaSetLabel},
lock_media_pool, lock_media_set, lock_unassigned_media_pool, Inventory, MediaCatalog, MediaId,
MediaSet,
Inventory,
MediaCatalog,
lock_media_set,
lock_media_pool,
lock_unassigned_media_pool,
file_formats::{
MediaLabel,
MediaSetLabel,
},
};
/// Media Pool
pub struct MediaPool {
name: String,
state_path: PathBuf,
@ -59,7 +49,6 @@ pub struct MediaPool {
}
impl MediaPool {
/// Creates a new instance
///
/// If you specify a `changer_name`, only media accessible via
@ -76,7 +65,6 @@ impl MediaPool {
encrypt_fingerprint: Option<Fingerprint>,
no_media_set_locking: bool, // for list_media()
) -> Result<Self, Error> {
let _pool_lock = if no_media_set_locking {
None
} else {
@ -130,10 +118,17 @@ impl MediaPool {
changer_name: Option<String>,
no_media_set_locking: bool, // for list_media()
) -> Result<Self, Error> {
let allocation = config
.allocation
.clone()
.unwrap_or_else(|| String::from("continue"))
.parse()?;
let allocation = config.allocation.clone().unwrap_or_else(|| String::from("continue")).parse()?;
let retention = config.retention.clone().unwrap_or_else(|| String::from("keep")).parse()?;
let retention = config
.retention
.clone()
.unwrap_or_else(|| String::from("keep"))
.parse()?;
let encrypt_fingerprint = match config.encrypt {
Some(ref fingerprint) => Some(fingerprint.parse()?),
@ -166,7 +161,6 @@ impl MediaPool {
}
fn compute_media_state(&self, media_id: &MediaId) -> (MediaStatus, MediaLocation) {
let (status, location) = self.inventory.status_and_location(&media_id.label.uuid);
match status {
@ -183,10 +177,12 @@ impl MediaPool {
Some(ref set) => set,
};
if set.pool != self.name { // should never trigger
if set.pool != self.name {
// should never trigger
return (MediaStatus::Unknown, location); // belong to another pool
}
if set.uuid.as_ref() == [0u8;16] { // not assigned to any pool
if set.uuid.as_ref() == [0u8; 16] {
// not assigned to any pool
return (MediaStatus::Writable, location);
}
@ -211,31 +207,28 @@ impl MediaPool {
if let Some(ref set) = media_id.media_set_label {
if set.pool != self.name {
bail!("media does not belong to pool ({} != {})", set.pool, self.name);
bail!(
"media does not belong to pool ({} != {})",
set.pool,
self.name
);
}
}
let (status, location) = self.compute_media_state(&media_id);
Ok(BackupMedia::with_media_id(
media_id,
location,
status,
))
Ok(BackupMedia::with_media_id(media_id, location, status))
}
/// List all media associated with this pool
pub fn list_media(&self) -> Vec<BackupMedia> {
let media_id_list = self.inventory.list_pool_media(&self.name);
media_id_list.into_iter()
media_id_list
.into_iter()
.map(|media_id| {
let (status, location) = self.compute_media_state(&media_id);
BackupMedia::with_media_id(
media_id,
location,
status,
)
BackupMedia::with_media_id(media_id, location, status)
})
.collect()
}
@ -263,7 +256,6 @@ impl MediaPool {
current_time: i64,
force: bool,
) -> Result<Option<String>, Error> {
let _pool_lock = if self.no_media_set_locking {
None
} else {
@ -276,9 +268,7 @@ impl MediaPool {
Some(String::from("forced"))
} else {
match self.current_set_usable() {
Err(err) => {
Some(err.to_string())
}
Err(err) => Some(err.to_string()),
Ok(_) => None,
}
};
@ -289,10 +279,16 @@ impl MediaPool {
create_new_set = Some(String::from("policy is AlwaysCreate"));
}
MediaSetPolicy::CreateAt(event) => {
if let Some(set_start_time) = self.inventory.media_set_start_time(self.current_media_set.uuid()) {
if let Ok(Some(alloc_time)) = event.compute_next_event(set_start_time as i64) {
if let Some(set_start_time) = self
.inventory
.media_set_start_time(self.current_media_set.uuid())
{
if let Ok(Some(alloc_time)) =
event.compute_next_event(set_start_time as i64)
{
if current_time >= alloc_time {
create_new_set = Some(String::from("policy CreateAt event triggered"));
create_new_set =
Some(String::from("policy CreateAt event triggered"));
}
}
}
@ -335,8 +331,9 @@ impl MediaPool {
return false;
}
let expire_time = self.inventory.media_expire_time(
media.id(), &self.media_set_policy, &self.retention);
let expire_time =
self.inventory
.media_expire_time(media.id(), &self.media_set_policy, &self.retention);
current_time >= expire_time
}
@ -368,8 +365,11 @@ impl MediaPool {
}
}
fn add_media_to_current_set(&mut self, mut media_id: MediaId, current_time: i64) -> Result<(), Error> {
fn add_media_to_current_set(
&mut self,
mut media_id: MediaId,
current_time: i64,
) -> Result<(), Error> {
if self.current_media_set_lock.is_none() {
bail!("add_media_to_current_set: media set is not locked - internal error");
}
@ -406,16 +406,19 @@ impl MediaPool {
let mut free_media = Vec::new();
for media_id in media_list {
let (status, location) = self.compute_media_state(media_id);
if media_id.media_set_label.is_some() { continue; } // should not happen
if media_id.media_set_label.is_some() {
continue;
} // should not happen
if !self.location_is_available(&location) {
continue;
}
// only consider writable media
if status != MediaStatus::Writable { continue; }
if status != MediaStatus::Writable {
continue;
}
free_media.push(media_id);
}
@ -462,7 +465,11 @@ impl MediaPool {
}
// Get next expired media
pub fn next_expired_media(&self, current_time: i64, media_list: &[BackupMedia]) -> Option<MediaId> {
pub fn next_expired_media(
&self,
current_time: i64,
media_list: &[BackupMedia],
) -> Option<MediaId> {
let mut expired_media = Vec::new();
for media in media_list.into_iter() {
@ -487,7 +494,11 @@ impl MediaPool {
// sort expired_media, newest first -> oldest last
expired_media.sort_unstable_by(|a, b| {
let mut res = b.media_set_label().unwrap().ctime.cmp(&a.media_set_label().unwrap().ctime);
let mut res = b
.media_set_label()
.unwrap()
.ctime
.cmp(&a.media_set_label().unwrap().ctime);
if res == std::cmp::Ordering::Equal {
res = b.label().label_text.cmp(&a.label().label_text);
}
@ -541,13 +552,15 @@ impl MediaPool {
return Ok(media_id);
}
bail!("guess_next_writable_media in pool '{}' failed: no usable media found", self.name());
bail!(
"guess_next_writable_media in pool '{}' failed: no usable media found",
self.name()
);
}
/// Allocates a writable media to the current media set
// Note: Please keep in sync with guess_next_writable_media()
pub fn alloc_writable_media(&mut self, current_time: i64) -> Result<Uuid, Error> {
if self.current_media_set_lock.is_none() {
bail!("alloc_writable_media: media set is not locked - internal error");
}
@ -560,7 +573,8 @@ impl MediaPool {
return Ok(media.uuid().clone());
}
{ // limit pool lock scope
{
// limit pool lock scope
let _pool_lock = lock_media_pool(&self.state_path, &self.name)?;
self.inventory.reload()?;
@ -604,7 +618,10 @@ impl MediaPool {
return Ok(uuid);
}
bail!("alloc writable media in pool '{}' failed: no usable media found", self.name());
bail!(
"alloc writable media in pool '{}' failed: no usable media found",
self.name()
);
}
/// check if the current media set is usable for writing
@ -615,7 +632,6 @@ impl MediaPool {
/// This return error when the media set must not be used any
/// longer because of consistency errors.
pub fn current_set_usable(&self) -> Result<bool, Error> {
let media_list = self.current_media_set.media_list();
let media_count = media_list.len();
@ -635,15 +651,20 @@ impl MediaPool {
};
let media = self.lookup_media(uuid)?;
match media.media_set_label() {
Some(MediaSetLabel { seq_nr, uuid, ..}) if *seq_nr == seq as u64 && uuid == set_uuid => { /* OK */ },
Some(MediaSetLabel { seq_nr, uuid, .. })
if *seq_nr == seq as u64 && uuid == set_uuid =>
{ /* OK */ }
Some(MediaSetLabel { seq_nr, uuid, .. }) if uuid == set_uuid => {
bail!("media sequence error ({} != {})", *seq_nr, seq);
},
Some(MediaSetLabel { uuid, ..}) => bail!("media owner error ({} != {})", uuid, set_uuid),
}
Some(MediaSetLabel { uuid, .. }) => {
bail!("media owner error ({} != {})", uuid, set_uuid)
}
None => bail!("media owner error (no owner)"),
}
if let Some(set) = media.media_set_label() { // always true here
if let Some(set) = media.media_set_label() {
// always true here
if set.encryption_key_fingerprint != self.encrypt_fingerprint {
bail!("pool encryption key changed");
}
@ -660,7 +681,7 @@ impl MediaPool {
}
match media.status() {
MediaStatus::Full => { /* OK */ },
MediaStatus::Full => { /* OK */ }
MediaStatus::Writable if (seq + 1) == media_count => {
let media_location = media.location();
if self.location_is_available(media_location) {
@ -670,8 +691,11 @@ impl MediaPool {
bail!("writable media offsite in vault '{}'", vault);
}
}
},
_ => bail!("unable to use media set - wrong media status {:?}", media.status()),
}
_ => bail!(
"unable to use media set - wrong media status {:?}",
media.status()
),
}
}
@ -684,9 +708,9 @@ impl MediaPool {
media_set_uuid: &Uuid,
template: Option<String>,
) -> Result<String, Error> {
self.inventory.generate_media_set_name(media_set_uuid, template)
self.inventory
.generate_media_set_name(media_set_uuid, template)
}
}
/// Backup media
@ -704,14 +728,13 @@ pub struct BackupMedia {
}
impl BackupMedia {
/// Creates a new instance
pub fn with_media_id(
id: MediaId,
location: MediaLocation,
status: MediaStatus,
) -> Self {
Self { id, location, status }
pub fn with_media_id(id: MediaId, location: MediaLocation, status: MediaStatus) -> Self {
Self {
id,
location,
status,
}
}
/// Returns the media location
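Around the expired-media handling in this file, the interesting bit is the ordering: candidates are sorted newest-first by media-set creation time, with the label text as a tie breaker, so the oldest expired tape ends up at the back of the list. A reduced sketch of that comparator with simplified stand-in types:

#[derive(Debug, Clone)]
struct Candidate {
    ctime: i64, // media-set creation time
    label_text: String,
}

// Sort newest first (ties broken by label text); the oldest expired media
// ends up last.
fn sort_newest_first(expired: &mut Vec<Candidate>) {
    expired.sort_unstable_by(|a, b| {
        b.ctime
            .cmp(&a.ctime)
            .then_with(|| b.label_text.cmp(&a.label_text))
    });
}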

View File

@ -1,5 +1,5 @@
use anyhow::{bail, Error};
use serde::{Serialize, Deserialize};
use serde::{Deserialize, Serialize};
use proxmox_uuid::Uuid;
@ -13,7 +13,6 @@ pub struct MediaSet {
}
impl MediaSet {
pub const MEDIA_SET_MAX_SEQ_NR: u64 = 100;
pub fn new() -> Self {
@ -42,14 +41,21 @@ impl MediaSet {
pub fn insert_media(&mut self, uuid: Uuid, seq_nr: u64) -> Result<(), Error> {
if seq_nr > Self::MEDIA_SET_MAX_SEQ_NR {
bail!("media set sequence number to large in media set {} ({} > {})",
self.uuid.to_string(), seq_nr, Self::MEDIA_SET_MAX_SEQ_NR);
bail!(
"media set sequence number to large in media set {} ({} > {})",
self.uuid.to_string(),
seq_nr,
Self::MEDIA_SET_MAX_SEQ_NR
);
}
let seq_nr = seq_nr as usize;
if self.media_list.len() > seq_nr {
if self.media_list[seq_nr].is_some() {
bail!("found duplicate sequence number in media set '{}/{}'",
self.uuid.to_string(), seq_nr);
bail!(
"found duplicate sequence number in media set '{}/{}'",
self.uuid.to_string(),
seq_nr
);
}
} else {
self.media_list.resize(seq_nr + 1, None);
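The `insert_media` logic above treats the media list as a slot-per-sequence-number vector: index equals `seq_nr`, gaps stay `None`, duplicates are rejected, and the vector grows on demand. A sketch with plain types (the final assignment is my reading of the surrounding code, which the hunk cuts off):

// Place `uuid` at index `seq_nr`; reject duplicates and grow the list with
// `None` gaps when the sequence number is past the current end.
fn insert_at(list: &mut Vec<Option<String>>, uuid: String, seq_nr: usize) -> Result<(), String> {
    if list.len() > seq_nr {
        if list[seq_nr].is_some() {
            return Err(format!("duplicate sequence number {}", seq_nr));
        }
    } else {
        list.resize(seq_nr + 1, None);
    }
    list[seq_nr] = Some(uuid);
    Ok(())
}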

View File

@ -2,10 +2,7 @@
use anyhow::{format_err, Error};
use proxmox_sys::fs::{
create_path,
CreateOptions,
};
use proxmox_sys::fs::{create_path, CreateOptions};
use pbs_buildcfg::{PROXMOX_BACKUP_RUN_DIR_M, PROXMOX_BACKUP_STATE_DIR_M};
@ -56,7 +53,6 @@ pub const MAX_CHUNK_ARCHIVE_SIZE: usize = 4*1024*1024*1024; // 4GB for now
/// To improve performance, we need to avoid tape drive buffer flush.
pub const COMMIT_BLOCK_SIZE: usize = 128 * 1024 * 1024 * 1024; // 128 GiB
/// Create tape status dir with correct permission
pub fn create_tape_status_dir() -> Result<(), Error> {
let backup_user = pbs_config::backup_user()?;

View File

@ -2,12 +2,7 @@ use anyhow::{bail, Error};
use proxmox_uuid::Uuid;
use crate::{
tape::{
MediaCatalog,
MediaSetCatalog,
},
};
use crate::tape::{MediaCatalog, MediaSetCatalog};
/// Helper to build and query sets of catalogs
///
@ -20,7 +15,6 @@ pub struct CatalogSet {
}
impl CatalogSet {
/// Create empty instance
pub fn new() -> Self {
Self {
@ -56,7 +50,6 @@ impl CatalogSet {
/// Add a new catalog, move the old on to the read-only set
pub fn append_catalog(&mut self, new_catalog: MediaCatalog) -> Result<(), Error> {
// append current catalog to read-only set
if let Some(catalog) = self.catalog.take() {
self.media_set_catalog.append_catalog(catalog)?;

View File

@ -4,47 +4,29 @@ pub use catalog_set::*;
mod new_chunks_iterator;
pub use new_chunks_iterator::*;
use std::path::Path;
use std::fs::File;
use std::time::SystemTime;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::time::SystemTime;
use anyhow::{bail, Error};
use proxmox_uuid::Uuid;
use proxmox_sys::{task_log, task_warn};
use proxmox_uuid::Uuid;
use pbs_config::tape_encryption_keys::load_key_configs;
use pbs_tape::{
TapeWrite,
sg_tape::tape_alert_flags_critical,
};
use pbs_datastore::{DataStore, SnapshotReader};
use pbs_tape::{sg_tape::tape_alert_flags_critical, TapeWrite};
use proxmox_rest_server::WorkerTask;
use crate::{
tape::{
TAPE_STATUS_DIR,
MAX_CHUNK_ARCHIVE_SIZE,
COMMIT_BLOCK_SIZE,
MediaPool,
MediaId,
MediaCatalog,
use crate::tape::{
drive::{media_changer, request_and_load_media, TapeDriver},
file_formats::{
MediaSetLabel,
ChunkArchiveWriter,
tape_write_snapshot_archive,
tape_write_catalog,
},
drive::{
TapeDriver,
request_and_load_media,
media_changer,
},
tape_write_catalog, tape_write_snapshot_archive, ChunkArchiveWriter, MediaSetLabel,
},
MediaCatalog, MediaId, MediaPool, COMMIT_BLOCK_SIZE, MAX_CHUNK_ARCHIVE_SIZE, TAPE_STATUS_DIR,
};
struct PoolWriterState {
drive: Box<dyn TapeDriver>,
// Media Uuid from loaded media
@ -65,7 +47,6 @@ pub struct PoolWriter {
}
impl PoolWriter {
pub fn new(
mut pool: MediaPool,
drive_name: &str,
@ -73,16 +54,11 @@ impl PoolWriter {
notify_email: Option<String>,
force_media_set: bool,
) -> Result<Self, Error> {
let current_time = proxmox_time::epoch_i64();
let new_media_set_reason = pool.start_write_session(current_time, force_media_set)?;
if let Some(reason) = new_media_set_reason {
task_log!(
worker,
"starting new media set - reason: {}",
reason,
);
task_log!(worker, "starting new media set - reason: {}", reason,);
}
let media_set_uuid = pool.current_media_set().uuid();
@ -93,12 +69,8 @@ impl PoolWriter {
// load all catalogs read-only at start
for media_uuid in pool.current_media_list()? {
let media_info = pool.lookup_media(media_uuid).unwrap();
let media_catalog = MediaCatalog::open(
Path::new(TAPE_STATUS_DIR),
media_info.id(),
false,
false,
)?;
let media_catalog =
MediaCatalog::open(Path::new(TAPE_STATUS_DIR), media_info.id(), false, false)?;
catalog_set.append_read_only_catalog(media_catalog)?;
}
@ -122,7 +94,10 @@ impl PoolWriter {
}
pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
self.catalog_set.lock().unwrap().contains_snapshot(store, snapshot)
self.catalog_set
.lock()
.unwrap()
.contains_snapshot(store, snapshot)
}
/// Eject media and drop PoolWriterState (close drive)
@ -155,7 +130,6 @@ impl PoolWriter {
let (drive_config, _digest) = pbs_config::drive::config()?;
if let Some((mut changer, _)) = media_changer(&drive_config, &self.drive_name)? {
if let Some(ref mut status) = status {
task_log!(worker, "rewind media");
// rewind first so that the unload command later does not run into a timeout
@ -167,14 +141,25 @@ impl PoolWriter {
let media = self.pool.lookup_media(media_uuid)?;
let label_text = media.label_text();
if let Some(slot) = changer.export_media(label_text)? {
task_log!(worker, "exported media '{}' to import/export slot {}", label_text, slot);
task_log!(
worker,
"exported media '{}' to import/export slot {}",
label_text,
slot
);
} else {
task_warn!(worker, "export failed - media '{}' is not online or in different drive", label_text);
task_warn!(
worker,
"export failed - media '{}' is not online or in different drive",
label_text
);
}
}
} else if let Some(mut status) = status {
task_log!(worker, "standalone drive - ejecting media instead of export");
task_log!(
worker,
"standalone drive - ejecting media instead of export"
);
status.drive.eject_media()?;
}
@ -214,7 +199,11 @@ impl PoolWriter {
return Ok(media_uuid);
}
task_log!(worker, "allocated new writable media '{}'", media.label_text());
task_log!(
worker,
"allocated new writable media '{}'",
media.label_text()
);
if let Some(PoolWriterState { mut drive, .. }) = self.status.take() {
if last_media_uuid.is_some() {
@ -225,8 +214,13 @@ impl PoolWriter {
let (drive_config, _digest) = pbs_config::drive::config()?;
let (mut drive, old_media_id) =
request_and_load_media(worker, &drive_config, &self.drive_name, media.label(), &self.notify_email)?;
let (mut drive, old_media_id) = request_and_load_media(
worker,
&drive_config,
&self.drive_name,
media.label(),
&self.notify_email,
)?;
// test for critical tape alert flags
if let Ok(alert_flags) = drive.tape_alert_flags() {
@ -234,7 +228,10 @@ impl PoolWriter {
task_log!(worker, "TapeAlertFlags: {:?}", alert_flags);
if tape_alert_flags_critical(alert_flags) {
self.pool.set_media_status_damaged(&media_uuid)?;
bail!("aborting due to critical tape alert flags: {:?}", alert_flags);
bail!(
"aborting due to critical tape alert flags: {:?}",
alert_flags
);
}
}
}
@ -273,15 +270,12 @@ impl PoolWriter {
}
fn open_catalog_file(uuid: &Uuid) -> Result<File, Error> {
let status_path = Path::new(TAPE_STATUS_DIR);
let mut path = status_path.to_owned();
path.push(uuid.to_string());
path.set_extension("log");
let file = std::fs::OpenOptions::new()
.read(true)
.open(&path)?;
let file = std::fs::OpenOptions::new().read(true).open(&path)?;
Ok(file)
}
@ -289,11 +283,7 @@ impl PoolWriter {
// Check if tape is loaded, then move to EOM (if not already there)
//
// Returns the tape position at EOM.
fn prepare_tape_write(
status: &mut PoolWriterState,
worker: &WorkerTask,
) -> Result<u64, Error> {
fn prepare_tape_write(status: &mut PoolWriterState, worker: &WorkerTask) -> Result<u64, Error> {
if !status.at_eom {
task_log!(worker, "moving to end of media");
status.drive.move_to_eom(true)?;
@ -302,7 +292,10 @@ impl PoolWriter {
let current_file_number = status.drive.current_file_number()?;
if current_file_number < 2 {
bail!("got strange file position number from drive ({})", current_file_number);
bail!(
"got strange file position number from drive ({})",
current_file_number
);
}
Ok(current_file_number)
@ -315,11 +308,7 @@ impl PoolWriter {
/// on the media (return value 'Ok(false, _)'). In that case, the
/// archive is marked incomplete. The caller should mark the media
/// as full and try again using another media.
pub fn append_catalog_archive(
&mut self,
worker: &WorkerTask,
) -> Result<bool, Error> {
pub fn append_catalog_archive(&mut self, worker: &WorkerTask) -> Result<bool, Error> {
let status = match self.status {
Some(ref mut status) => status,
None => bail!("PoolWriter - no media loaded"),
@ -354,23 +343,14 @@ impl PoolWriter {
let mut file = Self::open_catalog_file(uuid)?;
let done = tape_write_catalog(
writer.as_mut(),
uuid,
media_set.uuid(),
seq_nr,
&mut file,
)?.is_some();
let done = tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)?
.is_some();
Ok(done)
}
// Append catalogs for all previous media in set (without last)
fn append_media_set_catalogs(
&mut self,
worker: &WorkerTask,
) -> Result<(), Error> {
fn append_media_set_catalogs(&mut self, worker: &WorkerTask) -> Result<(), Error> {
let media_set = self.pool.current_media_set();
let mut media_list = &media_set.media_list()[..];
@ -387,7 +367,6 @@ impl PoolWriter {
Self::prepare_tape_write(status, worker)?;
for (seq_nr, uuid) in media_list.iter().enumerate() {
let uuid = match uuid {
None => bail!("got incomplete media list - internal error"),
Some(uuid) => uuid,
@ -399,13 +378,9 @@ impl PoolWriter {
task_log!(worker, "write catalog for previous media: {}", uuid);
if tape_write_catalog(
writer.as_mut(),
uuid,
media_set.uuid(),
seq_nr,
&mut file,
)?.is_none() {
if tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)?
.is_none()
{
bail!("got EOM while writing start catalog");
}
}
@ -428,7 +403,6 @@ impl PoolWriter {
worker: &WorkerTask,
snapshot_reader: &SnapshotReader,
) -> Result<(bool, usize), Error> {
let status = match self.status {
Some(ref mut status) => status,
None => bail!("PoolWriter - no media loaded"),
@ -474,7 +448,6 @@ impl PoolWriter {
chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
store: &str,
) -> Result<(bool, usize), Error> {
let status = match self.status {
Some(ref mut status) => status,
None => bail!("PoolWriter - no media loaded"),
@ -486,13 +459,8 @@ impl PoolWriter {
let start_time = SystemTime::now();
let (saved_chunks, content_uuid, leom, bytes_written) = write_chunk_archive(
worker,
writer,
chunk_iter,
store,
MAX_CHUNK_ARCHIVE_SIZE,
)?;
let (saved_chunks, content_uuid, leom, bytes_written) =
write_chunk_archive(worker, writer, chunk_iter, store, MAX_CHUNK_ARCHIVE_SIZE)?;
status.bytes_written += bytes_written;
@ -508,8 +476,12 @@ impl PoolWriter {
let request_sync = status.bytes_written >= COMMIT_BLOCK_SIZE;
// register chunks in media_catalog
self.catalog_set.lock().unwrap()
.register_chunk_archive(content_uuid, current_file_number, store, &saved_chunks)?;
self.catalog_set.lock().unwrap().register_chunk_archive(
content_uuid,
current_file_number,
store,
&saved_chunks,
)?;
if leom || request_sync {
self.commit()?;
@ -523,11 +495,7 @@ impl PoolWriter {
datastore: Arc<DataStore>,
snapshot_reader: Arc<Mutex<SnapshotReader>>,
) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
NewChunksIterator::spawn(
datastore,
snapshot_reader,
Arc::clone(&self.catalog_set),
)
NewChunksIterator::spawn(datastore, snapshot_reader, Arc::clone(&self.catalog_set))
}
}
@ -539,7 +507,6 @@ fn write_chunk_archive<'a>(
store: &str,
max_size: usize,
) -> Result<(Vec<[u8; 32]>, Uuid, bool, usize), Error> {
let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, store, true)?;
// we want to get the chunk list in correct order
@ -589,7 +556,6 @@ fn update_media_set_label(
old_set: Option<MediaSetLabel>,
media_id: &MediaId,
) -> Result<(MediaCatalog, bool), Error> {
let media_catalog;
let new_set = match media_id.media_set_label {
@ -602,7 +568,10 @@ fn update_media_set_label(
match config_map.get(fingerprint) {
Some(key_config) => Some(key_config.clone()),
None => {
bail!("unable to find tape encryption key config '{}'", fingerprint);
bail!(
"unable to find tape encryption key config '{}'",
fingerprint
);
}
}
} else {
@ -621,10 +590,14 @@ fn update_media_set_label(
Some(media_set_label) => {
if new_set.uuid == media_set_label.uuid {
if new_set.seq_nr != media_set_label.seq_nr {
bail!("got media with wrong media sequence number ({} != {}",
new_set.seq_nr,media_set_label.seq_nr);
bail!(
"got media with wrong media sequence number ({} != {}",
new_set.seq_nr,
media_set_label.seq_nr
);
}
if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint {
if new_set.encryption_key_fingerprint != media_set_label.encryption_key_fingerprint
{
bail!("detected changed encryption fingerprint - internal error");
}
media_catalog = MediaCatalog::open(status_path, media_id, true, false)?;
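A small but important policy is visible in `append_chunk_archive` above: after every chunk archive the writer commits the catalog either when the drive reported LEOM or once the bytes written since the last commit reach `COMMIT_BLOCK_SIZE`. Isolated as a sketch:

// 128 GiB, as defined for this module; committing in batches this large
// avoids flushing the tape drive buffer too often.
const COMMIT_BLOCK_SIZE: usize = 128 * 1024 * 1024 * 1024;

fn should_commit(bytes_written_since_commit: usize, leom: bool) -> bool {
    leom || bytes_written_since_commit >= COMMIT_BLOCK_SIZE
}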

View File

@ -3,7 +3,7 @@ use std::sync::{Arc, Mutex};
use anyhow::{format_err, Error};
use pbs_datastore::{DataStore, DataBlob, SnapshotReader};
use pbs_datastore::{DataBlob, DataStore, SnapshotReader};
use crate::tape::CatalogSet;
@ -16,7 +16,6 @@ pub struct NewChunksIterator {
}
impl NewChunksIterator {
/// Creates the iterator, spawning a new thread
///
/// Make sure to join() the returned thread handle.
@ -25,11 +24,9 @@ impl NewChunksIterator {
snapshot_reader: Arc<Mutex<SnapshotReader>>,
catalog_set: Arc<Mutex<CatalogSet>>,
) -> Result<(std::thread::JoinHandle<()>, Self), Error> {
let (tx, rx) = std::sync::mpsc::sync_channel(3);
let reader_thread = std::thread::spawn(move || {
let snapshot_reader = snapshot_reader.lock().unwrap();
let mut chunk_index: HashSet<[u8; 32]> = HashSet::new();
@ -37,7 +34,6 @@ impl NewChunksIterator {
let datastore_name = snapshot_reader.datastore_name().to_string();
let result: Result<(), Error> = proxmox_lang::try_block!({
let mut chunk_iter = snapshot_reader.chunk_iterator(move |digest| {
catalog_set
.lock()
@ -61,7 +57,7 @@ impl NewChunksIterator {
let blob = datastore.load_chunk(&digest)?;
//println!("LOAD CHUNK {}", hex::encode(&digest));
match tx.send(Ok(Some((digest, blob)))) {
Ok(()) => {},
Ok(()) => {}
Err(err) => {
eprintln!("could not send chunk to reader thread: {}", err);
break;

View File

@ -2,10 +2,10 @@
//
// # cargo test --release tape::test::alloc_writable_media
use std::path::PathBuf;
use anyhow::Error;
use std::path::PathBuf;
use pbs_api_types::{RetentionPolicy, MediaSetPolicy};
use pbs_api_types::{MediaSetPolicy, RetentionPolicy};
use crate::tape::{Inventory, MediaPool};
@ -22,7 +22,6 @@ fn create_testdir(name: &str) -> Result<PathBuf, Error> {
#[test]
fn test_alloc_writable_media_1() -> Result<(), Error> {
let testdir = create_testdir("test_alloc_writable_media_1")?;
let mut ctime = 0;
@ -49,7 +48,6 @@ fn test_alloc_writable_media_1() -> Result<(), Error> {
#[test]
fn test_alloc_writable_media_2() -> Result<(), Error> {
let testdir = create_testdir("test_alloc_writable_media_2")?;
let mut inventory = Inventory::load(&testdir)?;
@ -87,7 +85,6 @@ fn test_alloc_writable_media_2() -> Result<(), Error> {
#[test]
fn test_alloc_writable_media_3() -> Result<(), Error> {
let testdir = create_testdir("test_alloc_writable_media_3")?;
let mut inventory = Inventory::load(&testdir)?;
@ -136,7 +133,6 @@ fn test_alloc_writable_media_3() -> Result<(), Error> {
#[test]
fn test_alloc_writable_media_4() -> Result<(), Error> {
let testdir = create_testdir("test_alloc_writable_media_4")?;
let mut inventory = Inventory::load(&testdir)?;

View File

@ -2,20 +2,14 @@
//
// # cargo test --release tape::test::compute_media_state
use std::path::PathBuf;
use anyhow::Error;
use std::path::PathBuf;
use proxmox_uuid::Uuid;
use pbs_api_types::{MediaStatus, MediaSetPolicy, RetentionPolicy};
use pbs_api_types::{MediaSetPolicy, MediaStatus, RetentionPolicy};
use crate::tape::{
Inventory,
MediaPool,
file_formats::{
MediaSetLabel,
},
};
use crate::tape::{file_formats::MediaSetLabel, Inventory, MediaPool};
fn create_testdir(name: &str) -> Result<PathBuf, Error> {
let mut testdir: PathBuf = String::from("./target/testout").into();
@ -30,7 +24,6 @@ fn create_testdir(name: &str) -> Result<PathBuf, Error> {
#[test]
fn test_compute_media_state() -> Result<(), Error> {
let testdir = create_testdir("test_compute_media_state")?;
let ctime = 0;
@ -66,7 +59,10 @@ fn test_compute_media_state() -> Result<(), Error> {
)?;
// tape1 is free
assert_eq!(pool.lookup_media(&tape1_uuid)?.status(), &MediaStatus::Writable);
assert_eq!(
pool.lookup_media(&tape1_uuid)?.status(),
&MediaStatus::Writable
);
// intermediate tapes should be Full
assert_eq!(pool.lookup_media(&tape2_uuid)?.status(), &MediaStatus::Full);
@ -74,14 +70,16 @@ fn test_compute_media_state() -> Result<(), Error> {
assert_eq!(pool.lookup_media(&tape4_uuid)?.status(), &MediaStatus::Full);
// last tape is writable
assert_eq!(pool.lookup_media(&tape5_uuid)?.status(), &MediaStatus::Writable);
assert_eq!(
pool.lookup_media(&tape5_uuid)?.status(),
&MediaStatus::Writable
);
Ok(())
}
#[test]
fn test_media_expire_time() -> Result<(), Error> {
let testdir = create_testdir("test_media_expire_time")?;
let ctime = 0;
@ -115,19 +113,52 @@ fn test_media_expire_time() -> Result<(), Error> {
assert_eq!(pool.lookup_media(&tape0_uuid)?.status(), &MediaStatus::Full);
assert_eq!(pool.lookup_media(&tape1_uuid)?.status(), &MediaStatus::Full);
assert_eq!(pool.lookup_media(&tape2_uuid)?.status(), &MediaStatus::Writable);
assert_eq!(
pool.lookup_media(&tape2_uuid)?.status(),
&MediaStatus::Writable
);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 0), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 60), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 120), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 180), true);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 0),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 60),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 120),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape0_uuid)?, 180),
true
);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 0), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 60), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 120), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 180), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 190), false);
assert_eq!(pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 240), true);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 0),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 60),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 120),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 180),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 190),
false
);
assert_eq!(
pool.media_is_expired(&pool.lookup_media(&tape1_uuid)?, 240),
true
);
Ok(())
}

View File

@ -2,22 +2,14 @@
//
// # cargo test --release tape::test::current_set_usable
use std::path::PathBuf;
use anyhow::Error;
use std::path::PathBuf;
use proxmox_uuid::Uuid;
use pbs_api_types::{RetentionPolicy, MediaSetPolicy};
use pbs_api_types::{MediaSetPolicy, RetentionPolicy};
use crate::{
tape::{
Inventory,
MediaPool,
file_formats::{
MediaSetLabel,
},
},
};
use crate::tape::{file_formats::MediaSetLabel, Inventory, MediaPool};
fn create_testdir(name: &str) -> Result<PathBuf, Error> {
let mut testdir: PathBuf = String::from("./target/testout").into();
@ -32,7 +24,6 @@ fn create_testdir(name: &str) -> Result<PathBuf, Error> {
#[test]
fn test_current_set_usable_1() -> Result<(), Error> {
let testdir = create_testdir("test_current_set_usable_1")?;
// pool without any media
@ -54,7 +45,6 @@ fn test_current_set_usable_1() -> Result<(), Error> {
#[test]
fn test_current_set_usable_2() -> Result<(), Error> {
let testdir = create_testdir("test_current_set_usable_2")?;
let ctime = 0;
@ -81,7 +71,6 @@ fn test_current_set_usable_2() -> Result<(), Error> {
#[test]
fn test_current_set_usable_3() -> Result<(), Error> {
let testdir = create_testdir("test_current_set_usable_3")?;
let ctime = 0;
@ -110,7 +99,6 @@ fn test_current_set_usable_3() -> Result<(), Error> {
#[test]
fn test_current_set_usable_4() -> Result<(), Error> {
let testdir = create_testdir("test_current_set_usable_4")?;
let ctime = 0;
@ -139,7 +127,6 @@ fn test_current_set_usable_4() -> Result<(), Error> {
#[test]
fn test_current_set_usable_5() -> Result<(), Error> {
let testdir = create_testdir("test_current_set_usable_5")?;
let ctime = 0;
@ -170,7 +157,6 @@ fn test_current_set_usable_5() -> Result<(), Error> {
#[test]
fn test_current_set_usable_6() -> Result<(), Error> {
let testdir = create_testdir("test_current_set_usable_6")?;
let ctime = 0;
@ -199,7 +185,6 @@ fn test_current_set_usable_6() -> Result<(), Error> {
#[test]
fn test_current_set_usable_7() -> Result<(), Error> {
let testdir = create_testdir("test_current_set_usable_7")?;
let ctime = 0;
@ -215,7 +200,6 @@ fn test_current_set_usable_7() -> Result<(), Error> {
inventory.generate_used_tape("tape2", sl2, ctime);
// pool with two media in current set, one set to damaged
let pool = MediaPool::new(
"p1",

View File

@ -2,21 +2,14 @@
//
// # cargo test --release tape::test::inventory
use std::path::PathBuf;
use anyhow::{bail, Error};
use std::path::PathBuf;
use proxmox_uuid::Uuid;
use pbs_api_types::{MediaLocation, MediaStatus};
use crate::{
tape::{
Inventory,
file_formats::{
MediaSetLabel,
},
},
};
use crate::tape::{file_formats::MediaSetLabel, Inventory};
fn create_testdir(name: &str) -> Result<PathBuf, Error> {
let mut testdir: PathBuf = String::from("./target/testout").into();
@ -31,38 +24,56 @@ fn create_testdir(name: &str) -> Result<PathBuf, Error> {
#[test]
fn test_media_state_db() -> Result<(), Error> {
let testdir = create_testdir("test_media_state_db")?;
let mut inventory = Inventory::load(&testdir)?;
let uuid1: Uuid = inventory.generate_free_tape("tape1", 0);
assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Unknown, MediaLocation::Offline));
assert_eq!(
inventory.status_and_location(&uuid1),
(MediaStatus::Unknown, MediaLocation::Offline)
);
inventory.set_media_status_full(&uuid1)?;
assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Full, MediaLocation::Offline));
assert_eq!(
inventory.status_and_location(&uuid1),
(MediaStatus::Full, MediaLocation::Offline)
);
inventory.set_media_location_vault(&uuid1, "Office2")?;
assert_eq!(inventory.status_and_location(&uuid1),
(MediaStatus::Full, MediaLocation::Vault(String::from("Office2"))));
assert_eq!(
inventory.status_and_location(&uuid1),
(
MediaStatus::Full,
MediaLocation::Vault(String::from("Office2"))
)
);
inventory.set_media_location_offline(&uuid1)?;
assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Full, MediaLocation::Offline));
assert_eq!(
inventory.status_and_location(&uuid1),
(MediaStatus::Full, MediaLocation::Offline)
);
inventory.set_media_status_damaged(&uuid1)?;
assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Damaged, MediaLocation::Offline));
assert_eq!(
inventory.status_and_location(&uuid1),
(MediaStatus::Damaged, MediaLocation::Offline)
);
inventory.clear_media_status(&uuid1)?;
assert_eq!(inventory.status_and_location(&uuid1), (MediaStatus::Unknown, MediaLocation::Offline));
assert_eq!(
inventory.status_and_location(&uuid1),
(MediaStatus::Unknown, MediaLocation::Offline)
);
Ok(())
}
#[test]
fn test_list_pool_media() -> Result<(), Error> {
let testdir = create_testdir("test_list_pool_media")?;
let mut inventory = Inventory::load(&testdir)?;
@ -81,10 +92,16 @@ fn test_list_pool_media() -> Result<(), Error> {
let list = inventory.list_pool_media("p1");
assert_eq!(list.len(), 2);
let tape2 = list.iter().find(|media_id| &media_id.label.uuid == &tape2_uuid).unwrap();
let tape2 = list
.iter()
.find(|media_id| &media_id.label.uuid == &tape2_uuid)
.unwrap();
assert!(tape2.media_set_label.is_none());
let tape3 = list.iter().find(|media_id| &media_id.label.uuid == &tape3_uuid).unwrap();
let tape3 = list
.iter()
.find(|media_id| &media_id.label.uuid == &tape3_uuid)
.unwrap();
match tape3.media_set_label {
None => bail!("missing media set label"),
Some(ref set) => {
@ -97,7 +114,6 @@ fn test_list_pool_media() -> Result<(), Error> {
#[test]
fn test_media_set_simple() -> Result<(), Error> {
let testdir = create_testdir("test_media_set_simple")?;
let mut inventory = Inventory::load(&testdir)?;
@ -107,7 +123,6 @@ fn test_media_set_simple() -> Result<(), Error> {
let sl2 = MediaSetLabel::with_data("p1", sl1.uuid.clone(), 1, ctime + 20, None);
let sl3 = MediaSetLabel::with_data("p1", sl1.uuid.clone(), 2, ctime + 30, None);
let tape1_uuid = inventory.generate_used_tape("tape1", sl1.clone(), 0);
let tape2_uuid = inventory.generate_used_tape("tape2", sl2, 0);
let tape3_uuid = inventory.generate_used_tape("tape3", sl3, 0);
@ -141,7 +156,6 @@ fn test_media_set_simple() -> Result<(), Error> {
// test media set start time
assert_eq!(inventory.media_set_start_time(&sl1.uuid), Some(ctime + 10));
// test pool p2
let media_set = inventory.compute_media_set_members(&sl4.uuid)?;
assert_eq!(media_set.uuid(), &sl4.uuid);
@ -158,10 +172,8 @@ fn test_media_set_simple() -> Result<(), Error> {
Ok(())
}
#[test]
fn test_latest_media_set() -> Result<(), Error> {
let testdir = create_testdir("test_latest_media_set")?;
let insert_tape = |inventory: &mut Inventory, pool, label, seq_nr, ctime| -> Uuid {
@ -176,7 +188,12 @@ fn test_latest_media_set() -> Result<(), Error> {
let set = inventory.compute_media_set_members(&latest_set).unwrap();
let media_list = set.media_list();
assert_eq!(media_list.iter().filter(|s| s.is_some()).count(), 1);
let media_uuid = media_list.iter().find(|s| s.is_some()).unwrap().clone().unwrap();
let media_uuid = media_list
.iter()
.find(|s| s.is_some())
.unwrap()
.clone()
.unwrap();
let media = inventory.lookup_media(&media_uuid).unwrap();
assert_eq!(media.label.label_text, label);
};

View File

@ -1,5 +1,4 @@
mod inventory;
mod current_set_usable;
mod compute_media_state;
mod alloc_writable_media;
mod compute_media_state;
mod current_set_usable;
mod inventory;