// proxmox-backup/pbs-tape/src/lib.rs

use std::collections::HashSet;
use anyhow::{bail, Error};
use bitflags::bitflags;
use endian_trait::Endian;
use serde::{Serialize, Deserialize};
use serde_json::Value;
use proxmox_uuid::Uuid;
use pbs_api_types::{ScsiTapeChanger, SLOT_ARRAY_SCHEMA};
pub mod linux_list_drives;
pub mod sgutils2;
mod blocked_reader;
pub use blocked_reader::BlockedReader;
mod blocked_writer;
pub use blocked_writer::BlockedWriter;
mod tape_write;
pub use tape_write::*;
mod tape_read;
pub use tape_read::*;
mod emulate_tape_reader;
pub use emulate_tape_reader::EmulateTapeReader;
mod emulate_tape_writer;
pub use emulate_tape_writer::EmulateTapeWriter;
pub mod sg_tape;
pub mod sg_pt_changer;
/// We always use a fixed 256 KiB block size.
pub const PROXMOX_TAPE_BLOCK_SIZE: usize = 256*1024;
// openssl::sha::sha256(b"Proxmox Tape Block Header v1.0")[0..8]
pub const PROXMOX_TAPE_BLOCK_HEADER_MAGIC_1_0: [u8; 8] = [220, 189, 175, 202, 235, 160, 165, 40];
// openssl::sha::sha256(b"Proxmox Backup Content Header v1.0")[0..8];
pub const PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0: [u8; 8] = [99, 238, 20, 159, 205, 242, 155, 12];
// openssl::sha::sha256(b"Proxmox Backup Tape Label v1.0")[0..8];
pub const PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0: [u8; 8] = [42, 5, 191, 60, 176, 48, 170, 57];
// openssl::sha::sha256(b"Proxmox Backup MediaSet Label v1.0")
pub const PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0: [u8; 8] = [8, 96, 99, 249, 47, 151, 83, 216];
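
// A quick sanity-check sketch for how the magic constants above are derived
// (illustrative addition, not part of the original source; it assumes the
// `openssl` crate referenced in the comments above is available to this crate).
#[cfg(test)]
mod magic_constant_sketch {
    use super::*;

    #[test]
    fn tape_block_header_magic_matches_hash() {
        // first 8 bytes of sha256("Proxmox Tape Block Header v1.0")
        let digest = openssl::sha::sha256(b"Proxmox Tape Block Header v1.0");
        assert_eq!(&digest[0..8], &PROXMOX_TAPE_BLOCK_HEADER_MAGIC_1_0[..]);
    }
}
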
/// Tape Block Header with data payload
///
/// All tape files are written as a sequence of blocks.
///
/// Note: This struct is large, so never put it on the stack; we use
/// an unsized type to avoid that.
///
/// Tape data blocks are always read/written with a fixed size
/// (`PROXMOX_TAPE_BLOCK_SIZE`). But they may contain less data, so the
/// header has an additional size field. For streams of blocks, there
/// is a sequence number (`seq_nr`) which may be used for additional
/// error checking.
#[repr(C, packed)]
pub struct BlockHeader {
    /// fixed value `PROXMOX_TAPE_BLOCK_HEADER_MAGIC_1_0`
    pub magic: [u8; 8],
    pub flags: BlockHeaderFlags,
    /// size as 3 bytes unsigned, little endian
    pub size: [u8; 3],
    /// block sequence number
    pub seq_nr: u32,
    pub payload: [u8],
}

bitflags! {
    /// Header flags (e.g. `END_OF_STREAM` or `INCOMPLETE`)
    pub struct BlockHeaderFlags: u8 {
        /// Marks the last block in a stream.
        const END_OF_STREAM = 0b00000001;
        /// Marks multi-volume streams (when set in the last block)
        const INCOMPLETE = 0b00000010;
    }
}

#[derive(Endian, Copy, Clone, Debug)]
#[repr(C, packed)]
/// Media Content Header
///
/// All tape files start with this header. The header may contain some
/// informational data indicated by `size`.
///
/// `| MediaContentHeader | header data (size) | stream data |`
///
/// Note: The stream data that follows may be of any size.
pub struct MediaContentHeader {
    /// fixed value `PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0`
    pub magic: [u8; 8],
    /// magic number for the content following
    pub content_magic: [u8; 8],
    /// unique ID to identify this data stream
    pub uuid: [u8; 16],
    /// stream creation time
    pub ctime: i64,
    /// Size of header data
    pub size: u32,
    /// Part number for multipart archives.
    pub part_number: u8,
    /// Reserved for future use
    pub reserved_0: u8,
    /// Reserved for future use
    pub reserved_1: u8,
    /// Reserved for future use
    pub reserved_2: u8,
}

impl MediaContentHeader {
    /// Create a new instance with autogenerated Uuid
    pub fn new(content_magic: [u8; 8], size: u32) -> Self {
        let uuid = *Uuid::generate().into_inner();
        Self {
            magic: PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
            content_magic,
            uuid,
            ctime: proxmox_time::epoch_i64(),
            size,
            part_number: 0,
            reserved_0: 0,
            reserved_1: 0,
            reserved_2: 0,
        }
    }

    /// Helper to check magic numbers and size constraints
    pub fn check(&self, content_magic: [u8; 8], min_size: u32, max_size: u32) -> Result<(), Error> {
        if self.magic != PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0 {
            bail!("MediaContentHeader: wrong magic");
        }
        if self.content_magic != content_magic {
            bail!("MediaContentHeader: wrong content magic");
        }
        if self.size < min_size || self.size > max_size {
            bail!("MediaContentHeader: got unexpected size");
        }
        Ok(())
    }

    /// Returns the content Uuid
    pub fn content_uuid(&self) -> Uuid {
        Uuid::from(self.uuid)
    }
}
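
// A minimal usage sketch for `MediaContentHeader` (illustrative addition, not
// part of the original source): a freshly created header carries the expected
// magic and passes its own `check()` for the given content magic and size bounds.
#[cfg(test)]
mod media_content_header_sketch {
    use super::*;

    #[test]
    fn new_header_passes_check() {
        let header = MediaContentHeader::new(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, 0);
        header
            .check(PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, 0, 1024)
            .expect("header should pass its own check");
        // checking against a different content magic fails
        assert!(header
            .check(PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, 0, 1024)
            .is_err());
        // copy the field out of the packed struct before comparing
        let part_number = header.part_number;
        assert_eq!(part_number, 0);
    }
}
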
impl BlockHeader {
    pub const SIZE: usize = PROXMOX_TAPE_BLOCK_SIZE;

    /// Allocates a new instance on the heap
    pub fn new() -> Box<Self> {
        use std::alloc::{alloc_zeroed, Layout};

        // align to PAGESIZE, so that we can use it with SG_IO
        let page_size = unsafe { libc::sysconf(libc::_SC_PAGESIZE) } as usize;

        let mut buffer = unsafe {
            let ptr = alloc_zeroed(
                Layout::from_size_align(Self::SIZE, page_size).unwrap(),
            );
            Box::from_raw(
                std::slice::from_raw_parts_mut(ptr, Self::SIZE - 16)
                    as *mut [u8] as *mut Self
            )
        };
        buffer.magic = PROXMOX_TAPE_BLOCK_HEADER_MAGIC_1_0;
        buffer
    }

    /// Set the `size` field
    pub fn set_size(&mut self, size: usize) {
        let size = size.to_le_bytes();
        self.size.copy_from_slice(&size[..3]);
    }

    /// Returns the `size` field
    pub fn size(&self) -> usize {
        (self.size[0] as usize) + ((self.size[1] as usize) << 8) + ((self.size[2] as usize) << 16)
    }

    /// Set the `seq_nr` field
    pub fn set_seq_nr(&mut self, seq_nr: u32) {
        self.seq_nr = seq_nr.to_le();
    }

    /// Returns the `seq_nr` field
    pub fn seq_nr(&self) -> u32 {
        u32::from_le(self.seq_nr)
    }
}
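
// A minimal usage sketch for `BlockHeader` (illustrative addition, not part of
// the original source). The fixed header part (magic + flags + size + seq_nr)
// is 16 bytes, which is why `new()` above creates the unsized payload slice
// with `Self::SIZE - 16` elements.
#[cfg(test)]
mod block_header_sketch {
    use super::*;

    #[test]
    fn size_and_seq_nr_round_trip() {
        let mut header = BlockHeader::new();
        assert_eq!(header.payload.len(), BlockHeader::SIZE - 16);

        // `size` is stored as 3 little-endian bytes, so values up to 2^24 - 1 fit
        header.set_size(12345);
        assert_eq!(header.size(), 12345);

        header.set_seq_nr(7);
        assert_eq!(header.seq_nr(), 7);
    }
}
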
/// Changer element status.
///
/// Drives and slots may be `Empty`, or contain some media, either
/// with a known volume tag (`VolumeTag(String)`) or without one (`Full`).
#[derive(Serialize, Deserialize, Debug)]
pub enum ElementStatus {
    Empty,
    Full,
    VolumeTag(String),
}

/// Changer drive status.
#[derive(Serialize, Deserialize)]
pub struct DriveStatus {
    /// The slot the element was loaded from (if known).
    pub loaded_slot: Option<u64>,
    /// The status.
    pub status: ElementStatus,
    /// Drive Identifier (Serial number)
    pub drive_serial_number: Option<String>,
    /// Drive Vendor
    pub vendor: Option<String>,
    /// Drive Model
    pub model: Option<String>,
    /// Element Address
    pub element_address: u16,
}

/// Storage element status.
#[derive(Serialize, Deserialize)]
pub struct StorageElementStatus {
    /// Flag for Import/Export slots
    pub import_export: bool,
    /// The status.
    pub status: ElementStatus,
    /// Element Address
    pub element_address: u16,
}

/// Transport element status.
#[derive(Serialize, Deserialize)]
pub struct TransportElementStatus {
    /// The status.
    pub status: ElementStatus,
    /// Element Address
    pub element_address: u16,
}

/// Changer status - show drive/slot usage
#[derive(Serialize, Deserialize)]
pub struct MtxStatus {
    /// List of known drives
    pub drives: Vec<DriveStatus>,
    /// List of known storage slots
    pub slots: Vec<StorageElementStatus>,
    /// Transport elements
    ///
    /// Note: Some libraries do not report transport elements.
    pub transports: Vec<TransportElementStatus>,
}

impl MtxStatus {
    pub fn slot_address(&self, slot: u64) -> Result<u16, Error> {
        if slot == 0 {
            bail!("invalid slot number '{}' (slot numbers start at 1)", slot);
        }
        if slot > (self.slots.len() as u64) {
            bail!("invalid slot number '{}' (max {} slots)", slot, self.slots.len());
        }

        Ok(self.slots[(slot - 1) as usize].element_address)
    }

    pub fn drive_address(&self, drivenum: u64) -> Result<u16, Error> {
        if drivenum >= (self.drives.len() as u64) {
            bail!("invalid drive number '{}'", drivenum);
        }

        Ok(self.drives[drivenum as usize].element_address)
    }

    pub fn transport_address(&self) -> u16 {
        // simply use the first transport
        // (are there changers exposing more than one?)
        // defaults to 0 for changers that do not report transports
        self.transports
            .get(0)
            .map(|t| t.element_address)
            .unwrap_or(0u16)
    }

    pub fn find_free_slot(&self, import_export: bool) -> Option<u64> {
        let mut free_slot = None;
        for (i, slot_info) in self.slots.iter().enumerate() {
            if slot_info.import_export != import_export {
                continue; // skip slots of the wrong type
            }
            if let ElementStatus::Empty = slot_info.status {
                free_slot = Some((i + 1) as u64);
                break;
            }
        }
        free_slot
    }

    pub fn mark_import_export_slots(&mut self, config: &ScsiTapeChanger) -> Result<(), Error> {
        let mut export_slots: HashSet<u64> = HashSet::new();

        if let Some(slots) = &config.export_slots {
            let slots: Value = SLOT_ARRAY_SCHEMA.parse_property_string(slots)?;
            export_slots = slots
                .as_array()
                .unwrap()
                .iter()
                .filter_map(|v| v.as_u64())
                .collect();
        }

        for (i, entry) in self.slots.iter_mut().enumerate() {
            let slot = i as u64 + 1;
            if export_slots.contains(&slot) {
                entry.import_export = true; // mark as IMPORT/EXPORT slot
            }
        }

        Ok(())
    }
}
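
// A minimal usage sketch for `MtxStatus` (illustrative addition, not part of
// the original source; the element addresses and the volume tag below are
// made-up values).
#[cfg(test)]
mod mtx_status_sketch {
    use super::*;

    #[test]
    fn slot_and_drive_lookup() {
        let status = MtxStatus {
            drives: vec![DriveStatus {
                loaded_slot: None,
                status: ElementStatus::Empty,
                drive_serial_number: None,
                vendor: None,
                model: None,
                element_address: 256,
            }],
            slots: vec![
                StorageElementStatus {
                    import_export: false,
                    status: ElementStatus::Empty,
                    element_address: 1024,
                },
                StorageElementStatus {
                    import_export: true,
                    status: ElementStatus::VolumeTag("TAPE01L8".to_string()),
                    element_address: 1025,
                },
            ],
            transports: Vec::new(),
        };

        // slot numbers are 1-based, so slot 1 maps to the first storage element
        assert_eq!(status.slot_address(1).unwrap(), 1024);
        // drive numbers are 0-based
        assert_eq!(status.drive_address(0).unwrap(), 256);
        // no transport element reported, so the address defaults to 0
        assert_eq!(status.transport_address(), 0);
        // the only empty non-import/export slot is slot 1
        assert_eq!(status.find_free_slot(false), Some(1));
        // the import/export slot is occupied, so there is no free one
        assert_eq!(status.find_free_slot(true), None);
    }
}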