tape: bump catalog/snapshot archive magic

the snapshot string format is not backwards compatible since it now has
an in-line namespace prefix. it's possible to select which magic to use
at the start of the backup, since a tape backup job knows whether it
operates on non-root namespaces up-front.
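
roughly, that selection boils down to the following sketch (illustrative only;
the constant values and the actual checks are the ones added in the diff
below):

    const SNAPSHOT_ARCHIVE_MAGIC_1_1: [u8; 8] = [218, 22, 21, 208, 17, 226, 154, 98];
    const SNAPSHOT_ARCHIVE_MAGIC_1_2: [u8; 8] = [98, 16, 54, 155, 186, 16, 51, 29];

    // a tape backup job needs the new, incompatible formats as soon as it can
    // touch non-root namespaces, either directly or by recursing into them
    fn job_uses_namespaces(root_ns_is_root: bool, recursion_depth: Option<usize>) -> bool {
        !root_ns_is_root || recursion_depth != Some(0)
    }

    // per snapshot, the old magic is kept for root-namespace snapshots so that
    // existing restore code can still read them
    fn snapshot_archive_magic(snapshot_ns_is_root: bool) -> [u8; 8] {
        if snapshot_ns_is_root {
            SNAPSHOT_ARCHIVE_MAGIC_1_1
        } else {
            SNAPSHOT_ARCHIVE_MAGIC_1_2
        }
    }

the same job-level flag is what the PoolWriter gets passed as `ns_magic` below,
to pick between the v1.0 and v1.1 catalog archive magic.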

the MediaCatalog itself has a similar backwards-incompatible change, but two
things make bumping the magic there harder:
- existing catalogs are updated in-place
- when initially creating/opening a catalog, it is not yet known what it will
  contain in the future

since the tape contents are sufficiently guarded by the other two bumps,
ignoring the backwards-incompatible change of the on-disk catalogs seems like
an acceptable tradeoff.
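
for reference, the guard works because restore matches on known content magics
and refuses anything it does not recognize; a minimal sketch of that check
(stand-in constant values taken from the diff below, assuming a catch-all
error arm like the existing one in the restore code):

    const CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] = [183, 207, 199, 37, 158, 153, 30, 115];
    const CATALOG_ARCHIVE_MAGIC_1_1: [u8; 8] = [179, 236, 113, 240, 173, 236, 2, 96];

    fn check_catalog_magic(content_magic: [u8; 8]) -> Result<(), String> {
        match content_magic {
            // updated code accepts both the old and the new catalog archive magic
            CATALOG_ARCHIVE_MAGIC_1_0 | CATALOG_ARCHIVE_MAGIC_1_1 => Ok(()),
            // older code only knows the v1.0 magic, so tapes written with the
            // new magics are rejected instead of being misread
            _ => Err(format!("unknown content magic {:?}", content_magic)),
        }
    }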

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fabian Grünbichler 2022-05-11 14:54:18 +02:00 committed by Dominik Csapak
parent 07ffb86451
commit 707c48ad46
6 changed files with 79 additions and 33 deletions

View File

@@ -10,9 +10,9 @@ use proxmox_schema::api;
 use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
 use pbs_api_types::{
-Authid, GroupFilter, MediaPoolConfig, Operation, TapeBackupJobConfig, TapeBackupJobSetup,
-TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT,
-PRIV_TAPE_WRITE, UPID_SCHEMA,
+Authid, BackupNamespace, GroupFilter, MediaPoolConfig, Operation, TapeBackupJobConfig,
+TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, PRIV_DATASTORE_READ,
+PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
@@ -404,18 +404,15 @@ fn backup_worker(
 task_log!(worker, "update media online status");
 let changer_name = update_media_online_status(&setup.drive)?;
+let root_namespace = setup.ns.clone().unwrap_or_default();
+let ns_magic = !root_namespace.is_root() || setup.recursion_depth != Some(0);
 let pool = MediaPool::with_config(status_path, pool_config, changer_name, false)?;
-let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email, force_media_set)?;
-// FIXME: Namespaces! Probably just recurse for now? Not sure about the usage here...
+let mut pool_writer =
+PoolWriter::new(pool, &setup.drive, worker, email, force_media_set, ns_magic)?;
 let mut group_list = Vec::new();
-let root_namespace = if let Some(ns) = &setup.ns {
-ns.clone()
-} else {
-Default::default()
-};
 let namespaces =
 datastore.recursive_iter_backup_ns_ok(root_namespace, setup.recursion_depth)?;
 for ns in namespaces {

View File

@@ -39,10 +39,11 @@ use crate::{
 drive::{lock_tape_device, request_and_load_media, set_tape_device_state, TapeDriver},
 file_formats::{
 CatalogArchiveHeader, ChunkArchiveDecoder, ChunkArchiveHeader, SnapshotArchiveHeader,
-PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
-PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
-PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
-PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
+PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1,
+PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
+PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
+PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
+PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2,
 },
 lock_media_set, Inventory, MediaCatalog, MediaId, MediaSet, MediaSetCatalog,
 TAPE_STATUS_DIR,
@@ -1096,7 +1097,8 @@ fn restore_snapshots_to_tmpdir(
 }
 match header.content_magic {
-PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
+PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
+| PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2 => {
 let header_data = reader.read_exact_allocated(header.size as usize)?;
 let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
@@ -1440,7 +1442,7 @@ fn restore_archive<'a>(
 PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
 bail!("unexpected snapshot archive version (v1.0)");
 }
-PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
+PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 | PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2 => {
 let header_data = reader.read_exact_allocated(header.size as usize)?;
 let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
@@ -1591,7 +1593,7 @@ fn restore_archive<'a>(
 reader.skip_data()?; // read all data
 }
-PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 => {
+PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 | PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1 => {
 let header_data = reader.read_exact_allocated(header.size as usize)?;
 let archive_header: CatalogArchiveHeader = serde_json::from_slice(&header_data)
@@ -1937,7 +1939,9 @@ pub fn fast_catalog_restore(
 bail!("missing MediaContentHeader");
 }
-if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0 {
+if header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
+|| header.content_magic == PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1
+{
 task_log!(worker, "found catalog at pos {}", current_file_number);
 let header_data = reader.read_exact_allocated(header.size as usize)?;

View File

@@ -6,7 +6,7 @@ use proxmox_uuid::Uuid;
 use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE};
-use crate::tape::file_formats::{CatalogArchiveHeader, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0};
+use crate::tape::file_formats::CatalogArchiveHeader;
 /// Write a media catalog to the tape
 ///
@@ -21,6 +21,7 @@ pub fn tape_write_catalog<'a>(
 media_set_uuid: &Uuid,
 seq_nr: usize,
 file: &mut File,
+version: [u8; 8],
 ) -> Result<Option<Uuid>, std::io::Error> {
 let archive_header = CatalogArchiveHeader {
 uuid: uuid.clone(),
@@ -32,10 +33,7 @@ pub fn tape_write_catalog<'a>(
 .as_bytes()
 .to_vec();
-let header = MediaContentHeader::new(
-PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0,
-header_data.len() as u32,
-);
+let header = MediaContentHeader::new(version, header_data.len() as u32);
 let content_uuid: Uuid = header.uuid.into();
 let leom = writer.write_header(&header, &header_data)?;

View File

@@ -45,10 +45,16 @@ pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] =
 pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0: [u8; 8] = [9, 182, 2, 31, 125, 232, 114, 133];
 // openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.1")[0..8];
 pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1: [u8; 8] = [218, 22, 21, 208, 17, 226, 154, 98];
+// v1.2 introduced an optional, in-line namespace prefix for the snapshot field
+// openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.2")[0..8];
+pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2: [u8; 8] = [98, 16, 54, 155, 186, 16, 51, 29];
 // openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.0")[0..8];
 pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0: [u8; 8] =
 [183, 207, 199, 37, 158, 153, 30, 115];
+// v1.1 introduced an optional, in-line namespace prefix for the snapshot field
+// openssl::sha::sha256(b"Proxmox Backup Catalog Archive v1.1")[0..8];
+pub const PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1: [u8; 8] = [179, 236, 113, 240, 173, 236, 2, 96];
 lazy_static::lazy_static! {
 // Map content magic numbers to human readable names.
@@ -60,7 +66,9 @@ lazy_static::lazy_static! {
 map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, "Proxmox Backup Chunk Archive v1.1");
 map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, "Proxmox Backup Snapshot Archive v1.0");
 map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, "Proxmox Backup Snapshot Archive v1.1");
+map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2, "Proxmox Backup Snapshot Archive v1.2");
 map.insert(&PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, "Proxmox Backup Catalog Archive v1.0");
+map.insert(&PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1, "Proxmox Backup Catalog Archive v1.1");
 map
 };
 }
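
as the comments above show, each magic is just the first 8 bytes of the SHA-256
digest of its version string; a small sketch of that derivation (assuming the
openssl crate, which the comments already reference):

    fn archive_magic(version_string: &[u8]) -> [u8; 8] {
        // e.g. b"Proxmox Backup Catalog Archive v1.1"
        let digest = openssl::sha::sha256(version_string);
        let mut magic = [0u8; 8];
        magic.copy_from_slice(&digest[0..8]);
        magic
    }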

View File

@@ -8,7 +8,10 @@ use proxmox_uuid::Uuid;
 use pbs_datastore::SnapshotReader;
 use pbs_tape::{MediaContentHeader, TapeWrite, PROXMOX_TAPE_BLOCK_SIZE};
-use crate::tape::file_formats::{SnapshotArchiveHeader, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1};
+use crate::tape::file_formats::{
+SnapshotArchiveHeader, PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
+PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2,
+};
 /// Write a set of files as `pxar` archive to the tape
 ///
@@ -34,10 +37,13 @@ pub fn tape_write_snapshot_archive<'a>(
 .as_bytes()
 .to_vec();
-let header = MediaContentHeader::new(
-PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
-header_data.len() as u32,
-);
+let version_magic = if backup_dir.backup_ns().is_root() {
+PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1
+} else {
+PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_2
+};
+let header = MediaContentHeader::new(version_magic, header_data.len() as u32);
 let content_uuid = header.uuid.into();
 let root_metadata = pxar::Metadata::dir_builder(0o0664).build();

View File

@@ -27,6 +27,10 @@ use crate::tape::{
 MediaCatalog, MediaId, MediaPool, COMMIT_BLOCK_SIZE, MAX_CHUNK_ARCHIVE_SIZE, TAPE_STATUS_DIR,
 };
+use super::file_formats::{
+PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0, PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1,
+};
 struct PoolWriterState {
 drive: Box<dyn TapeDriver>,
 // Media Uuid from loaded media
@@ -44,6 +48,7 @@ pub struct PoolWriter {
 status: Option<PoolWriterState>,
 catalog_set: Arc<Mutex<CatalogSet>>,
 notify_email: Option<String>,
+ns_magic: bool,
 }
 impl PoolWriter {
@@ -53,6 +58,7 @@ impl PoolWriter {
 worker: &WorkerTask,
 notify_email: Option<String>,
 force_media_set: bool,
+ns_magic: bool,
 ) -> Result<Self, Error> {
 let current_time = proxmox_time::epoch_i64();
@@ -80,6 +86,7 @@ impl PoolWriter {
 status: None,
 catalog_set: Arc::new(Mutex::new(catalog_set)),
 notify_email,
+ns_magic,
 })
 }
@@ -310,6 +317,8 @@ impl PoolWriter {
 /// archive is marked incomplete. The caller should mark the media
 /// as full and try again using another media.
 pub fn append_catalog_archive(&mut self, worker: &WorkerTask) -> Result<bool, Error> {
+let catalog_magic = self.catalog_version();
 let status = match self.status {
 Some(ref mut status) => status,
 None => bail!("PoolWriter - no media loaded"),
@@ -344,8 +353,15 @@
 let mut file = Self::open_catalog_file(uuid)?;
-let done = tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)?
-.is_some();
+let done = tape_write_catalog(
+writer.as_mut(),
+uuid,
+media_set.uuid(),
+seq_nr,
+&mut file,
+catalog_magic,
+)?
+.is_some();
 Ok(done)
 }
@@ -360,6 +376,8 @@
 }
 media_list = &media_list[..(media_list.len() - 1)];
+let catalog_magic = self.catalog_version();
 let status = match self.status {
 Some(ref mut status) => status,
 None => bail!("PoolWriter - no media loaded"),
@@ -379,8 +397,15 @@
 task_log!(worker, "write catalog for previous media: {}", uuid);
-if tape_write_catalog(writer.as_mut(), uuid, media_set.uuid(), seq_nr, &mut file)?
-.is_none()
+if tape_write_catalog(
+writer.as_mut(),
+uuid,
+media_set.uuid(),
+seq_nr,
+&mut file,
+catalog_magic,
+)?
+.is_none()
 {
 bail!("got EOM while writing start catalog");
 }
@@ -499,6 +524,14 @@
 ) -> Result<(std::thread::JoinHandle<()>, NewChunksIterator), Error> {
 NewChunksIterator::spawn(datastore, snapshot_reader, Arc::clone(&self.catalog_set))
 }
+pub(crate) fn catalog_version(&self) -> [u8; 8] {
+if self.ns_magic {
+PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_1
+} else {
+PROXMOX_BACKUP_CATALOG_ARCHIVE_MAGIC_1_0
+}
+}
 }
 /// write up to <max_size> of chunks