tape: store datastore name in tape archives and media catalog
So that we can store multiple datastores on a single media set. Deduplication is now per datastore (not per media set).
parent 0e2bf3aa1d
commit 54722acada
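Note on the deduplication change: because the media catalog now keeps one snapshot/chunk index per datastore, a chunk digest only counts as already written if it was written for the same datastore. The following is a minimal, hypothetical sketch of that keying (a reduced model, not the actual MediaCatalog API):

    use std::collections::HashMap;

    // Reduced model: one content index per datastore, so lookups are keyed
    // by (datastore, digest) instead of by digest alone.
    struct DatastoreContent {
        snapshot_index: HashMap<String, u64>, // snapshot name => tape file number
        chunk_index: HashMap<[u8; 32], u64>,  // chunk digest => tape file number
    }

    struct Catalog {
        content: HashMap<String, DatastoreContent>, // datastore name => content
    }

    impl Catalog {
        // A chunk is only deduplicated against archives of the same datastore.
        fn contains_chunk(&self, store: &str, digest: &[u8; 32]) -> bool {
            self.content
                .get(store)
                .map_or(false, |c| c.chunk_index.contains_key(digest))
        }
    }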
@@ -402,6 +402,8 @@ fn backup_worker(
         task_log!(worker, "latest-only: true (only considering latest snapshots)");
     }
 
+    let datastore_name = datastore.name();
+
     let mut errors = false;
 
     for (group_number, group) in group_list.into_iter().enumerate() {
@@ -416,7 +418,7 @@ fn backup_worker(
         if latest_only {
             progress.group_snapshots = 1;
             if let Some(info) = snapshot_list.pop() {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                     task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
@@ -433,7 +435,7 @@ fn backup_worker(
         } else {
             progress.group_snapshots = snapshot_list.len() as u64;
             for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                     task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
@@ -531,7 +533,7 @@ pub fn backup_snapshot(
 
         worker.check_abort()?;
 
-        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter)?;
+        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;
 
         if leom {
             pool_writer.set_media_status_full(&uuid)?;
@@ -434,27 +434,30 @@ pub fn list_content(
 
         let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;
 
-        for snapshot in catalog.snapshot_index().keys() {
-            let backup_dir: BackupDir = snapshot.parse()?;
+        for (store, content) in catalog.content() {
+            for snapshot in content.snapshot_index.keys() {
+
+                let backup_dir: BackupDir = snapshot.parse()?;
 
                 if let Some(ref backup_type) = filter.backup_type {
                     if backup_dir.group().backup_type() != backup_type { continue; }
                 }
                 if let Some(ref backup_id) = filter.backup_id {
                     if backup_dir.group().backup_id() != backup_id { continue; }
                 }
 
                 list.push(MediaContentEntry {
                     uuid: media_id.label.uuid.clone(),
                     label_text: media_id.label.label_text.to_string(),
                     pool: set.pool.clone(),
                     media_set_name: media_set_name.clone(),
                     media_set_uuid: set.uuid.clone(),
                     media_set_ctime: set.ctime,
                     seq_nr: set.seq_nr,
                     snapshot: snapshot.to_owned(),
+                    store: store.to_owned(),
                     backup_time: backup_dir.backup_time(),
                 });
+            }
         }
     }
@@ -71,11 +71,15 @@ use crate::{
     file_formats::{
         PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
         PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
+        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
         PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
         PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
         PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
+        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
         MediaContentHeader,
+        ChunkArchiveHeader,
         ChunkArchiveDecoder,
+        SnapshotArchiveHeader,
     },
     drive::{
         TapeDriver,
@@ -362,10 +366,18 @@ fn restore_archive<'a>(
             bail!("unexpected content magic (label)");
         }
         PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
-            let snapshot = reader.read_exact_allocated(header.size as usize)?;
-            let snapshot = std::str::from_utf8(&snapshot)
-                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
-            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);
+            bail!("unexpected snapshot archive version (v1.0)");
+        }
+        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
+            let header_data = reader.read_exact_allocated(header.size as usize)?;
+
+            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
+                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;
+
+            let datastore_name = archive_header.store;
+            let snapshot = archive_header.snapshot;
+
+            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);
 
             let backup_dir: BackupDir = snapshot.parse()?;
 
@@ -393,7 +405,7 @@ fn restore_archive<'a>(
                         task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                     }
                     Ok(true) => {
-                        catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
+                        catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                         catalog.commit_if_large()?;
                     }
                 }
@@ -403,17 +415,26 @@ fn restore_archive<'a>(
 
                 reader.skip_to_end()?; // read all data
                 if let Ok(false) = reader.is_incomplete() {
-                    catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
+                    catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                     catalog.commit_if_large()?;
                 }
             }
         }
         PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
+            bail!("unexpected chunk archive version (v1.0)");
+        }
+        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
+            let header_data = reader.read_exact_allocated(header.size as usize)?;
 
-            task_log!(worker, "Found chunk archive: {}", current_file_number);
+            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
+                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;
+
+            let source_datastore = archive_header.store;
+
+            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
             let datastore = target.as_ref().map(|t| t.0);
 
             if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
-                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
+                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number, &source_datastore)?;
                 for digest in chunks.iter() {
                     catalog.register_chunk(&digest)?;
                 }
@@ -144,6 +144,8 @@ pub struct MediaContentEntry {
     pub seq_nr: u64,
     /// Media Pool
     pub pool: String,
+    /// Datastore Name
+    pub store: String,
     /// Backup snapshot
     pub snapshot: String,
     /// Snapshot creation time (epoch)
@@ -14,9 +14,10 @@ use crate::tape::{
     TapeWrite,
     file_formats::{
         PROXMOX_TAPE_BLOCK_SIZE,
-        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
+        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
         PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0,
         MediaContentHeader,
+        ChunkArchiveHeader,
         ChunkArchiveEntryHeader,
     },
 };
@@ -36,13 +37,20 @@ pub struct ChunkArchiveWriter<'a> {
 
 impl <'a> ChunkArchiveWriter<'a> {
 
-    pub const MAGIC: [u8; 8] = PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0;
+    pub const MAGIC: [u8; 8] = PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1;
 
     /// Creates a new instance
-    pub fn new(mut writer: Box<dyn TapeWrite + 'a>, close_on_leom: bool) -> Result<(Self,Uuid), Error> {
+    pub fn new(
+        mut writer: Box<dyn TapeWrite + 'a>,
+        store: &str,
+        close_on_leom: bool,
+    ) -> Result<(Self,Uuid), Error> {
 
-        let header = MediaContentHeader::new(Self::MAGIC, 0);
-        writer.write_header(&header, &[])?;
+        let archive_header = ChunkArchiveHeader { store: store.to_string() };
+        let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec();
+
+        let header = MediaContentHeader::new(Self::MAGIC, header_data.len() as u32);
+        writer.write_header(&header, &header_data)?;
 
         let me = Self {
             writer: Some(writer),
@@ -44,12 +44,19 @@ pub const PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0: [u8; 8] = [42, 5, 191, 60, 176,
 pub const PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0: [u8; 8] = [8, 96, 99, 249, 47, 151, 83, 216];
 
 // openssl::sha::sha256(b"Proxmox Backup Chunk Archive v1.0")[0..8]
+// only used in unreleased version - no longer supported
 pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0: [u8; 8] = [62, 173, 167, 95, 49, 76, 6, 110];
+// openssl::sha::sha256(b"Proxmox Backup Chunk Archive v1.1")[0..8]
+pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1: [u8; 8] = [109, 49, 99, 109, 215, 2, 131, 191];
 
 // openssl::sha::sha256(b"Proxmox Backup Chunk Archive Entry v1.0")[0..8]
 pub const PROXMOX_BACKUP_CHUNK_ARCHIVE_ENTRY_MAGIC_1_0: [u8; 8] = [72, 87, 109, 242, 222, 66, 143, 220];
 
 // openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.0")[0..8];
+// only used in unreleased version - no longer supported
 pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0: [u8; 8] = [9, 182, 2, 31, 125, 232, 114, 133];
+// openssl::sha::sha256(b"Proxmox Backup Snapshot Archive v1.1")[0..8];
+pub const PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1: [u8; 8] = [218, 22, 21, 208, 17, 226, 154, 98];
 
 lazy_static::lazy_static!{
     // Map content magic numbers to human readable names.
@@ -58,7 +65,9 @@ lazy_static::lazy_static!{
     map.insert(&PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0, "Proxmox Backup Tape Label v1.0");
     map.insert(&PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0, "Proxmox Backup MediaSet Label v1.0");
     map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0, "Proxmox Backup Chunk Archive v1.0");
+    map.insert(&PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1, "Proxmox Backup Chunk Archive v1.1");
     map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, "Proxmox Backup Snapshot Archive v1.0");
+    map.insert(&PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, "Proxmox Backup Snapshot Archive v1.1");
     map
     };
 }
@@ -172,6 +181,13 @@ impl MediaContentHeader {
     }
 }
 
+#[derive(Deserialize, Serialize)]
+/// Header for chunk archives
+pub struct ChunkArchiveHeader {
+    // Datastore name
+    pub store: String,
+}
+
 #[derive(Endian)]
 #[repr(C,packed)]
 /// Header for data blobs inside a chunk archive
@@ -184,6 +200,15 @@ pub struct ChunkArchiveEntryHeader {
     pub size: u64,
 }
 
+#[derive(Deserialize, Serialize)]
+/// Header for snapshot archives
+pub struct SnapshotArchiveHeader {
+    /// Snapshot name
+    pub snapshot: String,
+    /// Datastore name
+    pub store: String,
+}
+
 #[derive(Serialize,Deserialize,Clone,Debug)]
 /// Media Label
 ///
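Both new archive headers are serialized as JSON and stored in the payload of the MediaContentHeader, as the writer/reader hunks elsewhere in this commit do with serde_json. A self-contained round-trip sketch (local struct mirroring SnapshotArchiveHeader; the sample snapshot name is illustrative only):

    use serde::{Deserialize, Serialize};

    #[derive(Deserialize, Serialize)]
    struct SnapshotArchiveHeader {
        snapshot: String,
        store: String,
    }

    fn main() -> Result<(), serde_json::Error> {
        // Writer side: encode the header as pretty-printed JSON bytes.
        let header = SnapshotArchiveHeader {
            snapshot: "vm/100/2021-02-04T08:09:38Z".to_string(), // example name
            store: "store1".to_string(),                         // example name
        };
        let header_data = serde_json::to_string_pretty(&header)?.into_bytes();

        // Reader side (as in restore_archive): parse the bytes back.
        let parsed: SnapshotArchiveHeader = serde_json::from_slice(&header_data)?;
        assert_eq!(parsed.store, "store1");
        Ok(())
    }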
@@ -12,11 +12,13 @@ use crate::tape::{
     SnapshotReader,
     file_formats::{
         PROXMOX_TAPE_BLOCK_SIZE,
-        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
+        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
         MediaContentHeader,
+        SnapshotArchiveHeader,
     },
 };
 
 
 /// Write a set of files as `pxar` archive to the tape
 ///
 /// This ignores file attributes like ACLs and xattrs.
|
|||||||
) -> Result<Option<Uuid>, std::io::Error> {
|
) -> Result<Option<Uuid>, std::io::Error> {
|
||||||
|
|
||||||
let snapshot = snapshot_reader.snapshot().to_string();
|
let snapshot = snapshot_reader.snapshot().to_string();
|
||||||
|
let store = snapshot_reader.datastore_name().to_string();
|
||||||
let file_list = snapshot_reader.file_list();
|
let file_list = snapshot_reader.file_list();
|
||||||
|
|
||||||
let header_data = snapshot.as_bytes().to_vec();
|
let archive_header = SnapshotArchiveHeader { snapshot, store };
|
||||||
|
|
||||||
|
let header_data = serde_json::to_string_pretty(&archive_header)?.as_bytes().to_vec();
|
||||||
|
|
||||||
let header = MediaContentHeader::new(
|
let header = MediaContentHeader::new(
|
||||||
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0, header_data.len() as u32);
|
PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1, header_data.len() as u32);
|
||||||
let content_uuid = header.uuid.into();
|
let content_uuid = header.uuid.into();
|
||||||
|
|
||||||
let root_metadata = pxar::Metadata::dir_builder(0o0664).build();
|
let root_metadata = pxar::Metadata::dir_builder(0o0664).build();
|
||||||
|
@@ -29,6 +29,20 @@ use crate::{
     },
 };
 
+pub struct DatastoreContent {
+    pub snapshot_index: HashMap<String, u64>, // snapshot => file_nr
+    pub chunk_index: HashMap<[u8;32], u64>, // chunk => file_nr
+}
+
+impl DatastoreContent {
+
+    pub fn new() -> Self {
+        Self {
+            chunk_index: HashMap::new(),
+            snapshot_index: HashMap::new(),
+        }
+    }
+}
+
 /// The Media Catalog
 ///
@@ -44,13 +58,11 @@ pub struct MediaCatalog {
 
     log_to_stdout: bool,
 
-    current_archive: Option<(Uuid, u64)>,
+    current_archive: Option<(Uuid, u64, String)>, // (uuid, file_nr, store)
 
     last_entry: Option<(Uuid, u64)>,
 
-    chunk_index: HashMap<[u8;32], u64>,
-
-    snapshot_index: HashMap<String, u64>,
+    content: HashMap<String, DatastoreContent>,
 
     pending: Vec<u8>,
 }
@@ -59,8 +71,12 @@ impl MediaCatalog {
 
     /// Magic number for media catalog files.
     // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.0")[0..8]
+    // Note: this version did not store datastore names (not supported anymore)
     pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0: [u8; 8] = [221, 29, 164, 1, 59, 69, 19, 40];
 
+    // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.1")[0..8]
+    pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] = [76, 142, 232, 193, 32, 168, 137, 113];
+
     /// List media with catalogs
     pub fn media_with_catalogs(base_path: &Path) -> Result<HashSet<Uuid>, Error> {
         let mut catalogs = HashSet::new();
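As the in-code comments note, each magic number is the first eight bytes of a SHA-256 digest over a version string. A small sketch to regenerate one (assumes the rust-openssl crate, which the comments already reference):

    // Prints the first 8 bytes of sha256("Proxmox Backup Media Catalog v1.1"),
    // which should reproduce PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1 above.
    fn main() {
        let digest = openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.1");
        println!("{:?}", &digest[0..8]);
    }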
@@ -149,15 +165,14 @@ impl MediaCatalog {
             log_to_stdout: false,
             current_archive: None,
             last_entry: None,
-            chunk_index: HashMap::new(),
-            snapshot_index: HashMap::new(),
+            content: HashMap::new(),
             pending: Vec::new(),
         };
 
         let found_magic_number = me.load_catalog(&mut file)?;
 
         if !found_magic_number {
-            me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0);
+            me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
         }
 
         if write {
|
|||||||
log_to_stdout: false,
|
log_to_stdout: false,
|
||||||
current_archive: None,
|
current_archive: None,
|
||||||
last_entry: None,
|
last_entry: None,
|
||||||
chunk_index: HashMap::new(),
|
content: HashMap::new(),
|
||||||
snapshot_index: HashMap::new(),
|
|
||||||
pending: Vec::new(),
|
pending: Vec::new(),
|
||||||
};
|
};
|
||||||
|
|
||||||
me.log_to_stdout = log_to_stdout;
|
me.log_to_stdout = log_to_stdout;
|
||||||
|
|
||||||
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0);
|
me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
|
||||||
|
|
||||||
me.register_label(&media_id.label.uuid, 0)?;
|
me.register_label(&media_id.label.uuid, 0)?;
|
||||||
|
|
||||||
@ -265,8 +279,8 @@ impl MediaCatalog {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Accessor to content list
|
/// Accessor to content list
|
||||||
pub fn snapshot_index(&self) -> &HashMap<String, u64> {
|
pub fn content(&self) -> &HashMap<String, DatastoreContent> {
|
||||||
&self.snapshot_index
|
&self.content
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Commit pending changes
|
/// Commit pending changes
|
||||||
@ -319,23 +333,35 @@ impl MediaCatalog {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Test if the catalog already contain a snapshot
|
/// Test if the catalog already contain a snapshot
|
||||||
pub fn contains_snapshot(&self, snapshot: &str) -> bool {
|
pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
|
||||||
self.snapshot_index.contains_key(snapshot)
|
match self.content.get(store) {
|
||||||
|
None => false,
|
||||||
|
Some(content) => content.snapshot_index.contains_key(snapshot),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the chunk archive file number
|
/// Returns the snapshot archive file number
|
||||||
pub fn lookup_snapshot(&self, snapshot: &str) -> Option<u64> {
|
pub fn lookup_snapshot(&self, store: &str, snapshot: &str) -> Option<u64> {
|
||||||
self.snapshot_index.get(snapshot).copied()
|
match self.content.get(store) {
|
||||||
|
None => None,
|
||||||
|
Some(content) => content.snapshot_index.get(snapshot).copied(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Test if the catalog already contain a chunk
|
/// Test if the catalog already contain a chunk
|
||||||
pub fn contains_chunk(&self, digest: &[u8;32]) -> bool {
|
pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
|
||||||
self.chunk_index.contains_key(digest)
|
match self.content.get(store) {
|
||||||
|
None => false,
|
||||||
|
Some(content) => content.chunk_index.contains_key(digest),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the chunk archive file number
|
/// Returns the chunk archive file number
|
||||||
pub fn lookup_chunk(&self, digest: &[u8;32]) -> Option<u64> {
|
pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option<u64> {
|
||||||
self.chunk_index.get(digest).copied()
|
match self.content.get(store) {
|
||||||
|
None => None,
|
||||||
|
Some(content) => content.chunk_index.get(digest).copied(),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn check_register_label(&self, file_number: u64) -> Result<(), Error> {
|
fn check_register_label(&self, file_number: u64) -> Result<(), Error> {
|
||||||
@ -395,9 +421,9 @@ impl MediaCatalog {
|
|||||||
digest: &[u8;32],
|
digest: &[u8;32],
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
let file_number = match self.current_archive {
|
let (file_number, store) = match self.current_archive {
|
||||||
None => bail!("register_chunk failed: no archive started"),
|
None => bail!("register_chunk failed: no archive started"),
|
||||||
Some((_, file_number)) => file_number,
|
Some((_, file_number, ref store)) => (file_number, store),
|
||||||
};
|
};
|
||||||
|
|
||||||
if self.log_to_stdout {
|
if self.log_to_stdout {
|
||||||
@ -407,7 +433,12 @@ impl MediaCatalog {
|
|||||||
self.pending.push(b'C');
|
self.pending.push(b'C');
|
||||||
self.pending.extend(digest);
|
self.pending.extend(digest);
|
||||||
|
|
||||||
self.chunk_index.insert(*digest, file_number);
|
match self.content.get_mut(store) {
|
||||||
|
None => bail!("storage {} not registered - internal error", store),
|
||||||
|
Some(content) => {
|
||||||
|
content.chunk_index.insert(*digest, file_number);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@@ -440,24 +471,29 @@ impl MediaCatalog {
         &mut self,
         uuid: Uuid, // Uuid from MediaContentHeader
         file_number: u64,
+        store: &str,
     ) -> Result<(), Error> {
 
         self.check_start_chunk_archive(file_number)?;
 
         let entry = ChunkArchiveStart {
             file_number,
             uuid: *uuid.as_bytes(),
+            store_name_len: u8::try_from(store.len())?,
         };
 
         if self.log_to_stdout {
-            println!("A|{}|{}", file_number, uuid.to_string());
+            println!("A|{}|{}|{}", file_number, uuid.to_string(), store);
         }
 
         self.pending.push(b'A');
 
         unsafe { self.pending.write_le_value(entry)?; }
+        self.pending.extend(store.as_bytes());
 
-        self.current_archive = Some((uuid, file_number));
+        self.content.entry(store.to_string()).or_insert(DatastoreContent::new());
+
+        self.current_archive = Some((uuid, file_number, store.to_string()));
 
         Ok(())
     }
@@ -466,7 +502,7 @@ impl MediaCatalog {
 
         match self.current_archive {
             None => bail!("end_chunk archive failed: not started"),
-            Some((ref expected_uuid, expected_file_number)) => {
+            Some((ref expected_uuid, expected_file_number, ..)) => {
                 if uuid != expected_uuid {
                     bail!("end_chunk_archive failed: got unexpected uuid");
                 }
@@ -476,7 +512,6 @@ impl MediaCatalog {
                 }
             }
         }
 
         Ok(())
     }
@@ -485,7 +520,7 @@ impl MediaCatalog {
 
         match self.current_archive.take() {
             None => bail!("end_chunk_archive failed: not started"),
-            Some((uuid, file_number)) => {
+            Some((uuid, file_number, ..)) => {
 
                 let entry = ChunkArchiveEnd {
                     file_number,
|
|||||||
&mut self,
|
&mut self,
|
||||||
uuid: Uuid, // Uuid form MediaContentHeader
|
uuid: Uuid, // Uuid form MediaContentHeader
|
||||||
file_number: u64,
|
file_number: u64,
|
||||||
|
store: &str,
|
||||||
snapshot: &str,
|
snapshot: &str,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
|
|
||||||
@ -547,19 +583,25 @@ impl MediaCatalog {
|
|||||||
let entry = SnapshotEntry {
|
let entry = SnapshotEntry {
|
||||||
file_number,
|
file_number,
|
||||||
uuid: *uuid.as_bytes(),
|
uuid: *uuid.as_bytes(),
|
||||||
|
store_name_len: u8::try_from(store.len())?,
|
||||||
name_len: u16::try_from(snapshot.len())?,
|
name_len: u16::try_from(snapshot.len())?,
|
||||||
};
|
};
|
||||||
|
|
||||||
if self.log_to_stdout {
|
if self.log_to_stdout {
|
||||||
println!("S|{}|{}|{}", file_number, uuid.to_string(), snapshot);
|
println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, snapshot);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.pending.push(b'S');
|
self.pending.push(b'S');
|
||||||
|
|
||||||
unsafe { self.pending.write_le_value(entry)?; }
|
unsafe { self.pending.write_le_value(entry)?; }
|
||||||
|
self.pending.extend(store.as_bytes());
|
||||||
|
self.pending.push(b':');
|
||||||
self.pending.extend(snapshot.as_bytes());
|
self.pending.extend(snapshot.as_bytes());
|
||||||
|
|
||||||
self.snapshot_index.insert(snapshot.to_string(), file_number);
|
let content = self.content.entry(store.to_string())
|
||||||
|
.or_insert(DatastoreContent::new());
|
||||||
|
|
||||||
|
content.snapshot_index.insert(snapshot.to_string(), file_number);
|
||||||
|
|
||||||
self.last_entry = Some((uuid, file_number));
|
self.last_entry = Some((uuid, file_number));
|
||||||
|
|
||||||
@ -581,7 +623,11 @@ impl MediaCatalog {
|
|||||||
Ok(true) => { /* OK */ }
|
Ok(true) => { /* OK */ }
|
||||||
Err(err) => bail!("read failed - {}", err),
|
Err(err) => bail!("read failed - {}", err),
|
||||||
}
|
}
|
||||||
if magic != Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0 {
|
if magic == Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0 {
|
||||||
|
// only use in unreleased versions
|
||||||
|
bail!("old catalog format (v1.0) is no longer supported");
|
||||||
|
}
|
||||||
|
if magic != Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1 {
|
||||||
bail!("wrong magic number");
|
bail!("wrong magic number");
|
||||||
}
|
}
|
||||||
found_magic_number = true;
|
found_magic_number = true;
|
||||||
@ -597,23 +643,35 @@ impl MediaCatalog {
|
|||||||
|
|
||||||
match entry_type[0] {
|
match entry_type[0] {
|
||||||
b'C' => {
|
b'C' => {
|
||||||
let file_number = match self.current_archive {
|
let (file_number, store) = match self.current_archive {
|
||||||
None => bail!("register_chunk failed: no archive started"),
|
None => bail!("register_chunk failed: no archive started"),
|
||||||
Some((_, file_number)) => file_number,
|
Some((_, file_number, ref store)) => (file_number, store),
|
||||||
};
|
};
|
||||||
let mut digest = [0u8; 32];
|
let mut digest = [0u8; 32];
|
||||||
file.read_exact(&mut digest)?;
|
file.read_exact(&mut digest)?;
|
||||||
self.chunk_index.insert(digest, file_number);
|
match self.content.get_mut(store) {
|
||||||
|
None => bail!("storage {} not registered - internal error", store),
|
||||||
|
Some(content) => {
|
||||||
|
content.chunk_index.insert(digest, file_number);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
b'A' => {
|
b'A' => {
|
||||||
let entry: ChunkArchiveStart = unsafe { file.read_le_value()? };
|
let entry: ChunkArchiveStart = unsafe { file.read_le_value()? };
|
||||||
let file_number = entry.file_number;
|
let file_number = entry.file_number;
|
||||||
let uuid = Uuid::from(entry.uuid);
|
let uuid = Uuid::from(entry.uuid);
|
||||||
|
let store_name_len = entry.store_name_len as usize;
|
||||||
|
|
||||||
|
let store = file.read_exact_allocated(store_name_len)?;
|
||||||
|
let store = std::str::from_utf8(&store)?;
|
||||||
|
|
||||||
self.check_start_chunk_archive(file_number)?;
|
self.check_start_chunk_archive(file_number)?;
|
||||||
|
|
||||||
self.current_archive = Some((uuid, file_number));
|
self.content.entry(store.to_string())
|
||||||
}
|
.or_insert(DatastoreContent::new());
|
||||||
|
|
||||||
|
self.current_archive = Some((uuid, file_number, store.to_string()));
|
||||||
|
}
|
||||||
b'E' => {
|
b'E' => {
|
||||||
let entry: ChunkArchiveEnd = unsafe { file.read_le_value()? };
|
let entry: ChunkArchiveEnd = unsafe { file.read_le_value()? };
|
||||||
let file_number = entry.file_number;
|
let file_number = entry.file_number;
|
||||||
@@ -627,15 +685,22 @@ impl MediaCatalog {
                 b'S' => {
                     let entry: SnapshotEntry = unsafe { file.read_le_value()? };
                     let file_number = entry.file_number;
+                    let store_name_len = entry.store_name_len;
                     let name_len = entry.name_len;
                     let uuid = Uuid::from(entry.uuid);
 
+                    let store = file.read_exact_allocated(store_name_len as usize + 1)?;
+                    let store = std::str::from_utf8(&store[..store_name_len as usize])?;
+
                     let snapshot = file.read_exact_allocated(name_len.into())?;
                     let snapshot = std::str::from_utf8(&snapshot)?;
 
                     self.check_register_snapshot(file_number, snapshot)?;
 
-                    self.snapshot_index.insert(snapshot.to_string(), file_number);
+                    let content = self.content.entry(store.to_string())
+                        .or_insert(DatastoreContent::new());
+
+                    content.snapshot_index.insert(snapshot.to_string(), file_number);
 
                     self.last_entry = Some((uuid, file_number));
                 }
@@ -693,9 +758,9 @@ impl MediaSetCatalog {
     }
 
     /// Test if the catalog already contains a snapshot
-    pub fn contains_snapshot(&self, snapshot: &str) -> bool {
+    pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
         for catalog in self.catalog_list.values() {
-            if catalog.contains_snapshot(snapshot) {
+            if catalog.contains_snapshot(store, snapshot) {
                 return true;
             }
         }
@@ -703,9 +768,9 @@ impl MediaSetCatalog {
     }
 
     /// Test if the catalog already contains a chunk
-    pub fn contains_chunk(&self, digest: &[u8;32]) -> bool {
+    pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
         for catalog in self.catalog_list.values() {
-            if catalog.contains_chunk(digest) {
+            if catalog.contains_chunk(store, digest) {
                 return true;
             }
         }
@@ -727,6 +792,8 @@ struct LabelEntry {
 struct ChunkArchiveStart {
     file_number: u64,
     uuid: [u8;16],
+    store_name_len: u8,
+    /* datastore name follows */
 }
 
 #[derive(Endian)]
@@ -741,6 +808,7 @@ struct ChunkArchiveEnd{
 struct SnapshotEntry{
     file_number: u64,
     uuid: [u8;16],
+    store_name_len: u8,
     name_len: u16,
-    /* snapshot name follows */
+    /* datastore name, ':', snapshot name follows */
 }
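Taken together with the register_snapshot hunk above, a v1.1 'S' record in the catalog's pending buffer is a one-byte tag, the packed little-endian SnapshotEntry, then the datastore name, a ':' separator, and the snapshot name. A hypothetical standalone encoder mirroring that layout (a sketch under those assumptions, not the actual catalog code):

    fn encode_snapshot_entry(
        file_number: u64,
        uuid: [u8; 16],
        store: &str,
        snapshot: &str,
    ) -> Vec<u8> {
        let mut out = Vec::new();
        out.push(b'S');                                     // entry type tag
        out.extend(&file_number.to_le_bytes());             // SnapshotEntry.file_number
        out.extend(&uuid);                                  // SnapshotEntry.uuid
        out.push(store.len() as u8);                        // SnapshotEntry.store_name_len
        out.extend(&(snapshot.len() as u16).to_le_bytes()); // SnapshotEntry.name_len
        out.extend(store.as_bytes());                       // datastore name
        out.push(b':');                                     // separator
        out.extend(snapshot.as_bytes());                    // snapshot name
        out
    }

The reader side correspondingly consumes store_name_len + 1 bytes (name plus separator) before the snapshot name, as the load_catalog hunk shows.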
@@ -50,23 +50,23 @@ pub struct CatalogBuilder {
 impl CatalogBuilder {
 
     /// Test if the catalog already contains a snapshot
-    pub fn contains_snapshot(&self, snapshot: &str) -> bool {
+    pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
         if let Some(ref catalog) = self.catalog {
-            if catalog.contains_snapshot(snapshot) {
+            if catalog.contains_snapshot(store, snapshot) {
                 return true;
             }
         }
-        self.media_set_catalog.contains_snapshot(snapshot)
+        self.media_set_catalog.contains_snapshot(store, snapshot)
     }
 
     /// Test if the catalog already contains a chunk
-    pub fn contains_chunk(&self, digest: &[u8;32]) -> bool {
+    pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
         if let Some(ref catalog) = self.catalog {
-            if catalog.contains_chunk(digest) {
+            if catalog.contains_chunk(store, digest) {
                 return true;
             }
         }
-        self.media_set_catalog.contains_chunk(digest)
+        self.media_set_catalog.contains_chunk(store, digest)
     }
 
     /// Add a new catalog, move the old on to the read-only set
|
|||||||
&mut self,
|
&mut self,
|
||||||
uuid: Uuid, // Uuid form MediaContentHeader
|
uuid: Uuid, // Uuid form MediaContentHeader
|
||||||
file_number: u64,
|
file_number: u64,
|
||||||
|
store: &str,
|
||||||
snapshot: &str,
|
snapshot: &str,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
match self.catalog {
|
match self.catalog {
|
||||||
Some(ref mut catalog) => {
|
Some(ref mut catalog) => {
|
||||||
catalog.register_snapshot(uuid, file_number, snapshot)?;
|
catalog.register_snapshot(uuid, file_number, store, snapshot)?;
|
||||||
}
|
}
|
||||||
None => bail!("no catalog loaded - internal error"),
|
None => bail!("no catalog loaded - internal error"),
|
||||||
}
|
}
|
||||||
@ -106,11 +107,12 @@ impl CatalogBuilder {
|
|||||||
&mut self,
|
&mut self,
|
||||||
uuid: Uuid, // Uuid form MediaContentHeader
|
uuid: Uuid, // Uuid form MediaContentHeader
|
||||||
file_number: u64,
|
file_number: u64,
|
||||||
|
store: &str,
|
||||||
chunk_list: &[[u8; 32]],
|
chunk_list: &[[u8; 32]],
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
match self.catalog {
|
match self.catalog {
|
||||||
Some(ref mut catalog) => {
|
Some(ref mut catalog) => {
|
||||||
catalog.start_chunk_archive(uuid, file_number)?;
|
catalog.start_chunk_archive(uuid, file_number, store)?;
|
||||||
for digest in chunk_list {
|
for digest in chunk_list {
|
||||||
catalog.register_chunk(digest)?;
|
catalog.register_chunk(digest)?;
|
||||||
}
|
}
|
||||||
@ -157,6 +159,8 @@ impl NewChunksIterator {
|
|||||||
|
|
||||||
let mut chunk_index: HashSet<[u8;32]> = HashSet::new();
|
let mut chunk_index: HashSet<[u8;32]> = HashSet::new();
|
||||||
|
|
||||||
|
let datastore_name = snapshot_reader.datastore_name();
|
||||||
|
|
||||||
let result: Result<(), Error> = proxmox::try_block!({
|
let result: Result<(), Error> = proxmox::try_block!({
|
||||||
|
|
||||||
let mut chunk_iter = snapshot_reader.chunk_iterator()?;
|
let mut chunk_iter = snapshot_reader.chunk_iterator()?;
|
||||||
@ -174,7 +178,7 @@ impl NewChunksIterator {
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
if catalog_builder.lock().unwrap().contains_chunk(&digest) {
|
if catalog_builder.lock().unwrap().contains_chunk(&datastore_name, &digest) {
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -279,8 +283,8 @@ impl PoolWriter {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn contains_snapshot(&self, snapshot: &str) -> bool {
|
pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
|
||||||
self.catalog_builder.lock().unwrap().contains_snapshot(snapshot)
|
self.catalog_builder.lock().unwrap().contains_snapshot(store, snapshot)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Eject media and drop PoolWriterState (close drive)
|
/// Eject media and drop PoolWriterState (close drive)
|
||||||
@ -462,6 +466,7 @@ impl PoolWriter {
|
|||||||
self.catalog_builder.lock().unwrap().register_snapshot(
|
self.catalog_builder.lock().unwrap().register_snapshot(
|
||||||
content_uuid,
|
content_uuid,
|
||||||
current_file_number,
|
current_file_number,
|
||||||
|
&snapshot_reader.datastore_name().to_string(),
|
||||||
&snapshot_reader.snapshot().to_string(),
|
&snapshot_reader.snapshot().to_string(),
|
||||||
)?;
|
)?;
|
||||||
(true, writer.bytes_written())
|
(true, writer.bytes_written())
|
||||||
@ -489,6 +494,7 @@ impl PoolWriter {
|
|||||||
&mut self,
|
&mut self,
|
||||||
worker: &WorkerTask,
|
worker: &WorkerTask,
|
||||||
chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
|
chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
|
||||||
|
store: &str,
|
||||||
) -> Result<(bool, usize), Error> {
|
) -> Result<(bool, usize), Error> {
|
||||||
|
|
||||||
let status = match self.status {
|
let status = match self.status {
|
||||||
@ -514,6 +520,7 @@ impl PoolWriter {
|
|||||||
worker,
|
worker,
|
||||||
writer,
|
writer,
|
||||||
chunk_iter,
|
chunk_iter,
|
||||||
|
store,
|
||||||
MAX_CHUNK_ARCHIVE_SIZE,
|
MAX_CHUNK_ARCHIVE_SIZE,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
@ -531,7 +538,7 @@ impl PoolWriter {
|
|||||||
|
|
||||||
// register chunks in media_catalog
|
// register chunks in media_catalog
|
||||||
self.catalog_builder.lock().unwrap()
|
self.catalog_builder.lock().unwrap()
|
||||||
.register_chunk_archive(content_uuid, current_file_number, &saved_chunks)?;
|
.register_chunk_archive(content_uuid, current_file_number, store, &saved_chunks)?;
|
||||||
|
|
||||||
if leom || request_sync {
|
if leom || request_sync {
|
||||||
self.commit()?;
|
self.commit()?;
|
||||||
@ -558,10 +565,11 @@ fn write_chunk_archive<'a>(
|
|||||||
_worker: &WorkerTask,
|
_worker: &WorkerTask,
|
||||||
writer: Box<dyn 'a + TapeWrite>,
|
writer: Box<dyn 'a + TapeWrite>,
|
||||||
chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
|
chunk_iter: &mut std::iter::Peekable<NewChunksIterator>,
|
||||||
|
store: &str,
|
||||||
max_size: usize,
|
max_size: usize,
|
||||||
) -> Result<(Vec<[u8;32]>, Uuid, bool, usize), Error> {
|
) -> Result<(Vec<[u8;32]>, Uuid, bool, usize), Error> {
|
||||||
|
|
||||||
let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, true)?;
|
let (mut writer, content_uuid) = ChunkArchiveWriter::new(writer, store, true)?;
|
||||||
|
|
||||||
// we want to get the chunk list in correct order
|
// we want to get the chunk list in correct order
|
||||||
let mut chunk_list: Vec<[u8;32]> = Vec::new();
|
let mut chunk_list: Vec<[u8;32]> = Vec::new();
|
||||||
|