tape: store datastore name in tape archives and media catalog
This allows storing backups from multiple datastores on a single media set. Deduplication is now per datastore (not per media set).
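For orientation, here is a minimal sketch of the catalog shape this change implies. It is illustrative only: `content()` and `snapshot_index` match the accessors used in `list_content()` below, while the type names and the `chunk_index` field are invented for the sketch. The catalog now maps each datastore name to its own content, so a chunk digest is only deduplicated against archives from the same store.

use std::collections::{HashMap, HashSet};

// Illustrative stand-in for the on-media catalog after this commit.
struct DatastoreContent {
    snapshot_index: HashMap<String, u64>, // snapshot -> file number on tape
    chunk_index: HashSet<[u8; 32]>,       // digests already written for this store
}

struct CatalogSketch {
    content: HashMap<String, DatastoreContent>, // datastore name -> content
}

impl CatalogSketch {
    // Deduplication is per datastore: the same digest stored by two
    // different datastores is written to tape twice.
    fn contains_chunk(&self, store: &str, digest: &[u8; 32]) -> bool {
        self.content
            .get(store)
            .map_or(false, |c| c.chunk_index.contains(digest))
    }
}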
@@ -402,6 +402,8 @@ fn backup_worker(
         task_log!(worker, "latest-only: true (only considering latest snapshots)");
     }
 
+    let datastore_name = datastore.name();
+
     let mut errors = false;
 
     for (group_number, group) in group_list.into_iter().enumerate() {
@@ -416,7 +418,7 @@ fn backup_worker(
         if latest_only {
             progress.group_snapshots = 1;
             if let Some(info) = snapshot_list.pop() {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                     task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
@@ -433,7 +435,7 @@ fn backup_worker(
         } else {
             progress.group_snapshots = snapshot_list.len() as u64;
             for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
-                if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
                     task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
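Both call sites now pass the datastore name, so the skip check consults only that store's snapshot index. A rough sketch of the lookup, reusing the CatalogSketch from above (the real PoolWriter method also has to consider the catalogs of all media in the set):

// Hypothetical per-store lookup backing contains_snapshot().
fn contains_snapshot(catalog: &CatalogSketch, store: &str, snapshot: &str) -> bool {
    catalog
        .content
        .get(store)
        .map_or(false, |c| c.snapshot_index.contains_key(snapshot))
}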
@@ -531,7 +533,7 @@ pub fn backup_snapshot(
 
         worker.check_abort()?;
 
-        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter)?;
+        let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &mut chunk_iter, datastore.name())?;
 
         if leom {
             pool_writer.set_media_status_full(&uuid)?;
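With the extra `datastore.name()` argument, the writer can record the source store at the start of each chunk archive. A hedged sketch of the header encoding: the restore code below decodes it with `serde_json::from_slice`, so the write side presumably serializes the same struct; `encode_header` is an invented helper, not the actual PoolWriter code.

use serde::{Deserialize, Serialize};

// Shape implied by the restore code's `archive_header.store` access.
#[derive(Serialize, Deserialize)]
struct ChunkArchiveHeader {
    store: String,
}

// Hypothetical helper: serialize the header so it can be written
// in front of the chunk archive on tape.
fn encode_header(store: &str) -> Result<Vec<u8>, serde_json::Error> {
    serde_json::to_vec(&ChunkArchiveHeader { store: store.to_string() })
}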
@@ -434,27 +434,30 @@ pub fn list_content(
 
         let catalog = MediaCatalog::open(status_path, &media_id.label.uuid, false, false)?;
 
-        for snapshot in catalog.snapshot_index().keys() {
-            let backup_dir: BackupDir = snapshot.parse()?;
+        for (store, content) in catalog.content() {
+            for snapshot in content.snapshot_index.keys() {
+                let backup_dir: BackupDir = snapshot.parse()?;
 
-            if let Some(ref backup_type) = filter.backup_type {
-                if backup_dir.group().backup_type() != backup_type { continue; }
-            }
-            if let Some(ref backup_id) = filter.backup_id {
-                if backup_dir.group().backup_id() != backup_id { continue; }
-            }
+                if let Some(ref backup_type) = filter.backup_type {
+                    if backup_dir.group().backup_type() != backup_type { continue; }
+                }
+                if let Some(ref backup_id) = filter.backup_id {
+                    if backup_dir.group().backup_id() != backup_id { continue; }
+                }
 
-            list.push(MediaContentEntry {
-                uuid: media_id.label.uuid.clone(),
-                label_text: media_id.label.label_text.to_string(),
-                pool: set.pool.clone(),
-                media_set_name: media_set_name.clone(),
-                media_set_uuid: set.uuid.clone(),
-                media_set_ctime: set.ctime,
-                seq_nr: set.seq_nr,
-                snapshot: snapshot.to_owned(),
-                backup_time: backup_dir.backup_time(),
-            });
-        }
+                list.push(MediaContentEntry {
+                    uuid: media_id.label.uuid.clone(),
+                    label_text: media_id.label.label_text.to_string(),
+                    pool: set.pool.clone(),
+                    media_set_name: media_set_name.clone(),
+                    media_set_uuid: set.uuid.clone(),
+                    media_set_ctime: set.ctime,
+                    seq_nr: set.seq_nr,
+                    snapshot: snapshot.to_owned(),
+                    store: store.to_owned(),
+                    backup_time: backup_dir.backup_time(),
+                });
+            }
+        }
     }
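Since each `MediaContentEntry` now carries a `store` field, API consumers can split a media set's content by datastore. A small usage sketch with a pared-down entry type (the real type has more fields, as pushed above; `group_by_store` is illustrative):

use std::collections::BTreeMap;

// Pared-down stand-in for MediaContentEntry; only the fields used here.
struct ContentEntry {
    store: String,
    snapshot: String,
}

// Group tape content by source datastore, e.g. for a per-store view.
fn group_by_store(entries: Vec<ContentEntry>) -> BTreeMap<String, Vec<String>> {
    let mut grouped: BTreeMap<String, Vec<String>> = BTreeMap::new();
    for entry in entries {
        grouped.entry(entry.store).or_default().push(entry.snapshot);
    }
    grouped
}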
@@ -71,11 +71,15 @@ use crate::{
     file_formats::{
         PROXMOX_BACKUP_MEDIA_LABEL_MAGIC_1_0,
         PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0,
+        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1,
         PROXMOX_BACKUP_MEDIA_SET_LABEL_MAGIC_1_0,
         PROXMOX_BACKUP_CONTENT_HEADER_MAGIC_1_0,
         PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0,
+        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1,
         MediaContentHeader,
+        ChunkArchiveHeader,
         ChunkArchiveDecoder,
+        SnapshotArchiveHeader,
     },
     drive::{
         TapeDriver,
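Of the new imports, `SnapshotArchiveHeader` is the snapshot-side counterpart of the `ChunkArchiveHeader` sketched earlier. Judging from the fields the restore code reads (`.store` and `.snapshot`), it is a plain serde struct along these lines (a sketch, not the verified definition):

use serde::{Deserialize, Serialize};

// Written at the start of a v1.1 snapshot archive: names the source
// datastore and the snapshot it contains.
#[derive(Serialize, Deserialize)]
struct SnapshotArchiveHeader {
    store: String,    // source datastore name
    snapshot: String, // e.g. "vm/100/2021-01-25T10:00:00Z"
}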
@@ -362,10 +366,18 @@ fn restore_archive<'a>(
             bail!("unexpected content magic (label)");
         }
         PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_0 => {
-            let snapshot = reader.read_exact_allocated(header.size as usize)?;
-            let snapshot = std::str::from_utf8(&snapshot)
-                .map_err(|_| format_err!("found snapshot archive with non-utf8 characters in name"))?;
-            task_log!(worker, "Found snapshot archive: {} {}", current_file_number, snapshot);
+            bail!("unexpected snapshot archive version (v1.0)");
+        }
+        PROXMOX_BACKUP_SNAPSHOT_ARCHIVE_MAGIC_1_1 => {
+            let header_data = reader.read_exact_allocated(header.size as usize)?;
+
+            let archive_header: SnapshotArchiveHeader = serde_json::from_slice(&header_data)
+                .map_err(|err| format_err!("unable to parse snapshot archive header - {}", err))?;
+
+            let datastore_name = archive_header.store;
+            let snapshot = archive_header.snapshot;
+
+            task_log!(worker, "File {}: snapshot archive {}:{}", current_file_number, datastore_name, snapshot);
 
             let backup_dir: BackupDir = snapshot.parse()?;
 
@@ -393,7 +405,7 @@ fn restore_archive<'a>(
                     task_log!(worker, "skip incomplete snapshot {}", backup_dir);
                 }
                 Ok(true) => {
-                    catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
+                    catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                     catalog.commit_if_large()?;
                 }
             }
@@ -403,17 +415,26 @@ fn restore_archive<'a>(
                 reader.skip_to_end()?; // read all data
                 if let Ok(false) = reader.is_incomplete() {
-                    catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, snapshot)?;
+                    catalog.register_snapshot(Uuid::from(header.uuid), current_file_number, &datastore_name, &snapshot)?;
                     catalog.commit_if_large()?;
                 }
             }
         }
         PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_0 => {
+            bail!("unexpected chunk archive version (v1.0)");
+        }
+        PROXMOX_BACKUP_CHUNK_ARCHIVE_MAGIC_1_1 => {
+            let header_data = reader.read_exact_allocated(header.size as usize)?;
 
-            task_log!(worker, "Found chunk archive: {}", current_file_number);
+            let archive_header: ChunkArchiveHeader = serde_json::from_slice(&header_data)
+                .map_err(|err| format_err!("unable to parse chunk archive header - {}", err))?;
+
+            let source_datastore = archive_header.store;
+
+            task_log!(worker, "File {}: chunk archive for datastore '{}'", current_file_number, source_datastore);
             let datastore = target.as_ref().map(|t| t.0);
 
             if let Some(chunks) = restore_chunk_archive(worker, reader, datastore, verbose)? {
-                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number)?;
+                catalog.start_chunk_archive(Uuid::from(header.uuid), current_file_number, &source_datastore)?;
                 for digest in chunks.iter() {
                     catalog.register_chunk(&digest)?;
                 }
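Taken together, the restore-side catalog calls now all carry a store name. A condensed view of the MediaCatalog surface as implied by the call sites in this commit (signatures approximated; `Uuid` is a placeholder for the real uuid type and errors are anyhow-style):

use anyhow::Error;

// Placeholder for the uuid type used by the real catalog.
pub struct Uuid;

// Signatures approximated from the call sites above; each snapshot
// and chunk archive is now registered under its datastore name.
pub trait MediaCatalogPerStore {
    fn register_snapshot(
        &mut self,
        uuid: Uuid,
        file_number: u64,
        store: &str,
        snapshot: &str,
    ) -> Result<(), Error>;

    fn start_chunk_archive(
        &mut self,
        uuid: Uuid,
        file_number: u64,
        store: &str,
    ) -> Result<(), Error>;

    fn register_chunk(&mut self, digest: &[u8; 32]) -> Result<(), Error>;
}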