2020-12-15 12:13:44 +00:00
|
|
|
use std::convert::TryFrom;
|
|
|
|
use std::fs::File;
|
2020-12-16 07:59:27 +00:00
|
|
|
use std::io::{Write, Read, BufReader, Seek, SeekFrom};
|
|
|
|
use std::os::unix::io::AsRawFd;
|
2021-07-28 08:10:02 +00:00
|
|
|
use std::path::{PathBuf, Path};
|
2021-01-09 09:24:48 +00:00
|
|
|
use std::collections::{HashSet, HashMap};
|
2020-12-15 12:13:44 +00:00
|
|
|
|
|
|
|
use anyhow::{bail, format_err, Error};
|
|
|
|
use endian_trait::Endian;
|
|
|
|
|
2021-07-06 11:26:35 +00:00
|
|
|
use pbs_tools::fs::read_subdir;
|
|
|
|
|
2020-12-15 12:13:44 +00:00
|
|
|
use proxmox::tools::{
|
|
|
|
Uuid,
|
|
|
|
fs::{
|
|
|
|
fchown,
|
|
|
|
create_path,
|
|
|
|
CreateOptions,
|
|
|
|
},
|
|
|
|
io::{
|
|
|
|
WriteExt,
|
|
|
|
ReadExt,
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
use crate::{
|
|
|
|
backup::BackupDir,
|
2020-12-16 12:27:53 +00:00
|
|
|
tape::{
|
|
|
|
MediaId,
|
2021-03-18 07:43:55 +00:00
|
|
|
file_formats::MediaSetLabel,
|
2020-12-16 12:27:53 +00:00
|
|
|
},
|
2020-12-15 12:13:44 +00:00
|
|
|
};
|
|
|
|
|
2021-03-16 11:52:49 +00:00
|
|
|
/// In-memory index of what a single datastore has stored on this media.
///
/// Both maps point to the tape file number (`file_nr`) holding the item.
pub struct DatastoreContent {
    // snapshot path string => file_nr of the snapshot archive
    pub snapshot_index: HashMap<String, u64>, // snapshot => file_nr
    // chunk digest => file_nr of the chunk archive containing it
    pub chunk_index: HashMap<[u8;32], u64>, // chunk => file_nr
}
|
|
|
|
|
|
|
|
impl DatastoreContent {
|
|
|
|
|
|
|
|
pub fn new() -> Self {
|
|
|
|
Self {
|
|
|
|
chunk_index: HashMap::new(),
|
|
|
|
snapshot_index: HashMap::new(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2020-12-16 07:59:27 +00:00
|
|
|
|
2020-12-15 12:13:44 +00:00
|
|
|
/// The Media Catalog
///
/// Stores what chunks and snapshots are stored on a specific media,
/// including the file position.
///
/// We use a simple binary format to store data on disk.
pub struct MediaCatalog {

    uuid: Uuid, // BackupMedia uuid

    // Backing catalog file; `Some` only when the catalog was opened writable
    file: Option<File>,

    // When enabled, every recorded entry is also printed to stdout
    log_to_stdout: bool,

    current_archive: Option<(Uuid, u64, String)>, // (uuid, file_nr, store)

    // Uuid and file number of the last fully registered entry
    last_entry: Option<(Uuid, u64)>,

    // Per-datastore snapshot/chunk indexes built from the catalog entries
    content: HashMap<String, DatastoreContent>,

    // Raw bytes recorded but not yet flushed to `file` (see `commit()`)
    pending: Vec<u8>,
}
|
|
|
|
|
|
|
|
impl MediaCatalog {

    /// Magic number for media catalog files.
    // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.0")[0..8]
    // Note: this version did not store datastore names (not supported anymore)
    pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0: [u8; 8] = [221, 29, 164, 1, 59, 69, 19, 40];

    /// Magic number for the current (v1.1) media catalog file format.
    // openssl::sha::sha256(b"Proxmox Backup Media Catalog v1.1")[0..8]
    pub const PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1: [u8; 8] = [76, 142, 232, 193, 32, 168, 137, 113];
|
|
|
|
|
2021-01-09 09:24:48 +00:00
|
|
|
    /// List media with catalogs
    ///
    /// Scans `base_path` for `<uuid>.log` files and returns the set of
    /// media uuids that have an on-disk catalog.
    pub fn media_with_catalogs(base_path: &Path) -> Result<HashSet<Uuid>, Error> {
        let mut catalogs = HashSet::new();

        for entry in read_subdir(libc::AT_FDCWD, base_path)? {
            let entry = entry?;
            // NOTE(review): skips UTF-8 validation; assumes catalog file names
            // are valid UTF-8 — non-matching names fall through below anyway.
            let name = unsafe { entry.file_name_utf8_unchecked() };
            if !name.ends_with(".log") { continue; }
            // strip the 4-byte ".log" suffix and try to parse the rest as uuid
            if let Ok(uuid) = Uuid::parse_str(&name[..(name.len()-4)]) {
                catalogs.insert(uuid);
            }
        }

        Ok(catalogs)
    }
|
|
|
|
|
2021-07-28 08:10:02 +00:00
|
|
|
pub fn catalog_path(base_path: &Path, uuid: &Uuid) -> PathBuf {
|
2020-12-15 12:13:44 +00:00
|
|
|
let mut path = base_path.to_owned();
|
|
|
|
path.push(uuid.to_string());
|
|
|
|
path.set_extension("log");
|
2021-07-28 08:10:02 +00:00
|
|
|
path
|
|
|
|
}
|
|
|
|
|
|
|
|
fn tmp_catalog_path(base_path: &Path, uuid: &Uuid) -> PathBuf {
|
|
|
|
let mut path = base_path.to_owned();
|
|
|
|
path.push(uuid.to_string());
|
|
|
|
path.set_extension("tmp");
|
|
|
|
path
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Test if a catalog exists
|
|
|
|
pub fn exists(base_path: &Path, uuid: &Uuid) -> bool {
|
|
|
|
Self::catalog_path(base_path, uuid).exists()
|
2020-12-15 12:13:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Destroy the media catalog (remove all files)
|
|
|
|
pub fn destroy(base_path: &Path, uuid: &Uuid) -> Result<(), Error> {
|
|
|
|
|
2021-07-28 08:10:02 +00:00
|
|
|
let path = Self::catalog_path(base_path, uuid);
|
2020-12-15 12:13:44 +00:00
|
|
|
|
|
|
|
match std::fs::remove_file(path) {
|
|
|
|
Ok(()) => Ok(()),
|
|
|
|
Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()),
|
|
|
|
Err(err) => Err(err.into()),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-03-22 11:01:26 +00:00
|
|
|
    /// Destroy the media catalog if media_set uuid does not match
    ///
    /// Reads the catalog header and removes the catalog file when it
    /// belongs to a different media or media set than `media_id`.
    /// A missing or header-less catalog file is left untouched.
    pub fn destroy_unrelated_catalog(
        base_path: &Path,
        media_id: &MediaId,
    ) -> Result<(), Error> {

        let uuid = &media_id.label.uuid;

        let path = Self::catalog_path(base_path, uuid);

        let file = match std::fs::OpenOptions::new().read(true).open(&path) {
            Ok(file) => file,
            Err(ref err) if err.kind() == std::io::ErrorKind::NotFound => {
                // no catalog on disk - nothing to do
                return Ok(());
            }
            Err(err) => return Err(err.into()),
        };

        let mut file = BufReader::new(file);

        let expected_media_set_id = match media_id.media_set_label {
            None => {
                // media no longer belongs to a set - any catalog is stale
                std::fs::remove_file(path)?;
                return Ok(())
            },
            Some(ref set) => &set.uuid,
        };

        let (found_magic_number, media_uuid, media_set_uuid) =
            Self::parse_catalog_header(&mut file)?;

        if !found_magic_number {
            // empty/unrecognized file - keep untouched
            return Ok(());
        }

        if let Some(ref media_uuid) = media_uuid {
            // catalog belongs to a different media
            if media_uuid != uuid {
                std::fs::remove_file(path)?;
                return Ok(());
            }
        }

        if let Some(ref media_set_uuid) = media_set_uuid {
            // catalog belongs to a different media set
            if media_set_uuid != expected_media_set_id {
                std::fs::remove_file(path)?;
            }
        }

        Ok(())
    }
|
|
|
|
|
2020-12-17 07:04:56 +00:00
|
|
|
    /// Enable/Disable logging to stdout (disabled by default)
    ///
    /// When enabled, each registered entry is additionally printed as a
    /// single line on stdout.
    pub fn log_to_stdout(&mut self, enable: bool) {
        self.log_to_stdout = enable;
    }
|
|
|
|
|
2020-12-15 12:13:44 +00:00
|
|
|
fn create_basedir(base_path: &Path) -> Result<(), Error> {
|
|
|
|
let backup_user = crate::backup::backup_user()?;
|
|
|
|
let mode = nix::sys::stat::Mode::from_bits_truncate(0o0640);
|
|
|
|
let opts = CreateOptions::new()
|
|
|
|
.perm(mode)
|
|
|
|
.owner(backup_user.uid)
|
|
|
|
.group(backup_user.gid);
|
|
|
|
|
|
|
|
create_path(base_path, None, Some(opts))
|
|
|
|
.map_err(|err: Error| format_err!("unable to create media catalog dir - {}", err))?;
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Open a catalog database, load into memory
    ///
    /// Opens (optionally creating) the catalog file for `media_id` and
    /// replays all entries to rebuild the in-memory indexes. The file
    /// handle is kept for later `commit()` calls only when `write` is set.
    pub fn open(
        base_path: &Path,
        media_id: &MediaId,
        write: bool,
        create: bool,
    ) -> Result<Self, Error> {

        let uuid = &media_id.label.uuid;

        let path = Self::catalog_path(base_path, uuid);

        let me = proxmox::try_block!({

            Self::create_basedir(base_path)?;

            let mut file = std::fs::OpenOptions::new()
                .read(true)
                .write(write)
                .create(create)
                .open(&path)?;

            let backup_user = crate::backup::backup_user()?;
            fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid))
                .map_err(|err| format_err!("fchown failed - {}", err))?;

            let mut me = Self {
                uuid: uuid.clone(),
                file: None,
                log_to_stdout: false,
                current_archive: None,
                last_entry: None,
                content: HashMap::new(),
                pending: Vec::new(),
            };

            // Note: lock file, to get a consistent view with load_catalog
            nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::LockExclusive)?;
            let result = me.load_catalog(&mut file, media_id.media_set_label.as_ref());
            nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::Unlock)?;

            let (found_magic_number, _) = result?;

            if !found_magic_number {
                // brand-new file: queue the magic number for the first commit
                me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);
            }

            if write {
                me.file = Some(file);
            }
            Ok(me)
        }).map_err(|err: Error| {
            format_err!("unable to open media catalog {:?} - {}", path, err)
        })?;

        Ok(me)
    }
|
|
|
|
|
2021-03-23 12:36:41 +00:00
|
|
|
    /// Creates a temporary empty catalog file
    ///
    /// Creates/truncates `<base>/<uuid>.tmp` and chowns it to the backup
    /// user (skipped under `cfg!(test)`).
    pub fn create_temporary_database_file(
        base_path: &Path,
        uuid: &Uuid,
    ) -> Result<File, Error> {

        Self::create_basedir(base_path)?;

        let tmp_path = Self::tmp_catalog_path(base_path, uuid);

        let file = std::fs::OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(true)
            .open(&tmp_path)?;

        if cfg!(test) {
            // We cannot use chown inside test environment (no permissions)
            return Ok(file);
        }

        let backup_user = crate::backup::backup_user()?;
        fchown(file.as_raw_fd(), Some(backup_user.uid), Some(backup_user.gid))
            .map_err(|err| format_err!("fchown failed - {}", err))?;

        Ok(file)
    }
|
|
|
|
|
2020-12-15 12:13:44 +00:00
|
|
|
    /// Creates a temporary, empty catalog database
    ///
    /// Creates a new catalog file using a ".tmp" file extension.
    ///
    /// The new catalog is seeded with the media label (file 0) and, when
    /// present, the media-set label (file 1), then committed to disk.
    pub fn create_temporary_database(
        base_path: &Path,
        media_id: &MediaId,
        log_to_stdout: bool,
    ) -> Result<Self, Error> {

        let uuid = &media_id.label.uuid;

        let tmp_path = Self::tmp_catalog_path(base_path, uuid);

        let me = proxmox::try_block!({

            let file = Self::create_temporary_database_file(base_path, uuid)?;

            let mut me = Self {
                uuid: uuid.clone(),
                file: Some(file),
                log_to_stdout: false,
                current_archive: None,
                last_entry: None,
                content: HashMap::new(),
                pending: Vec::new(),
            };

            me.log_to_stdout = log_to_stdout;

            // new file: the magic number is the first thing written
            me.pending.extend(&Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1);

            me.register_label(&media_id.label.uuid, 0, 0)?;

            if let Some(ref set) = media_id.media_set_label {
                me.register_label(&set.uuid, set.seq_nr, 1)?;
            }

            me.commit()?;

            Ok(me)
        }).map_err(|err: Error| {
            format_err!("unable to create temporary media catalog {:?} - {}", tmp_path, err)
        })?;

        Ok(me)
    }
|
|
|
|
|
|
|
|
/// Commit or Abort a temporary catalog database
|
2020-12-17 07:04:56 +00:00
|
|
|
///
|
|
|
|
/// With commit set, we rename the ".tmp" file extension to
|
|
|
|
/// ".log". When commit is false, we remove the ".tmp" file.
|
2020-12-15 12:13:44 +00:00
|
|
|
pub fn finish_temporary_database(
|
|
|
|
base_path: &Path,
|
|
|
|
uuid: &Uuid,
|
|
|
|
commit: bool,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
|
2021-07-28 08:10:02 +00:00
|
|
|
let tmp_path = Self::tmp_catalog_path(base_path, uuid);
|
2020-12-15 12:13:44 +00:00
|
|
|
|
|
|
|
if commit {
|
|
|
|
let mut catalog_path = tmp_path.clone();
|
|
|
|
catalog_path.set_extension("log");
|
|
|
|
|
|
|
|
if let Err(err) = std::fs::rename(&tmp_path, &catalog_path) {
|
|
|
|
bail!("Atomic rename catalog {:?} failed - {}", catalog_path, err);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
std::fs::remove_file(&tmp_path)?;
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Returns the BackupMedia uuid
    pub fn uuid(&self) -> &Uuid {
        &self.uuid
    }
|
|
|
|
|
|
|
|
    /// Accessor to content list
    ///
    /// Maps datastore name to its snapshot/chunk indexes.
    pub fn content(&self) -> &HashMap<String, DatastoreContent> {
        &self.content
    }
|
|
|
|
|
|
|
|
    /// Commit pending changes
    ///
    /// This is necessary to store changes persistently.
    ///
    /// Appends `pending` to the catalog file (flock-protected, flushed and
    /// synced) and clears the pending buffer on success. Fails when the
    /// catalog was opened read-only.
    ///
    /// Fixme: this should be atomic ...
    pub fn commit(&mut self) -> Result<(), Error> {

        if self.pending.is_empty() {
            return Ok(());
        }

        match self.file {
            Some(ref mut file) => {
                let pending = &self.pending;
                // Note: lock file, to get a consistent view with load_catalog
                nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::LockExclusive)?;
                let result: Result<(), Error> = proxmox::try_block!({
                    file.write_all(pending)?;
                    file.flush()?;
                    file.sync_data()?;
                    Ok(())
                });
                // always release the lock, even when the write failed
                nix::fcntl::flock(file.as_raw_fd(), nix::fcntl::FlockArg::Unlock)?;

                result?;
            }
            None => bail!("media catalog not writable (opened read only)"),
        }

        self.pending = Vec::new();

        Ok(())
    }
|
|
|
|
|
|
|
|
/// Conditionally commit if in pending data is large (> 1Mb)
|
|
|
|
pub fn commit_if_large(&mut self) -> Result<(), Error> {
|
2021-03-19 06:50:32 +00:00
|
|
|
if self.current_archive.is_some() {
|
|
|
|
bail!("can't commit catalog in the middle of an chunk archive");
|
|
|
|
}
|
2020-12-15 12:13:44 +00:00
|
|
|
if self.pending.len() > 1024*1024 {
|
|
|
|
self.commit()?;
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Destroy existing catalog, opens a new one
|
|
|
|
pub fn overwrite(
|
|
|
|
base_path: &Path,
|
2020-12-16 12:27:53 +00:00
|
|
|
media_id: &MediaId,
|
2020-12-15 12:13:44 +00:00
|
|
|
log_to_stdout: bool,
|
|
|
|
) -> Result<Self, Error> {
|
|
|
|
|
2020-12-16 12:27:53 +00:00
|
|
|
let uuid = &media_id.label.uuid;
|
2020-12-15 12:13:44 +00:00
|
|
|
|
2020-12-16 12:27:53 +00:00
|
|
|
let me = Self::create_temporary_database(base_path, &media_id, log_to_stdout)?;
|
2020-12-15 12:13:44 +00:00
|
|
|
|
|
|
|
Self::finish_temporary_database(base_path, uuid, true)?;
|
|
|
|
|
|
|
|
Ok(me)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Test if the catalog already contain a snapshot
|
2021-03-16 11:52:49 +00:00
|
|
|
pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
|
|
|
|
match self.content.get(store) {
|
|
|
|
None => false,
|
|
|
|
Some(content) => content.snapshot_index.contains_key(snapshot),
|
|
|
|
}
|
2020-12-15 12:13:44 +00:00
|
|
|
}
|
|
|
|
|
2021-03-16 11:52:49 +00:00
|
|
|
/// Returns the snapshot archive file number
|
|
|
|
pub fn lookup_snapshot(&self, store: &str, snapshot: &str) -> Option<u64> {
|
|
|
|
match self.content.get(store) {
|
|
|
|
None => None,
|
|
|
|
Some(content) => content.snapshot_index.get(snapshot).copied(),
|
|
|
|
}
|
2020-12-15 12:13:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Test if the catalog already contain a chunk
|
2021-03-16 11:52:49 +00:00
|
|
|
pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
|
|
|
|
match self.content.get(store) {
|
|
|
|
None => false,
|
|
|
|
Some(content) => content.chunk_index.contains_key(digest),
|
|
|
|
}
|
2020-12-15 12:13:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Returns the chunk archive file number
|
2021-03-16 11:52:49 +00:00
|
|
|
pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option<u64> {
|
|
|
|
match self.content.get(store) {
|
|
|
|
None => None,
|
|
|
|
Some(content) => content.chunk_index.get(digest).copied(),
|
|
|
|
}
|
2020-12-15 12:13:44 +00:00
|
|
|
}
|
|
|
|
|
2021-03-18 07:43:55 +00:00
|
|
|
fn check_register_label(&self, file_number: u64, uuid: &Uuid) -> Result<(), Error> {
|
2020-12-15 12:13:44 +00:00
|
|
|
|
|
|
|
if file_number >= 2 {
|
|
|
|
bail!("register label failed: got wrong file number ({} >= 2)", file_number);
|
|
|
|
}
|
|
|
|
|
2021-03-18 07:43:55 +00:00
|
|
|
if file_number == 0 && uuid != &self.uuid {
|
|
|
|
bail!("register label failed: uuid does not match");
|
|
|
|
}
|
|
|
|
|
2020-12-15 12:13:44 +00:00
|
|
|
if self.current_archive.is_some() {
|
|
|
|
bail!("register label failed: inside chunk archive");
|
|
|
|
}
|
|
|
|
|
|
|
|
let expected_file_number = match self.last_entry {
|
|
|
|
Some((_, last_number)) => last_number + 1,
|
|
|
|
None => 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
if file_number != expected_file_number {
|
|
|
|
bail!("register label failed: got unexpected file number ({} < {})",
|
|
|
|
file_number, expected_file_number);
|
|
|
|
}
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Register media labels (file 0 and 1)
|
|
|
|
pub fn register_label(
|
|
|
|
&mut self,
|
2021-03-18 07:43:55 +00:00
|
|
|
uuid: &Uuid, // Media/MediaSet Uuid
|
|
|
|
seq_nr: u64, // onyl used for media set labels
|
2020-12-15 12:13:44 +00:00
|
|
|
file_number: u64,
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
|
2021-03-18 07:43:55 +00:00
|
|
|
self.check_register_label(file_number, uuid)?;
|
|
|
|
|
|
|
|
if file_number == 0 && seq_nr != 0 {
|
|
|
|
bail!("register_label failed - seq_nr should be 0 - iternal error");
|
|
|
|
}
|
2020-12-15 12:13:44 +00:00
|
|
|
|
|
|
|
let entry = LabelEntry {
|
|
|
|
file_number,
|
|
|
|
uuid: *uuid.as_bytes(),
|
2021-03-18 07:43:55 +00:00
|
|
|
seq_nr,
|
2020-12-15 12:13:44 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
if self.log_to_stdout {
|
|
|
|
println!("L|{}|{}", file_number, uuid.to_string());
|
|
|
|
}
|
|
|
|
|
|
|
|
self.pending.push(b'L');
|
|
|
|
|
|
|
|
unsafe { self.pending.write_le_value(entry)?; }
|
|
|
|
|
|
|
|
self.last_entry = Some((uuid.clone(), file_number));
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2021-07-22 13:41:00 +00:00
|
|
|
/// Register a chunk archive
|
|
|
|
pub fn register_chunk_archive(
|
|
|
|
&mut self,
|
|
|
|
uuid: Uuid, // Uuid form MediaContentHeader
|
|
|
|
file_number: u64,
|
|
|
|
store: &str,
|
|
|
|
chunk_list: &[[u8; 32]],
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
self.start_chunk_archive(uuid, file_number, store)?;
|
|
|
|
for digest in chunk_list {
|
|
|
|
self.register_chunk(digest)?;
|
|
|
|
}
|
|
|
|
self.end_chunk_archive()?;
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2020-12-15 12:13:44 +00:00
|
|
|
    /// Register a chunk
    ///
    /// Only valid after start_chunk_archive.
    ///
    /// Records a `C` entry (1 tag byte + 32 digest bytes) and indexes the
    /// digest under the currently open archive's store and file number.
    fn register_chunk(
        &mut self,
        digest: &[u8;32],
    ) -> Result<(), Error> {

        let (file_number, store) = match self.current_archive {
            None => bail!("register_chunk failed: no archive started"),
            Some((_, file_number, ref store)) => (file_number, store),
        };

        if self.log_to_stdout {
            println!("C|{}", proxmox::tools::digest_to_hex(digest));
        }

        self.pending.push(b'C');
        self.pending.extend(digest);

        match self.content.get_mut(store) {
            // start_chunk_archive always inserts the store, so this is a bug
            None => bail!("storage {} not registered - internal error", store),
            Some(content) => {
                content.chunk_index.insert(*digest, file_number);
            }
        }

        Ok(())
    }
|
|
|
|
|
|
|
|
fn check_start_chunk_archive(&self, file_number: u64) -> Result<(), Error> {
|
|
|
|
|
|
|
|
if self.current_archive.is_some() {
|
|
|
|
bail!("start_chunk_archive failed: already started");
|
|
|
|
}
|
|
|
|
|
|
|
|
if file_number < 2 {
|
|
|
|
bail!("start_chunk_archive failed: got wrong file number ({} < 2)", file_number);
|
|
|
|
}
|
|
|
|
|
|
|
|
let expect_min_file_number = match self.last_entry {
|
|
|
|
Some((_, last_number)) => last_number + 1,
|
|
|
|
None => 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
if file_number < expect_min_file_number {
|
|
|
|
bail!("start_chunk_archive: got unexpected file number ({} < {})",
|
|
|
|
file_number, expect_min_file_number);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Start a chunk archive section
    ///
    /// Records an `A` entry (header + store name bytes) and marks the
    /// archive as open so subsequent `register_chunk` calls attach to it.
    fn start_chunk_archive(
        &mut self,
        uuid: Uuid, // Uuid form MediaContentHeader
        file_number: u64,
        store: &str,
    ) -> Result<(), Error> {

        self.check_start_chunk_archive(file_number)?;

        let entry = ChunkArchiveStart {
            file_number,
            uuid: *uuid.as_bytes(),
            // store name length is stored as a single byte on disk
            store_name_len: u8::try_from(store.len())?,
        };

        if self.log_to_stdout {
            println!("A|{}|{}|{}", file_number, uuid.to_string(), store);
        }

        self.pending.push(b'A');

        unsafe { self.pending.write_le_value(entry)?; }
        self.pending.extend(store.as_bytes());

        // make sure the store has an index before chunks get registered
        self.content.entry(store.to_string()).or_insert(DatastoreContent::new());

        self.current_archive = Some((uuid, file_number, store.to_string()));

        Ok(())
    }
|
|
|
|
|
|
|
|
    /// Validate that `uuid`/`file_number` match the currently open archive.
    fn check_end_chunk_archive(&self, uuid: &Uuid, file_number: u64) -> Result<(), Error> {

        match self.current_archive {
            None => bail!("end_chunk archive failed: not started"),
            Some((ref expected_uuid, expected_file_number, ..)) => {
                if uuid != expected_uuid {
                    bail!("end_chunk_archive failed: got unexpected uuid");
                }
                if file_number != expected_file_number {
                    bail!("end_chunk_archive failed: got unexpected file number ({} != {})",
                          file_number, expected_file_number);
                }
            }
        }
        Ok(())
    }
|
|
|
|
|
|
|
|
    /// End a chunk archive section
    ///
    /// Records an `E` entry, closes the open archive (via `take()`), and
    /// advances `last_entry` to the archive's file number.
    fn end_chunk_archive(&mut self) -> Result<(), Error> {

        match self.current_archive.take() {
            None => bail!("end_chunk_archive failed: not started"),
            Some((uuid, file_number, ..)) => {

                let entry = ChunkArchiveEnd {
                    file_number,
                    uuid: *uuid.as_bytes(),
                };

                if self.log_to_stdout {
                    println!("E|{}|{}\n", file_number, uuid.to_string());
                }

                self.pending.push(b'E');

                unsafe { self.pending.write_le_value(entry)?; }

                self.last_entry = Some((uuid, file_number));
            }
        }

        Ok(())
    }
|
|
|
|
|
|
|
|
    /// Validate preconditions for `register_snapshot`: no open chunk
    /// archive, `file_number` >= 2 and monotonic, and `snapshot` must
    /// parse as a valid `BackupDir` path.
    fn check_register_snapshot(&self, file_number: u64, snapshot: &str) -> Result<(), Error> {

        if self.current_archive.is_some() {
            bail!("register_snapshot failed: inside chunk_archive");
        }

        if file_number < 2 {
            bail!("register_snapshot failed: got wrong file number ({} < 2)", file_number);
        }

        let expect_min_file_number = match self.last_entry {
            Some((_, last_number)) => last_number + 1,
            None => 0,
        };

        if file_number < expect_min_file_number {
            bail!("register_snapshot failed: got unexpected file number ({} < {})",
                  file_number, expect_min_file_number);
        }

        if let Err(err) = snapshot.parse::<BackupDir>() {
            bail!("register_snapshot failed: unable to parse snapshot '{}' - {}", snapshot, err);
        }

        Ok(())
    }
|
|
|
|
|
|
|
|
    /// Register a snapshot
    ///
    /// Records an `S` entry (header + "<store>:<snapshot>" bytes) and
    /// indexes the snapshot under its datastore.
    pub fn register_snapshot(
        &mut self,
        uuid: Uuid, // Uuid form MediaContentHeader
        file_number: u64,
        store: &str,
        snapshot: &str,
    ) -> Result<(), Error> {

        self.check_register_snapshot(file_number, snapshot)?;

        let entry = SnapshotEntry {
            file_number,
            uuid: *uuid.as_bytes(),
            store_name_len: u8::try_from(store.len())?,
            name_len: u16::try_from(snapshot.len())?,
        };

        if self.log_to_stdout {
            println!("S|{}|{}|{}:{}", file_number, uuid.to_string(), store, snapshot);
        }

        self.pending.push(b'S');

        unsafe { self.pending.write_le_value(entry)?; }
        // on-disk layout: header, store name, ':' separator, snapshot name
        self.pending.extend(store.as_bytes());
        self.pending.push(b':');
        self.pending.extend(snapshot.as_bytes());

        let content = self.content.entry(store.to_string())
            .or_insert(DatastoreContent::new());

        content.snapshot_index.insert(snapshot.to_string(), file_number);

        self.last_entry = Some((uuid, file_number));

        Ok(())
    }
|
|
|
|
|
2021-03-23 12:36:41 +00:00
|
|
|
    /// Parse the catalog header
    ///
    /// Returns `(found_magic_number, media_uuid, media_set_uuid)`. Each
    /// element may legitimately be absent when the file ends early (an
    /// empty file, or a catalog with fewer than two label entries).
    pub fn parse_catalog_header<R: Read>(
        reader: &mut R,
    ) -> Result<(bool, Option<Uuid>, Option<Uuid>), Error> {

        // read/check magic number
        let mut magic = [0u8; 8];
        if !reader.read_exact_or_eof(&mut magic)? {
            /* EOF */
            return Ok((false, None, None));
        }

        if magic == Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0 {
            // only use in unreleased versions
            bail!("old catalog format (v1.0) is no longer supported");
        }
        if magic != Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1 {
            bail!("wrong magic number");
        }

        // first label entry (file 0): media uuid
        let mut entry_type = [0u8; 1];
        if !reader.read_exact_or_eof(&mut entry_type)? {
            /* EOF */
            return Ok((true, None, None));
        }

        if entry_type[0] != b'L' {
            bail!("got unexpected entry type");
        }

        let entry0: LabelEntry = unsafe { reader.read_le_value()? };

        // second label entry (file 1): media set uuid
        let mut entry_type = [0u8; 1];
        if !reader.read_exact_or_eof(&mut entry_type)? {
            /* EOF */
            return Ok((true, Some(entry0.uuid.into()), None));
        }

        if entry_type[0] != b'L' {
            bail!("got unexpected entry type");
        }

        let entry1: LabelEntry = unsafe { reader.read_le_value()? };

        Ok((true, Some(entry0.uuid.into()), Some(entry1.uuid.into())))
    }
|
|
|
|
|
2021-03-18 07:43:55 +00:00
|
|
|
    /// Replay the on-disk catalog into the in-memory indexes.
    ///
    /// Reads the magic number, then every entry (`C`/`A`/`E`/`S`/`L`) in
    /// order, applying the same consistency checks as the register_*
    /// methods. Returns `(found_magic_number, media_set_uuid)`, where the
    /// media set uuid comes from the label entry at file number 1 (checked
    /// against `media_set_label` when given).
    fn load_catalog(
        &mut self,
        file: &mut File,
        media_set_label: Option<&MediaSetLabel>,
    ) -> Result<(bool, Option<Uuid>), Error> {

        let mut file = BufReader::new(file);
        let mut found_magic_number = false;
        let mut media_set_uuid = None;

        loop {
            let pos = file.seek(SeekFrom::Current(0))?; // get current pos

            if pos == 0 { // read/check magic number
                let mut magic = [0u8; 8];
                match file.read_exact_or_eof(&mut magic) {
                    Ok(false) => { /* EOF */ break; }
                    Ok(true) => { /* OK */ }
                    Err(err) => bail!("read failed - {}", err),
                }
                if magic == Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_0 {
                    // only use in unreleased versions
                    bail!("old catalog format (v1.0) is no longer supported");
                }
                if magic != Self::PROXMOX_BACKUP_MEDIA_CATALOG_MAGIC_1_1 {
                    bail!("wrong magic number");
                }
                found_magic_number = true;
                continue;
            }

            // one-byte entry tag precedes every record
            let mut entry_type = [0u8; 1];
            match file.read_exact_or_eof(&mut entry_type) {
                Ok(false) => { /* EOF */ break; }
                Ok(true) => { /* OK */ }
                Err(err) => bail!("read failed - {}", err),
            }

            match entry_type[0] {
                b'C' => {
                    // chunk digest, attached to the currently open archive
                    let (file_number, store) = match self.current_archive {
                        None => bail!("register_chunk failed: no archive started"),
                        Some((_, file_number, ref store)) => (file_number, store),
                    };
                    let mut digest = [0u8; 32];
                    file.read_exact(&mut digest)?;
                    match self.content.get_mut(store) {
                        None => bail!("storage {} not registered - internal error", store),
                        Some(content) => {
                            content.chunk_index.insert(digest, file_number);
                        }
                    }
                }
                b'A' => {
                    // chunk archive start: header + store name
                    let entry: ChunkArchiveStart = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let uuid = Uuid::from(entry.uuid);
                    let store_name_len = entry.store_name_len as usize;

                    let store = file.read_exact_allocated(store_name_len)?;
                    let store = std::str::from_utf8(&store)?;

                    self.check_start_chunk_archive(file_number)?;

                    self.content.entry(store.to_string())
                        .or_insert(DatastoreContent::new());

                    self.current_archive = Some((uuid, file_number, store.to_string()));
                }
                b'E' => {
                    // chunk archive end: closes the open archive
                    let entry: ChunkArchiveEnd = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let uuid = Uuid::from(entry.uuid);

                    self.check_end_chunk_archive(&uuid, file_number)?;

                    self.current_archive = None;
                    self.last_entry = Some((uuid, file_number));
                }
                b'S' => {
                    // snapshot entry: header + "<store>:<snapshot>"
                    let entry: SnapshotEntry = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let store_name_len = entry.store_name_len as usize;
                    let name_len = entry.name_len as usize;
                    let uuid = Uuid::from(entry.uuid);

                    let store = file.read_exact_allocated(store_name_len + 1)?;
                    if store[store_name_len] != b':' {
                        bail!("parse-error: missing separator in SnapshotEntry");
                    }

                    let store = std::str::from_utf8(&store[..store_name_len])?;

                    let snapshot = file.read_exact_allocated(name_len)?;
                    let snapshot = std::str::from_utf8(&snapshot)?;

                    self.check_register_snapshot(file_number, snapshot)?;

                    let content = self.content.entry(store.to_string())
                        .or_insert(DatastoreContent::new());

                    content.snapshot_index.insert(snapshot.to_string(), file_number);

                    self.last_entry = Some((uuid, file_number));
                }
                b'L' => {
                    // media label (file 0) or media set label (file 1)
                    let entry: LabelEntry = unsafe { file.read_le_value()? };
                    let file_number = entry.file_number;
                    let uuid = Uuid::from(entry.uuid);

                    self.check_register_label(file_number, &uuid)?;

                    if file_number == 1 {
                        if let Some(set) = media_set_label {
                            if set.uuid != uuid {
                                bail!("got unexpected media set uuid");
                            }
                            if set.seq_nr != entry.seq_nr {
                                bail!("got unexpected media set sequence number");
                            }
                        }
                        media_set_uuid = Some(uuid.clone());
                    }

                    self.last_entry = Some((uuid, file_number));
                }
                _ => {
                    bail!("unknown entry type '{}'", entry_type[0]);
                }
            }
        }

        Ok((found_magic_number, media_set_uuid))
    }
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Media set catalog
///
/// Catalog for multiple media.
pub struct MediaSetCatalog {
    // Per-media catalogs, keyed by the media (BackupMedia) uuid.
    // Kept private; access goes through the lookup/contains helpers.
    catalog_list: HashMap<Uuid, MediaCatalog>,
}
|
|
|
|
|
|
|
|
impl MediaSetCatalog {
|
|
|
|
|
|
|
|
/// Creates a new instance
|
|
|
|
pub fn new() -> Self {
|
|
|
|
Self {
|
|
|
|
catalog_list: HashMap::new(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Add a catalog
|
|
|
|
pub fn append_catalog(&mut self, catalog: MediaCatalog) -> Result<(), Error> {
|
|
|
|
|
|
|
|
if self.catalog_list.get(&catalog.uuid).is_some() {
|
|
|
|
bail!("MediaSetCatalog already contains media '{}'", catalog.uuid);
|
|
|
|
}
|
|
|
|
|
|
|
|
self.catalog_list.insert(catalog.uuid.clone(), catalog);
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Remove a catalog
|
|
|
|
pub fn remove_catalog(&mut self, media_uuid: &Uuid) {
|
|
|
|
self.catalog_list.remove(media_uuid);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Test if the catalog already contain a snapshot
|
2021-03-16 11:52:49 +00:00
|
|
|
pub fn contains_snapshot(&self, store: &str, snapshot: &str) -> bool {
|
2020-12-15 12:13:44 +00:00
|
|
|
for catalog in self.catalog_list.values() {
|
2021-03-16 11:52:49 +00:00
|
|
|
if catalog.contains_snapshot(store, snapshot) {
|
2020-12-15 12:13:44 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2021-05-05 10:09:12 +00:00
|
|
|
/// Returns the media uuid and snapshot archive file number
|
|
|
|
pub fn lookup_snapshot(&self, store: &str, snapshot: &str) -> Option<(&Uuid, u64)> {
|
|
|
|
for (uuid, catalog) in self.catalog_list.iter() {
|
|
|
|
if let Some(nr) = catalog.lookup_snapshot(store, snapshot) {
|
|
|
|
return Some((uuid, nr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
None
|
|
|
|
}
|
|
|
|
|
2020-12-15 12:13:44 +00:00
|
|
|
/// Test if the catalog already contain a chunk
|
2021-03-16 11:52:49 +00:00
|
|
|
pub fn contains_chunk(&self, store: &str, digest: &[u8;32]) -> bool {
|
2020-12-15 12:13:44 +00:00
|
|
|
for catalog in self.catalog_list.values() {
|
2021-03-16 11:52:49 +00:00
|
|
|
if catalog.contains_chunk(store, digest) {
|
2020-12-15 12:13:44 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
false
|
|
|
|
}
|
2021-05-05 10:09:12 +00:00
|
|
|
|
|
|
|
/// Returns the media uuid and chunk archive file number
|
|
|
|
pub fn lookup_chunk(&self, store: &str, digest: &[u8;32]) -> Option<(&Uuid, u64)> {
|
|
|
|
for (uuid, catalog) in self.catalog_list.iter() {
|
|
|
|
if let Some(nr) = catalog.lookup_chunk(store, digest) {
|
|
|
|
return Some((uuid, nr));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
None
|
|
|
|
}
|
2020-12-15 12:13:44 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Type definitions for internal binary catalog encoding

/// On-disk catalog record for a media label or media set label
/// (entry type 'L' in the catalog byte stream).
#[derive(Endian)]
#[repr(C)]
struct LabelEntry {
    file_number: u64, // tape file number the label was written to
    uuid: [u8;16],    // raw label uuid bytes
    seq_nr: u64, // only used for media set labels
}
|
|
|
|
|
|
|
|
/// On-disk catalog record marking the start of a chunk archive
/// (entry type 'A'); the datastore name (`store_name_len` bytes)
/// immediately follows this fixed-size header.
#[derive(Endian)]
#[repr(C)]
struct ChunkArchiveStart {
    file_number: u64, // tape file number of the chunk archive
    uuid: [u8;16],    // raw archive uuid bytes
    store_name_len: u8, // length of the datastore name that follows
    /* datastore name follows */
}
|
|
|
|
|
|
|
|
/// On-disk catalog record marking the end of a chunk archive
/// (entry type 'E'); must match the preceding ChunkArchiveStart
/// uuid and file number.
#[derive(Endian)]
#[repr(C)]
struct ChunkArchiveEnd{
    file_number: u64, // tape file number of the chunk archive
    uuid: [u8;16],    // raw archive uuid bytes
}
|
|
|
|
|
|
|
|
/// On-disk catalog record for a snapshot archive (entry type 'S');
/// the payload after this fixed-size header is the datastore name
/// (`store_name_len` bytes), a ':' separator byte, then the snapshot
/// name (`name_len` bytes).
#[derive(Endian)]
#[repr(C)]
struct SnapshotEntry{
    file_number: u64, // tape file number of the snapshot archive
    uuid: [u8;16],    // raw archive uuid bytes
    store_name_len: u8, // length of the datastore name in the payload
    name_len: u16,      // length of the snapshot name in the payload
    /* datastore name, ':', snapshot name follows */
}
|