move backup id related types to pbs-api-types
Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
pbs-datastore/src/backup_info.rs (new file, 394 lines)
@@ -0,0 +1,394 @@
use std::os::unix::io::RawFd;
use std::path::{Path, PathBuf};

use anyhow::{bail, format_err, Error};

use crate::api2::types::{
    BACKUP_ID_REGEX,
    BACKUP_TYPE_REGEX,
    BACKUP_DATE_REGEX,
    GROUP_PATH_REGEX,
    SNAPSHOT_PATH_REGEX,
    BACKUP_FILE_REGEX,
};

use super::manifest::MANIFEST_BLOB_NAME;

/// BackupGroup is a directory containing a list of BackupDir
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
pub struct BackupGroup {
    /// Type of backup
    backup_type: String,
    /// Unique (for this type) ID
    backup_id: String,
}

impl std::cmp::Ord for BackupGroup {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        let type_order = self.backup_type.cmp(&other.backup_type);
        if type_order != std::cmp::Ordering::Equal {
            return type_order;
        }
        // try to compare IDs numerically
        let id_self = self.backup_id.parse::<u64>();
        let id_other = other.backup_id.parse::<u64>();
        match (id_self, id_other) {
            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
            _ => self.backup_id.cmp(&other.backup_id),
        }
    }
}

impl std::cmp::PartialOrd for BackupGroup {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
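// Illustrative sketch (not in the original file): the ordering above compares
// IDs numerically when both parse as unsigned integers, sorts numeric IDs
// before non-numeric ones, and otherwise falls back to plain string
// comparison. The IDs used here are made up for illustration.
#[test]
fn example_backup_group_ordering() {
    let a = BackupGroup::new("vm", "9");
    let b = BackupGroup::new("vm", "100");
    let c = BackupGroup::new("vm", "template");
    assert!(a < b); // 9 < 100 numerically, although "9" > "100" as strings
    assert!(b < c); // numeric IDs sort before non-numeric ones
}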
impl BackupGroup {
    pub fn new<T: Into<String>, U: Into<String>>(backup_type: T, backup_id: U) -> Self {
        Self {
            backup_type: backup_type.into(),
            backup_id: backup_id.into(),
        }
    }

    pub fn backup_type(&self) -> &str {
        &self.backup_type
    }

    pub fn backup_id(&self) -> &str {
        &self.backup_id
    }

    pub fn group_path(&self) -> PathBuf {
        let mut relative_path = PathBuf::new();

        relative_path.push(&self.backup_type);

        relative_path.push(&self.backup_id);

        relative_path
    }

    pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
        let mut list = vec![];

        let mut path = base_path.to_owned();
        path.push(self.group_path());

        pbs_tools::fs::scandir(
            libc::AT_FDCWD,
            &path,
            &BACKUP_DATE_REGEX,
            |l2_fd, backup_time, file_type| {
                if file_type != nix::dir::Type::Directory {
                    return Ok(());
                }

                let backup_dir =
                    BackupDir::with_rfc3339(&self.backup_type, &self.backup_id, backup_time)?;
                let files = list_backup_files(l2_fd, backup_time)?;

                list.push(BackupInfo { backup_dir, files });

                Ok(())
            },
        )?;
        Ok(list)
    }

    pub fn last_successful_backup(&self, base_path: &Path) -> Result<Option<i64>, Error> {
        let mut last = None;

        let mut path = base_path.to_owned();
        path.push(self.group_path());

        pbs_tools::fs::scandir(
            libc::AT_FDCWD,
            &path,
            &BACKUP_DATE_REGEX,
            |l2_fd, backup_time, file_type| {
                if file_type != nix::dir::Type::Directory {
                    return Ok(());
                }

                let mut manifest_path = PathBuf::from(backup_time);
                manifest_path.push(MANIFEST_BLOB_NAME);

                use nix::fcntl::{openat, OFlag};
                match openat(
                    l2_fd,
                    &manifest_path,
                    OFlag::O_RDONLY,
                    nix::sys::stat::Mode::empty(),
                ) {
                    Ok(rawfd) => {
                        /* manifest exists --> assume backup was successful */
                        /* close else this leaks! */
                        nix::unistd::close(rawfd)?;
                    }
                    Err(nix::Error::Sys(nix::errno::Errno::ENOENT)) => {
                        return Ok(());
                    }
                    Err(err) => {
                        bail!("last_successful_backup: unexpected error - {}", err);
                    }
                }

                let timestamp = proxmox::tools::time::parse_rfc3339(backup_time)?;
                if let Some(last_timestamp) = last {
                    if timestamp > last_timestamp {
                        last = Some(timestamp);
                    }
                } else {
                    last = Some(timestamp);
                }

                Ok(())
            },
        )?;

        Ok(last)
    }
}

impl std::fmt::Display for BackupGroup {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let backup_type = self.backup_type();
        let id = self.backup_id();
        write!(f, "{}/{}", backup_type, id)
    }
}

impl std::str::FromStr for BackupGroup {
    type Err = Error;

    /// Parse a backup group path
    ///
    /// This parses strings like `vm/100`.
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = GROUP_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

        Ok(Self {
            backup_type: cap.get(1).unwrap().as_str().to_owned(),
            backup_id: cap.get(2).unwrap().as_str().to_owned(),
        })
    }
}
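// Illustrative sketch (not in the original file): for a well-formed group
// path such as `vm/100`, FromStr and Display round-trip each other.
#[test]
fn example_parse_backup_group() {
    let group: BackupGroup = "vm/100".parse().unwrap();
    assert_eq!(group.backup_type(), "vm");
    assert_eq!(group.backup_id(), "100");
    assert_eq!(group.to_string(), "vm/100");
}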
/// Uniquely identify a Backup (relative to data store)
///
/// We also call this a backup snapshot.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct BackupDir {
    /// Backup group
    group: BackupGroup,
    /// Backup timestamp
    backup_time: i64,
    // backup_time as rfc3339
    backup_time_string: String,
}

impl BackupDir {
    pub fn new<T, U>(backup_type: T, backup_id: U, backup_time: i64) -> Result<Self, Error>
    where
        T: Into<String>,
        U: Into<String>,
    {
        let group = BackupGroup::new(backup_type.into(), backup_id.into());
        BackupDir::with_group(group, backup_time)
    }

    pub fn with_rfc3339<T, U, V>(
        backup_type: T,
        backup_id: U,
        backup_time_string: V,
    ) -> Result<Self, Error>
    where
        T: Into<String>,
        U: Into<String>,
        V: Into<String>,
    {
        let backup_time_string = backup_time_string.into();
        let backup_time = proxmox::tools::time::parse_rfc3339(&backup_time_string)?;
        let group = BackupGroup::new(backup_type.into(), backup_id.into());
        Ok(Self {
            group,
            backup_time,
            backup_time_string,
        })
    }

    pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
        let backup_time_string = Self::backup_time_to_string(backup_time)?;
        Ok(Self {
            group,
            backup_time,
            backup_time_string,
        })
    }

    pub fn group(&self) -> &BackupGroup {
        &self.group
    }

    pub fn backup_time(&self) -> i64 {
        self.backup_time
    }

    pub fn backup_time_string(&self) -> &str {
        &self.backup_time_string
    }

    pub fn relative_path(&self) -> PathBuf {
        let mut relative_path = self.group.group_path();

        relative_path.push(self.backup_time_string.clone());

        relative_path
    }

    pub fn backup_time_to_string(backup_time: i64) -> Result<String, Error> {
        // fixme: can this fail? (avoid unwrap)
        proxmox::tools::time::epoch_to_rfc3339_utc(backup_time)
    }
}

impl std::str::FromStr for BackupDir {
    type Err = Error;

    /// Parse a snapshot path
    ///
    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z`.
    fn from_str(path: &str) -> Result<Self, Self::Err> {
        let cap = SNAPSHOT_PATH_REGEX
            .captures(path)
            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

        BackupDir::with_rfc3339(
            cap.get(1).unwrap().as_str(),
            cap.get(2).unwrap().as_str(),
            cap.get(3).unwrap().as_str(),
        )
    }
}
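// Illustrative sketch (not in the original file): a snapshot path is the
// group path plus an RFC3339 timestamp, as described in the doc comment.
#[test]
fn example_parse_backup_dir() {
    let snapshot: BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse().unwrap();
    assert_eq!(snapshot.group().backup_type(), "host");
    assert_eq!(snapshot.group().backup_id(), "elsa");
    assert_eq!(snapshot.backup_time_string(), "2020-06-15T05:18:33Z");
}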
impl std::fmt::Display for BackupDir {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let backup_type = self.group.backup_type();
        let id = self.group.backup_id();
        write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
    }
}

/// Detailed Backup Information, lists files inside a BackupDir
#[derive(Debug, Clone)]
pub struct BackupInfo {
    /// the backup directory
    pub backup_dir: BackupDir,
    /// List of data files
    pub files: Vec<String>,
}

impl BackupInfo {
    pub fn new(base_path: &Path, backup_dir: BackupDir) -> Result<BackupInfo, Error> {
        let mut path = base_path.to_owned();
        path.push(backup_dir.relative_path());

        let files = list_backup_files(libc::AT_FDCWD, &path)?;

        Ok(BackupInfo { backup_dir, files })
    }

    /// Finds the latest backup inside a backup group
    pub fn last_backup(
        base_path: &Path,
        group: &BackupGroup,
        only_finished: bool,
    ) -> Result<Option<BackupInfo>, Error> {
        let backups = group.list_backups(base_path)?;
        Ok(backups
            .into_iter()
            .filter(|item| !only_finished || item.is_finished())
            .max_by_key(|item| item.backup_dir.backup_time()))
    }

    pub fn sort_list(list: &mut Vec<BackupInfo>, ascending: bool) {
        if ascending {
            // oldest first
            list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
        } else {
            // newest first
            list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
        }
    }

    pub fn list_files(base_path: &Path, backup_dir: &BackupDir) -> Result<Vec<String>, Error> {
        let mut path = base_path.to_owned();
        path.push(backup_dir.relative_path());

        let files = list_backup_files(libc::AT_FDCWD, &path)?;

        Ok(files)
    }

    pub fn list_backup_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
        let mut list = Vec::new();

        pbs_tools::fs::scandir(
            libc::AT_FDCWD,
            base_path,
            &BACKUP_TYPE_REGEX,
            |l0_fd, backup_type, file_type| {
                if file_type != nix::dir::Type::Directory {
                    return Ok(());
                }
                pbs_tools::fs::scandir(
                    l0_fd,
                    backup_type,
                    &BACKUP_ID_REGEX,
                    |_, backup_id, file_type| {
                        if file_type != nix::dir::Type::Directory {
                            return Ok(());
                        }

                        list.push(BackupGroup::new(backup_type, backup_id));

                        Ok(())
                    },
                )
            },
        )?;

        Ok(list)
    }

    pub fn is_finished(&self) -> bool {
        // backup is considered unfinished if there is no manifest
        self.files
            .iter()
            .any(|name| name == super::MANIFEST_BLOB_NAME)
    }
}

fn list_backup_files<P: ?Sized + nix::NixPath>(
    dirfd: RawFd,
    path: &P,
) -> Result<Vec<String>, Error> {
    let mut files = vec![];

    pbs_tools::fs::scandir(dirfd, path, &BACKUP_FILE_REGEX, |_, filename, file_type| {
        if file_type != nix::dir::Type::File {
            return Ok(());
        }
        files.push(filename.to_owned());
        Ok(())
    })?;

    Ok(files)
}
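// Illustrative sketch (not in the original file): the typical lookup flow
// built from the helpers above, given the base path of a datastore; the
// function name and the printed format are made up for illustration.
#[allow(dead_code)]
fn example_latest_snapshots(base_path: &Path) -> Result<(), Error> {
    // enumerate `<base>/<backup_type>/<backup_id>` directories
    for group in BackupInfo::list_backup_groups(base_path)? {
        // newest snapshot that already has a manifest (i.e. finished)
        if let Some(info) = BackupInfo::last_backup(base_path, &group, true)? {
            println!("{}: {} file(s)", info.backup_dir, info.files.len());
        }
    }
    Ok(())
}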
pbs-datastore/src/manifest.rs (new file, 317 lines)
@@ -0,0 +1,317 @@
use anyhow::{bail, format_err, Error};
use std::convert::TryFrom;
use std::path::Path;

use serde_json::{json, Value};
use ::serde::{Deserialize, Serialize};

use crate::backup::{BackupDir, CryptMode, CryptConfig, Fingerprint};

pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
pub const ENCRYPTED_KEY_BLOB_NAME: &str = "rsa-encrypted.key.blob";

mod hex_csum {
    use serde::{self, Deserialize, Serializer, Deserializer};

    pub fn serialize<S>(
        csum: &[u8; 32],
        serializer: S,
    ) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let s = proxmox::tools::digest_to_hex(csum);
        serializer.serialize_str(&s)
    }

    pub fn deserialize<'de, D>(
        deserializer: D,
    ) -> Result<[u8; 32], D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        proxmox::tools::hex_to_digest(&s).map_err(serde::de::Error::custom)
    }
}

fn crypt_mode_none() -> CryptMode { CryptMode::None }
fn empty_value() -> Value { json!({}) }

#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
pub struct FileInfo {
    pub filename: String,
    #[serde(default="crypt_mode_none")] // to be compatible with < 0.8.0 backups
    pub crypt_mode: CryptMode,
    pub size: u64,
    #[serde(with = "hex_csum")]
    pub csum: [u8; 32],
}

impl FileInfo {
    /// Return expected CryptMode of referenced chunks
    ///
    /// Encrypted Indices should only reference encrypted chunks, while signed or plain indices
    /// should only reference plain chunks.
    pub fn chunk_crypt_mode(&self) -> CryptMode {
        match self.crypt_mode {
            CryptMode::Encrypt => CryptMode::Encrypt,
            CryptMode::SignOnly | CryptMode::None => CryptMode::None,
        }
    }
}
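// Illustrative sketch (not in the original file): a sign-only index is
// expected to reference plain chunks; only `Encrypt` maps to `Encrypt`.
// The filename and checksum below are placeholders.
#[test]
fn example_chunk_crypt_mode() {
    let info = FileInfo {
        filename: "catalog.pcat1.didx".to_string(),
        crypt_mode: CryptMode::SignOnly,
        size: 0,
        csum: [0u8; 32],
    };
    assert!(matches!(info.chunk_crypt_mode(), CryptMode::None));
}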
#[derive(Serialize, Deserialize)]
#[serde(rename_all="kebab-case")]
pub struct BackupManifest {
    backup_type: String,
    backup_id: String,
    backup_time: i64,
    files: Vec<FileInfo>,
    #[serde(default="empty_value")] // to be compatible with < 0.8.0 backups
    pub unprotected: Value,
    pub signature: Option<String>,
}

#[derive(PartialEq)]
pub enum ArchiveType {
    FixedIndex,
    DynamicIndex,
    Blob,
}

pub fn archive_type<P: AsRef<Path>>(
    archive_name: P,
) -> Result<ArchiveType, Error> {
    let archive_name = archive_name.as_ref();
    let archive_type = match archive_name.extension().and_then(|ext| ext.to_str()) {
        Some("didx") => ArchiveType::DynamicIndex,
        Some("fidx") => ArchiveType::FixedIndex,
        Some("blob") => ArchiveType::Blob,
        _ => bail!("unknown archive type: {:?}", archive_name),
    };
    Ok(archive_type)
}
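// Illustrative sketch (not in the original file): the archive type is
// derived purely from the file extension; anything else is an error.
#[test]
fn example_archive_type() -> Result<(), Error> {
    assert!(archive_type("drive-scsi0.img.fidx")? == ArchiveType::FixedIndex);
    assert!(archive_type("root.pxar.didx")? == ArchiveType::DynamicIndex);
    assert!(archive_type("index.json.blob")? == ArchiveType::Blob);
    assert!(archive_type("notes.txt").is_err());
    Ok(())
}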
impl BackupManifest {
    pub fn new(snapshot: BackupDir) -> Self {
        Self {
            backup_type: snapshot.group().backup_type().into(),
            backup_id: snapshot.group().backup_id().into(),
            backup_time: snapshot.backup_time(),
            files: Vec::new(),
            unprotected: json!({}),
            signature: None,
        }
    }

    pub fn add_file(&mut self, filename: String, size: u64, csum: [u8; 32], crypt_mode: CryptMode) -> Result<(), Error> {
        let _archive_type = archive_type(&filename)?; // check type
        self.files.push(FileInfo { filename, size, csum, crypt_mode });
        Ok(())
    }

    pub fn files(&self) -> &[FileInfo] {
        &self.files[..]
    }

    pub fn lookup_file_info(&self, name: &str) -> Result<&FileInfo, Error> {
        let info = self.files.iter().find(|item| item.filename == name);

        match info {
            None => bail!("manifest does not contain file '{}'", name),
            Some(info) => Ok(info),
        }
    }

    pub fn verify_file(&self, name: &str, csum: &[u8; 32], size: u64) -> Result<(), Error> {
        let info = self.lookup_file_info(name)?;

        if size != info.size {
            bail!("wrong size for file '{}' ({} != {})", name, info.size, size);
        }

        if csum != &info.csum {
            bail!("wrong checksum for file '{}'", name);
        }

        Ok(())
    }

    // Generate canonical json
    fn to_canonical_json(value: &Value) -> Result<Vec<u8>, Error> {
        crate::tools::json::to_canonical_json(value)
    }

    /// Compute manifest signature
    ///
    /// By generating an HMAC-SHA256 over the canonical JSON
    /// representation. The 'unprotected' property is excluded.
    pub fn signature(&self, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
        Self::json_signature(&serde_json::to_value(&self)?, crypt_config)
    }

    fn json_signature(data: &Value, crypt_config: &CryptConfig) -> Result<[u8; 32], Error> {
        let mut signed_data = data.clone();

        signed_data.as_object_mut().unwrap().remove("unprotected"); // exclude
        signed_data.as_object_mut().unwrap().remove("signature"); // exclude

        let canonical = Self::to_canonical_json(&signed_data)?;

        let sig = crypt_config.compute_auth_tag(&canonical);

        Ok(sig)
    }

    /// Converts the manifest into a JSON string, and adds a signature if there is a crypt_config.
    pub fn to_string(&self, crypt_config: Option<&CryptConfig>) -> Result<String, Error> {
        let mut manifest = serde_json::to_value(&self)?;

        if let Some(crypt_config) = crypt_config {
            let sig = self.signature(crypt_config)?;
            manifest["signature"] = proxmox::tools::digest_to_hex(&sig).into();
            let fingerprint = &crypt_config.fingerprint();
            manifest["unprotected"]["key-fingerprint"] = serde_json::to_value(fingerprint)?;
        }

        let manifest = serde_json::to_string_pretty(&manifest).unwrap();
        Ok(manifest)
    }

    pub fn fingerprint(&self) -> Result<Option<Fingerprint>, Error> {
        match &self.unprotected["key-fingerprint"] {
            Value::Null => Ok(None),
            value => Ok(Some(serde_json::from_value(value.clone())?))
        }
    }

    /// Checks if a BackupManifest and a CryptConfig share a valid fingerprint combination.
    ///
    /// An unsigned manifest is valid with any or no CryptConfig.
    /// A signed manifest is only valid with a matching CryptConfig.
    pub fn check_fingerprint(&self, crypt_config: Option<&CryptConfig>) -> Result<(), Error> {
        if let Some(fingerprint) = self.fingerprint()? {
            match crypt_config {
                None => bail!(
                    "missing key - manifest was created with key {}",
                    fingerprint,
                ),
                Some(crypt_config) => {
                    let config_fp = crypt_config.fingerprint();
                    if config_fp != fingerprint {
                        bail!(
                            "wrong key - manifest's key {} does not match provided key {}",
                            fingerprint,
                            config_fp
                        );
                    }
                }
            }
        };

        Ok(())
    }

    /// Try to read the manifest. This verifies the signature if there is a crypt_config.
    pub fn from_data(data: &[u8], crypt_config: Option<&CryptConfig>) -> Result<BackupManifest, Error> {
        let json: Value = serde_json::from_slice(data)?;
        let signature = json["signature"].as_str().map(String::from);

        if let Some(ref crypt_config) = crypt_config {
            if let Some(signature) = signature {
                let expected_signature = proxmox::tools::digest_to_hex(&Self::json_signature(&json, crypt_config)?);

                let fingerprint = &json["unprotected"]["key-fingerprint"];
                if fingerprint != &Value::Null {
                    let fingerprint = serde_json::from_value(fingerprint.clone())?;
                    let config_fp = crypt_config.fingerprint();
                    if config_fp != fingerprint {
                        bail!(
                            "wrong key - unable to verify signature since manifest's key {} does not match provided key {}",
                            fingerprint,
                            config_fp
                        );
                    }
                }
                if signature != expected_signature {
                    bail!("wrong signature in manifest");
                }
            } else {
                // not signed: warn/fail?
            }
        }

        let manifest: BackupManifest = serde_json::from_value(json)?;
        Ok(manifest)
    }
}
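// Illustrative sketch (not in the original file): a freshly created manifest
// carries no key fingerprint, so check_fingerprint() accepts it without any
// CryptConfig, matching the doc comment on check_fingerprint() above.
#[test]
fn example_check_fingerprint_unsigned() -> Result<(), Error> {
    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
    let manifest = BackupManifest::new(snapshot);
    assert!(manifest.fingerprint()?.is_none());
    manifest.check_fingerprint(None)?;
    Ok(())
}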
impl TryFrom<super::DataBlob> for BackupManifest {
    type Error = Error;

    fn try_from(blob: super::DataBlob) -> Result<Self, Error> {
        // no expected digest available
        let data = blob.decode(None, None)
            .map_err(|err| format_err!("decode backup manifest blob failed - {}", err))?;
        let json: Value = serde_json::from_slice(&data[..])
            .map_err(|err| format_err!("unable to parse backup manifest json - {}", err))?;
        let manifest: BackupManifest = serde_json::from_value(json)?;
        Ok(manifest)
    }
}

#[test]
fn test_manifest_signature() -> Result<(), Error> {
    use crate::backup::{KeyDerivationConfig};

    let pw = b"test";

    let kdf = KeyDerivationConfig::Scrypt {
        n: 65536,
        r: 8,
        p: 1,
        salt: Vec::new(),
    };

    let testkey = kdf.derive_key(pw)?;

    let crypt_config = CryptConfig::new(testkey)?;

    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;

    let mut manifest = BackupManifest::new(snapshot);

    manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
    manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;

    manifest.unprotected["note"] = "This is not protected by the signature.".into();

    let text = manifest.to_string(Some(&crypt_config))?;

    let manifest: Value = serde_json::from_str(&text)?;
    let signature = manifest["signature"].as_str().unwrap().to_string();

    assert_eq!(signature, "d7b446fb7db081662081d4b40fedd858a1d6307a5aff4ecff7d5bf4fd35679e9");

    let manifest: BackupManifest = serde_json::from_value(manifest)?;
    let expected_signature = proxmox::tools::digest_to_hex(&manifest.signature(&crypt_config)?);

    assert_eq!(signature, expected_signature);

    Ok(())
}
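// Illustrative sketch (not in the original file): verify_file() checks size
// and checksum against the entries previously registered via add_file();
// the file name, size and checksum below are placeholders.
#[test]
fn example_manifest_verify_file() -> Result<(), Error> {
    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
    let mut manifest = BackupManifest::new(snapshot);
    manifest.add_file("data.img.fidx".into(), 1234, [3u8; 32], CryptMode::None)?;

    // matching size and checksum pass
    manifest.verify_file("data.img.fidx", &[3u8; 32], 1234)?;
    // a wrong size is rejected
    assert!(manifest.verify_file("data.img.fidx", &[3u8; 32], 42).is_err());
    // unknown files are reported as missing
    assert!(manifest.lookup_file_info("missing.blob").is_err());

    Ok(())
}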