make datastore BackupGroup/Dir ctors private
And use the api-types for their contents. These are supposed to be
instances for a datastore, while the pure specifications are the ones in
pbs_api_types, which should be preferred in crates like clients that do
not need to deal with the datastore directly.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
parent 38aa71fcc8
commit db87d93efc
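The refactor in short: code outside pbs-datastore keeps passing around the plain
pbs_api_types specifications, and only datastore-facing code converts them into
datastore-bound instances via the new DataStore helpers. A minimal sketch of a
call site after this change ("store" is assumed to be a DataStore obtained
elsewhere, e.g. via DataStore::lookup_datastore; the rest is taken from the diff
below):

    // Sketch only -- intended usage after this commit, not part of the diff.
    // Client code keeps working with the pure API type:
    let spec: pbs_api_types::BackupDir = "host/elsa/2020-06-15T05:18:33Z".parse()?;

    // Only datastore-facing code turns the spec into a datastore instance,
    // since the BackupDir/BackupGroup constructors are now private/pub(crate):
    let snapshot = store.backup_dir_from_spec(spec)?;
    let full_path = snapshot.full_path(store.base_path());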
@@ -15,7 +15,6 @@ use proxmox_schema::*;
 use proxmox_sys::fs::file_get_json;
 
 use pbs_api_types::{Authid, RateLimitConfig, UserWithTokens, BACKUP_REPO_URL};
-use pbs_datastore::BackupDir;
 use pbs_tools::json::json_object_to_query;
 
 use crate::{BackupRepository, HttpClient, HttpClientOptions};
@@ -258,24 +257,15 @@ pub async fn complete_backup_snapshot_do(param: &HashMap<String, String>) -> Vec
 
     let data = try_get(&repo, &path).await;
 
-    if let Some(list) = data.as_array() {
+    if let Value::Array(list) = data {
         for item in list {
-            if let (Some(backup_id), Some(backup_type), Some(backup_time)) = (
-                item["backup-id"].as_str(),
-                item["backup-type"].as_str(),
-                item["backup-time"].as_i64(),
-            ) {
-                let backup_type = match backup_type.parse() {
-                    Ok(ty) => ty,
-                    Err(_) => {
-                        // FIXME: print error in completion?
-                        continue;
-                    }
-                };
-                if let Ok(snapshot) = BackupDir::new(backup_type, backup_id, backup_time) {
-                    result.push(snapshot.relative_path().to_str().unwrap().to_owned());
-                }
-            }
+            match serde_json::from_value::<pbs_api_types::BackupDir>(item) {
+                Ok(item) => result.push(item.to_string()),
+                Err(_) => {
+                    // FIXME: print error in completion?
+                    continue;
+                }
+            };
         }
     }
 
@@ -294,7 +284,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
         _ => return result,
     };
 
-    let snapshot: BackupDir = match param.get("snapshot") {
+    let snapshot: pbs_api_types::BackupDir = match param.get("snapshot") {
         Some(path) => match path.parse() {
             Ok(v) => v,
             _ => return result,
@@ -303,9 +293,9 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
     };
 
     let query = json_object_to_query(json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
+        "backup-type": snapshot.group.ty,
+        "backup-id": snapshot.group.id,
+        "backup-time": snapshot.time,
     }))
     .unwrap();
 
@@ -1,40 +1,21 @@
 use std::os::unix::io::RawFd;
 use std::path::{Path, PathBuf};
-use std::str::FromStr;
 
-use anyhow::{bail, format_err, Error};
+use anyhow::{bail, Error};
 
-use pbs_api_types::{
-    BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, GROUP_PATH_REGEX,
-    SNAPSHOT_PATH_REGEX,
-};
+use pbs_api_types::{BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX};
 
 use super::manifest::MANIFEST_BLOB_NAME;
 
 /// BackupGroup is a directory containing a list of BackupDir
 #[derive(Debug, Eq, PartialEq, Hash, Clone)]
 pub struct BackupGroup {
-    /// Type of backup
-    backup_type: BackupType,
-    /// Unique (for this type) ID
-    backup_id: String,
+    group: pbs_api_types::BackupGroup,
 }
 
 impl std::cmp::Ord for BackupGroup {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        let type_order = self.backup_type.cmp(&other.backup_type);
-        if type_order != std::cmp::Ordering::Equal {
-            return type_order;
-        }
-        // try to compare IDs numerically
-        let id_self = self.backup_id.parse::<u64>();
-        let id_other = other.backup_id.parse::<u64>();
-        match (id_self, id_other) {
-            (Ok(id_self), Ok(id_other)) => id_self.cmp(&id_other),
-            (Ok(_), Err(_)) => std::cmp::Ordering::Less,
-            (Err(_), Ok(_)) => std::cmp::Ordering::Greater,
-            _ => self.backup_id.cmp(&other.backup_id),
-        }
+        self.group.cmp(&other.group)
     }
 }
 
@@ -45,29 +26,22 @@ impl std::cmp::PartialOrd for BackupGroup {
 }
 
 impl BackupGroup {
-    pub fn new<T: Into<String>>(backup_type: BackupType, backup_id: T) -> Self {
+    pub(crate) fn new<T: Into<String>>(backup_type: BackupType, backup_id: T) -> Self {
         Self {
-            backup_type,
-            backup_id: backup_id.into(),
+            group: (backup_type, backup_id.into()).into(),
         }
     }
 
     pub fn backup_type(&self) -> BackupType {
-        self.backup_type
+        self.group.ty
    }
 
     pub fn backup_id(&self) -> &str {
-        &self.backup_id
+        &self.group.id
     }
 
     pub fn relative_group_path(&self) -> PathBuf {
-        let mut relative_path = PathBuf::new();
-
-        relative_path.push(self.backup_type.as_str());
-
-        relative_path.push(&self.backup_id);
-
-        relative_path
+        self.group.to_string().into()
     }
 
     pub fn list_backups(&self, base_path: &Path) -> Result<Vec<BackupInfo>, Error> {
@@ -85,8 +59,7 @@ impl BackupGroup {
                 return Ok(());
             }
 
-            let backup_dir =
-                BackupDir::with_rfc3339(self.backup_type, &self.backup_id, backup_time)?;
+            let backup_dir = self.backup_dir_with_rfc3339(backup_time)?;
             let files = list_backup_files(l2_fd, backup_time)?;
 
             let protected = backup_dir.is_protected(base_path.to_owned());
@@ -171,26 +144,37 @@ impl BackupGroup {
     }
 
     pub fn matches(&self, filter: &GroupFilter) -> bool {
-        match filter {
-            GroupFilter::Group(backup_group) => match BackupGroup::from_str(backup_group) {
-                Ok(group) => &group == self,
-                Err(_) => false, // shouldn't happen if value is schema-checked
-            },
-            GroupFilter::BackupType(backup_type) => self.backup_type().as_str() == backup_type,
-            GroupFilter::Regex(regex) => regex.is_match(&self.to_string()),
-        }
+        self.group.matches(filter)
+    }
+
+    pub fn backup_dir(&self, time: i64) -> Result<BackupDir, Error> {
+        BackupDir::with_group(self.clone(), time)
+    }
+
+    pub fn backup_dir_with_rfc3339<T: Into<String>>(
+        &self,
+        time_string: T,
+    ) -> Result<BackupDir, Error> {
+        BackupDir::with_rfc3339(self.clone(), time_string.into())
+    }
+}
+
+impl AsRef<pbs_api_types::BackupGroup> for BackupGroup {
+    #[inline]
+    fn as_ref(&self) -> &pbs_api_types::BackupGroup {
+        &self.group
     }
 }
 
 impl From<&BackupGroup> for pbs_api_types::BackupGroup {
     fn from(group: &BackupGroup) -> pbs_api_types::BackupGroup {
-        (group.backup_type, group.backup_id.clone()).into()
+        group.group.clone()
     }
 }
 
 impl From<BackupGroup> for pbs_api_types::BackupGroup {
     fn from(group: BackupGroup) -> pbs_api_types::BackupGroup {
-        (group.backup_type, group.backup_id).into()
+        group.group
     }
 }
 
@@ -202,21 +186,19 @@ impl std::fmt::Display for BackupGroup {
     }
 }
 
-impl std::str::FromStr for BackupGroup {
-    type Err = Error;
-
-    /// Parse a backup group path
-    ///
-    /// This parses strings like `vm/100".
-    fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = GROUP_PATH_REGEX
-            .captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
-
-        Ok(Self {
-            backup_type: cap.get(1).unwrap().as_str().parse()?,
-            backup_id: cap.get(2).unwrap().as_str().to_owned(),
-        })
+impl From<BackupDir> for BackupGroup {
+    fn from(dir: BackupDir) -> BackupGroup {
+        BackupGroup {
+            group: dir.dir.group,
+        }
+    }
+}
+
+impl From<&BackupDir> for BackupGroup {
+    fn from(dir: &BackupDir) -> BackupGroup {
+        BackupGroup {
+            group: dir.dir.group.clone(),
+        }
     }
 }
 
@@ -225,57 +207,53 @@ impl std::str::FromStr for BackupGroup {
 /// We also call this a backup snaphost.
 #[derive(Debug, Eq, PartialEq, Clone)]
 pub struct BackupDir {
-    /// Backup group
-    group: BackupGroup,
-    /// Backup timestamp
-    backup_time: i64,
+    dir: pbs_api_types::BackupDir,
     // backup_time as rfc3339
     backup_time_string: String,
 }
 
 impl BackupDir {
-    pub fn new<T>(backup_type: BackupType, backup_id: T, backup_time: i64) -> Result<Self, Error>
-    where
-        T: Into<String>,
-    {
-        let group = BackupGroup::new(backup_type, backup_id.into());
-        BackupDir::with_group(group, backup_time)
+    /// Temporarily used for tests.
+    #[doc(hidden)]
+    pub fn new_test(dir: pbs_api_types::BackupDir) -> Self {
+        Self {
+            backup_time_string: Self::backup_time_to_string(dir.time).unwrap(),
+            dir,
+        }
     }
 
-    pub fn with_rfc3339<T, U>(
-        backup_type: BackupType,
-        backup_id: T,
-        backup_time_string: U,
-    ) -> Result<Self, Error>
-    where
-        T: Into<String>,
-        U: Into<String>,
-    {
-        let backup_time_string = backup_time_string.into();
-        let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
-        let group = BackupGroup::new(backup_type, backup_id.into());
-        Ok(Self {
-            group,
-            backup_time,
-            backup_time_string,
-        })
-    }
-
-    pub fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
+    pub(crate) fn with_group(group: BackupGroup, backup_time: i64) -> Result<Self, Error> {
         let backup_time_string = Self::backup_time_to_string(backup_time)?;
         Ok(Self {
-            group,
-            backup_time,
+            dir: (group.group, backup_time).into(),
             backup_time_string,
         })
     }
 
-    pub fn group(&self) -> &BackupGroup {
-        &self.group
+    pub(crate) fn with_rfc3339(
+        group: BackupGroup,
+        backup_time_string: String,
+    ) -> Result<Self, Error> {
+        let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
+        Ok(Self {
+            dir: (group.group, backup_time).into(),
+            backup_time_string,
+        })
     }
 
+    #[inline]
+    pub fn backup_type(&self) -> BackupType {
+        self.dir.group.ty
+    }
+
+    #[inline]
+    pub fn backup_id(&self) -> &str {
+        &self.dir.group.id
+    }
+
+    #[inline]
     pub fn backup_time(&self) -> i64 {
-        self.backup_time
+        self.dir.time
    }
 
     pub fn backup_time_string(&self) -> &str {
@@ -283,11 +261,15 @@ impl BackupDir {
     }
 
     pub fn relative_path(&self) -> PathBuf {
-        let mut relative_path = self.group.relative_group_path();
-
-        relative_path.push(self.backup_time_string.clone());
-
-        relative_path
+        format!("{}/{}", self.dir.group, self.backup_time_string).into()
+    }
+
+    /// Returns the absolute path for backup_dir, using the cached formatted time string.
+    pub fn full_path(&self, mut base_path: PathBuf) -> PathBuf {
+        base_path.push(self.dir.group.ty.as_str());
+        base_path.push(&self.dir.group.id);
+        base_path.push(&self.backup_time_string);
+        base_path
     }
 
     pub fn protected_file(&self, mut path: PathBuf) -> PathBuf {
@@ -307,46 +289,45 @@ impl BackupDir {
     }
 }
 
+impl AsRef<pbs_api_types::BackupDir> for BackupDir {
+    fn as_ref(&self) -> &pbs_api_types::BackupDir {
+        &self.dir
+    }
+}
+
+impl AsRef<pbs_api_types::BackupGroup> for BackupDir {
+    fn as_ref(&self) -> &pbs_api_types::BackupGroup {
+        &self.dir.group
+    }
+}
+
+impl From<&BackupDir> for pbs_api_types::BackupGroup {
+    fn from(dir: &BackupDir) -> pbs_api_types::BackupGroup {
+        dir.dir.group.clone()
+    }
+}
+
+impl From<BackupDir> for pbs_api_types::BackupGroup {
+    fn from(dir: BackupDir) -> pbs_api_types::BackupGroup {
+        dir.dir.group.into()
+    }
+}
+
 impl From<&BackupDir> for pbs_api_types::BackupDir {
     fn from(dir: &BackupDir) -> pbs_api_types::BackupDir {
-        (
-            pbs_api_types::BackupGroup::from(dir.group.clone()),
-            dir.backup_time,
-        )
-            .into()
+        dir.dir.clone()
     }
 }
 
 impl From<BackupDir> for pbs_api_types::BackupDir {
     fn from(dir: BackupDir) -> pbs_api_types::BackupDir {
-        (pbs_api_types::BackupGroup::from(dir.group), dir.backup_time).into()
-    }
-}
-
-impl std::str::FromStr for BackupDir {
-    type Err = Error;
-
-    /// Parse a snapshot path
-    ///
-    /// This parses strings like `host/elsa/2020-06-15T05:18:33Z".
-    fn from_str(path: &str) -> Result<Self, Self::Err> {
-        let cap = SNAPSHOT_PATH_REGEX
-            .captures(path)
-            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
-
-        BackupDir::with_rfc3339(
-            cap.get(1).unwrap().as_str().parse()?,
-            cap.get(2).unwrap().as_str(),
-            cap.get(3).unwrap().as_str(),
-        )
+        dir.dir
     }
 }
 
 impl std::fmt::Display for BackupDir {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let backup_type = self.group.backup_type();
-        let id = self.group.backup_id();
-        write!(f, "{}/{}/{}", backup_type, id, self.backup_time_string)
+        write!(f, "{}/{}", self.dir.group, self.backup_time_string)
     }
 }
 
@@ -379,10 +360,10 @@ impl BackupInfo {
     pub fn sort_list(list: &mut Vec<BackupInfo>, ascendending: bool) {
         if ascendending {
             // oldest first
-            list.sort_unstable_by(|a, b| a.backup_dir.backup_time.cmp(&b.backup_dir.backup_time));
+            list.sort_unstable_by(|a, b| a.backup_dir.dir.time.cmp(&b.backup_dir.dir.time));
         } else {
             // newest first
-            list.sort_unstable_by(|a, b| b.backup_dir.backup_time.cmp(&a.backup_dir.backup_time));
+            list.sort_unstable_by(|a, b| b.backup_dir.dir.time.cmp(&a.backup_dir.dir.time));
        }
     }
 
@@ -19,7 +19,8 @@ use proxmox_sys::{task_log, task_warn};
 
 use pbs_api_types::{
     Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
-    HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
+    HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, GROUP_PATH_REGEX,
+    SNAPSHOT_PATH_REGEX, UPID,
 };
 use pbs_config::{open_backup_lockfile, BackupLockGuard, ConfigVersionCache};
 
@@ -302,11 +303,19 @@ impl DataStore {
     /// Removes all files not mentioned in the manifest.
     pub fn cleanup_backup_dir(
         &self,
-        backup_dir: &BackupDir,
+        backup_dir: impl AsRef<pbs_api_types::BackupDir>,
+        manifest: &BackupManifest,
+    ) -> Result<(), Error> {
+        self.cleanup_backup_dir_do(backup_dir.as_ref(), manifest)
+    }
+
+    fn cleanup_backup_dir_do(
+        &self,
+        backup_dir: &pbs_api_types::BackupDir,
         manifest: &BackupManifest,
     ) -> Result<(), Error> {
         let mut full_path = self.base_path();
-        full_path.push(backup_dir.relative_path());
+        full_path.push(backup_dir.to_string());
 
         let mut wanted_files = HashSet::new();
         wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
@@ -339,23 +348,28 @@ impl DataStore {
     }
 
     /// Returns the absolute path for a backup_group
-    pub fn group_path(&self, backup_group: &BackupGroup) -> PathBuf {
+    pub fn group_path(&self, backup_group: &pbs_api_types::BackupGroup) -> PathBuf {
         let mut full_path = self.base_path();
-        full_path.push(backup_group.relative_group_path());
+        full_path.push(backup_group.to_string());
         full_path
     }
 
     /// Returns the absolute path for backup_dir
-    pub fn snapshot_path(&self, backup_dir: &BackupDir) -> PathBuf {
+    pub fn snapshot_path(&self, backup_dir: &pbs_api_types::BackupDir) -> PathBuf {
         let mut full_path = self.base_path();
-        full_path.push(backup_dir.relative_path());
+        full_path.push(backup_dir.to_string());
         full_path
     }
 
     /// Remove a complete backup group including all snapshots, returns true
     /// if all snapshots were removed, and false if some were protected
-    pub fn remove_backup_group(&self, backup_group: &BackupGroup) -> Result<bool, Error> {
-        let full_path = self.group_path(backup_group);
+    pub fn remove_backup_group(
+        &self,
+        backup_group: &pbs_api_types::BackupGroup,
+    ) -> Result<bool, Error> {
+        let backup_group = self.backup_group_from_spec(backup_group.clone());
+
+        let full_path = self.group_path(backup_group.as_ref());
 
         let _guard = proxmox_sys::fs::lock_dir_noblock(
             &full_path,
@@ -373,7 +387,7 @@ impl DataStore {
                 removed_all = false;
                 continue;
             }
-            self.remove_backup_dir(&snap.backup_dir, false)?;
+            self.remove_backup_dir(snap.backup_dir.as_ref(), false)?;
         }
 
         if removed_all {
@@ -391,13 +405,19 @@ impl DataStore {
     }
 
     /// Remove a backup directory including all content
-    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {
-        let full_path = self.snapshot_path(backup_dir);
+    pub fn remove_backup_dir(
+        &self,
+        backup_dir: &pbs_api_types::BackupDir,
+        force: bool,
+    ) -> Result<(), Error> {
+        let backup_dir = self.backup_dir_from_spec(backup_dir.clone())?;
+
+        let full_path = backup_dir.full_path(self.base_path());
 
         let (_guard, _manifest_guard);
         if !force {
             _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
-            _manifest_guard = self.lock_manifest(backup_dir)?;
+            _manifest_guard = self.lock_manifest(&backup_dir)?;
         }
 
         if backup_dir.is_protected(self.base_path()) {
@@ -410,7 +430,7 @@ impl DataStore {
         })?;
 
         // the manifest does not exists anymore, we do not need to keep the lock
-        if let Ok(path) = self.manifest_lock_path(backup_dir) {
+        if let Ok(path) = self.manifest_lock_path(&backup_dir) {
             // ignore errors
             let _ = std::fs::remove_file(path);
         }
@@ -421,7 +441,12 @@ impl DataStore {
     /// Returns the time of the last successful backup
     ///
     /// Or None if there is no backup in the group (or the group dir does not exist).
-    pub fn last_successful_backup(&self, backup_group: &BackupGroup) -> Result<Option<i64>, Error> {
+    pub fn last_successful_backup(
+        &self,
+        backup_group: &pbs_api_types::BackupGroup,
+    ) -> Result<Option<i64>, Error> {
+        let backup_group = self.backup_group_from_spec(backup_group.clone());
+
         let base_path = self.base_path();
         let mut group_path = base_path.clone();
         group_path.push(backup_group.relative_group_path());
@@ -436,15 +461,19 @@ impl DataStore {
     /// Returns the backup owner.
     ///
     /// The backup owner is the entity who first created the backup group.
-    pub fn get_owner(&self, backup_group: &BackupGroup) -> Result<Authid, Error> {
+    pub fn get_owner(&self, backup_group: &pbs_api_types::BackupGroup) -> Result<Authid, Error> {
         let mut full_path = self.base_path();
-        full_path.push(backup_group.relative_group_path());
+        full_path.push(backup_group.to_string());
         full_path.push("owner");
         let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
         owner.trim_end().parse() // remove trailing newline
     }
 
-    pub fn owns_backup(&self, backup_group: &BackupGroup, auth_id: &Authid) -> Result<bool, Error> {
+    pub fn owns_backup(
+        &self,
+        backup_group: &pbs_api_types::BackupGroup,
+        auth_id: &Authid,
+    ) -> Result<bool, Error> {
         let owner = self.get_owner(backup_group)?;
 
         Ok(check_backup_owner(&owner, auth_id).is_ok())
@@ -453,12 +482,12 @@ impl DataStore {
     /// Set the backup owner.
     pub fn set_owner(
         &self,
-        backup_group: &BackupGroup,
+        backup_group: &pbs_api_types::BackupGroup,
         auth_id: &Authid,
         force: bool,
     ) -> Result<(), Error> {
         let mut path = self.base_path();
-        path.push(backup_group.relative_group_path());
+        path.push(backup_group.to_string());
         path.push("owner");
 
         let mut open_options = std::fs::OpenOptions::new();
@@ -489,15 +518,15 @@ impl DataStore {
     /// This also acquires an exclusive lock on the directory and returns the lock guard.
     pub fn create_locked_backup_group(
         &self,
-        backup_group: &BackupGroup,
+        backup_group: &pbs_api_types::BackupGroup,
         auth_id: &Authid,
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
         let mut full_path = self.base_path();
-        full_path.push(backup_group.backup_type().as_str());
+        full_path.push(backup_group.ty.as_str());
         std::fs::create_dir_all(&full_path)?;
 
-        full_path.push(backup_group.backup_id());
+        full_path.push(&backup_group.id);
 
         // create the last component now
         match std::fs::create_dir(&full_path) {
@@ -529,9 +558,9 @@ impl DataStore {
     /// The BackupGroup directory needs to exist.
     pub fn create_locked_backup_dir(
         &self,
-        backup_dir: &BackupDir,
+        backup_dir: &pbs_api_types::BackupDir,
     ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
-        let relative_path = backup_dir.relative_path();
+        let relative_path = PathBuf::from(backup_dir.to_string());
         let mut full_path = self.base_path();
         full_path.push(&relative_path);
 
@@ -699,7 +728,7 @@ impl DataStore {
             if let Some(backup_dir_path) = img.parent() {
                 let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                 if let Some(backup_dir_str) = backup_dir_path.to_str() {
-                    if BackupDir::from_str(backup_dir_str).is_err() {
+                    if pbs_api_types::BackupDir::from_str(backup_dir_str).is_err() {
                         strange_paths_count += 1;
                     }
                 }
@@ -933,8 +962,8 @@ impl DataStore {
         let mut path = format!(
             "/run/proxmox-backup/locks/{}/{}/{}",
             self.name(),
-            backup_dir.group().backup_type(),
-            backup_dir.group().backup_id(),
+            backup_dir.backup_type(),
+            backup_dir.backup_id(),
         );
         std::fs::create_dir_all(&path)?;
         use std::fmt::Write;
@@ -994,7 +1023,7 @@ impl DataStore {
 
     /// Updates the protection status of the specified snapshot.
     pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> {
-        let full_path = self.snapshot_path(backup_dir);
+        let full_path = backup_dir.full_path(self.base_path());
 
         let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;
 
@@ -1063,6 +1092,70 @@ impl DataStore {
 
         Ok(chunk_list)
     }
+
+    pub fn backup_group_from_spec(&self, group: pbs_api_types::BackupGroup) -> BackupGroup {
+        BackupGroup::new(group.ty, group.id)
+    }
+
+    pub fn backup_dir_from_spec(&self, dir: pbs_api_types::BackupDir) -> Result<BackupDir, Error> {
+        BackupDir::with_group(self.backup_group_from_spec(dir.group), dir.time)
+    }
+
+    pub fn backup_dir_from_parts<T>(
+        &self,
+        ty: BackupType,
+        id: T,
+        time: i64,
+    ) -> Result<BackupDir, Error>
+    where
+        T: Into<String>,
+    {
+        self.backup_dir_from_spec((ty, id.into(), time).into())
+    }
+
+    pub fn backup_group<T>(&self, ty: BackupType, id: T) -> BackupGroup
+    where
+        T: Into<String>,
+    {
+        BackupGroup::new(ty, id.into())
+    }
+
+    pub fn backup_group_from_path(&self, path: &str) -> Result<BackupGroup, Error> {
+        let cap = GROUP_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
+
+        Ok(self.backup_group(
+            cap.get(1).unwrap().as_str().parse()?,
+            cap.get(2).unwrap().as_str().to_owned(),
+        ))
+    }
+
+    pub fn backup_dir(&self, group: BackupGroup, time: i64) -> Result<BackupDir, Error> {
+        BackupDir::with_group(group, time)
+    }
+
+    pub fn backup_dir_with_rfc3339<T: Into<String>>(
+        &self,
+        group: BackupGroup,
+        time_string: T,
+    ) -> Result<BackupDir, Error> {
+        BackupDir::with_rfc3339(group, time_string.into())
+    }
+
+    pub fn backup_dir_from_path(&self, path: &str) -> Result<BackupDir, Error> {
+        let cap = SNAPSHOT_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+        BackupDir::with_rfc3339(
+            BackupGroup::new(
+                cap.get(1).unwrap().as_str().parse()?,
+                cap.get(2).unwrap().as_str().to_owned(),
+            ),
+            cap.get(3).unwrap().as_str().to_owned(),
+        )
+    }
 }
 
 /// A iterator for all BackupDir's (Snapshots) in a BackupGroup
@@ -9,8 +9,6 @@ use serde_json::{json, Value};
 use pbs_api_types::{BackupType, CryptMode, Fingerprint};
 use pbs_tools::crypt_config::CryptConfig;
 
-use crate::BackupDir;
-
 pub const MANIFEST_BLOB_NAME: &str = "index.json.blob";
 pub const MANIFEST_LOCK_NAME: &str = ".index.json.lck";
 pub const CLIENT_LOG_BLOB_NAME: &str = "client.log.blob";
@@ -85,11 +83,11 @@ pub fn archive_type<P: AsRef<Path>>(archive_name: P) -> Result<ArchiveType, Erro
 }
 
 impl BackupManifest {
-    pub fn new(snapshot: BackupDir) -> Self {
+    pub fn new(snapshot: pbs_api_types::BackupDir) -> Self {
         Self {
-            backup_type: snapshot.group().backup_type(),
-            backup_id: snapshot.group().backup_id().into(),
-            backup_time: snapshot.backup_time(),
+            backup_type: snapshot.group.ty,
+            backup_id: snapshot.group.id.into(),
+            backup_time: snapshot.time,
             files: Vec::new(),
             unprotected: json!({}),
             signature: None,
@@ -284,9 +282,7 @@ fn test_manifest_signature() -> Result<(), Error> {
 
     let crypt_config = CryptConfig::new(testkey)?;
 
-    let snapshot: BackupDir = "host/elsa/2020-06-26T13:56:05Z".parse()?;
-
-    let mut manifest = BackupManifest::new(snapshot);
+    let mut manifest = BackupManifest::new("host/elsa/2020-06-26T13:56:05Z".parse()?);
 
     manifest.add_file("test1.img.fidx".into(), 200, [1u8; 32], CryptMode::Encrypt)?;
     manifest.add_file("abc.blob".into(), 200, [2u8; 32], CryptMode::None)?;
@@ -28,8 +28,13 @@ pub struct SnapshotReader {
 
 impl SnapshotReader {
     /// Lock snapshot, reads the manifest and returns a new instance
-    pub fn new(datastore: Arc<DataStore>, snapshot: BackupDir) -> Result<Self, Error> {
-        let snapshot_path = datastore.snapshot_path(&snapshot);
+    pub fn new(
+        datastore: Arc<DataStore>,
+        snapshot: pbs_api_types::BackupDir,
+    ) -> Result<Self, Error> {
+        let snapshot = datastore.backup_dir_from_spec(snapshot)?;
+
+        let snapshot_path = snapshot.full_path(datastore.base_path());
 
         let locked_dir =
             lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?;
@@ -72,9 +72,9 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        snapshot.group().backup_type(),
-        snapshot.group().backup_id(),
-        snapshot.backup_time(),
+        snapshot.group.ty,
+        &snapshot.group.id,
+        snapshot.time,
         true,
     )
     .await?;
@@ -158,11 +158,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
         let snapshot: BackupDir = path.parse()?;
-        (
-            snapshot.group().backup_type().to_owned(),
-            snapshot.group().backup_id().to_owned(),
-            snapshot.backup_time(),
-        )
+        (snapshot.group.ty, snapshot.group.id, snapshot.time)
     };
 
     let crypto = crypto_parameters(&param)?;
@@ -22,9 +22,9 @@ use proxmox_time::{epoch_i64, strftime_local};
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
 
 use pbs_api_types::{
-    Authid, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte, PruneListItem,
-    PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
+    Authid, BackupDir, BackupGroup, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte,
+    PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus,
+    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
     TRAFFIC_CONTROL_RATE_SCHEMA,
 };
 use pbs_client::catalog_shell::Shell;
@@ -46,7 +46,6 @@ use pbs_client::{
     BACKUP_SOURCE_SCHEMA,
 };
 use pbs_config::key_config::{decrypt_key, rsa_encrypt_key_config, KeyConfig};
-use pbs_datastore::backup_info::{BackupDir, BackupGroup};
 use pbs_datastore::catalog::{BackupCatalogWriter, CatalogReader, CatalogWriter};
 use pbs_datastore::chunk_store::verify_chunk_size;
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, DynamicIndexReader};
@@ -136,8 +135,8 @@ async fn api_datastore_list_snapshots(
 
     let mut args = json!({});
     if let Some(group) = group {
-        args["backup-type"] = group.backup_type().to_string().into();
-        args["backup-id"] = group.backup_id().into();
+        args["backup-type"] = group.ty.to_string().into();
+        args["backup-id"] = group.id.into();
     }
 
     let mut result = client.get(&path, Some(args)).await?;
@@ -154,21 +153,12 @@ pub async fn api_datastore_latest_snapshot(
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
 
     if list.is_empty() {
-        bail!(
-            "backup group {:?} does not contain any snapshots.",
-            group.relative_group_path()
-        );
+        bail!("backup group {} does not contain any snapshots.", group);
     }
 
     list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));
 
-    let backup_time = list[0].backup.time;
-
-    Ok((
-        group.backup_type().to_owned(),
-        group.backup_id().to_owned(),
-        backup_time,
-    ))
+    Ok((group.ty, group.id, list[0].backup.time))
 }
 
 async fn backup_directory<P: AsRef<Path>>(
@@ -263,13 +253,16 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
     let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
         let group = BackupGroup::new(item.backup.ty, item.backup.id);
-        Ok(group.relative_group_path().to_str().unwrap().to_owned())
+        Ok(group.to_string())
     };
 
     let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup.ty, item.backup.id, item.last_backup)?;
-        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
+        let snapshot = BackupDir {
+            group: item.backup,
+            time: item.last_backup,
+        };
+        Ok(snapshot.to_string())
     };
 
     let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
@@ -330,8 +323,8 @@ async fn change_backup_owner(group: String, mut param: Value) -> Result<(), Erro
 
     let group: BackupGroup = group.parse()?;
 
-    param["backup-type"] = group.backup_type().to_string().into();
-    param["backup-id"] = group.backup_id().into();
+    param["backup-type"] = group.ty.to_string().into();
+    param["backup-id"] = group.id.into();
 
     let path = format!("api2/json/admin/datastore/{}/change-owner", repo.store());
     client.post(&path, Some(param)).await?;
@@ -786,7 +779,7 @@ async fn create_backup(
         "Starting backup: {}/{}/{}",
         backup_type,
         backup_id,
-        BackupDir::backup_time_to_string(backup_time)?
+        pbs_datastore::BackupDir::backup_time_to_string(backup_time)?
     );
 
     println!("Client name: {}", proxmox_sys::nodename());
@@ -880,7 +873,7 @@ async fn create_backup(
         None
     };
 
-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
     let mut manifest = BackupManifest::new(snapshot);
 
     let mut catalog = None;
@@ -1194,11 +1187,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
         let snapshot: BackupDir = path.parse()?;
-        (
-            snapshot.group().backup_type().to_owned(),
-            snapshot.group().backup_id().to_owned(),
-            snapshot.backup_time(),
-        )
+        (snapshot.group.ty, snapshot.group.id, snapshot.time)
     };
 
     let target = json::required_string_param(&param, "target")?;
@@ -1415,8 +1404,8 @@ async fn prune(
     if let Some(dry_run) = dry_run {
         api_param["dry-run"] = dry_run.into();
     }
-    api_param["backup-type"] = group.backup_type().to_string().into();
-    api_param["backup-id"] = group.backup_id().into();
+    api_param["backup-type"] = group.ty.to_string().into();
+    api_param["backup-id"] = group.id.into();
 
     let mut result = client.post(&path, Some(api_param)).await?;
 
@@ -1424,8 +1413,7 @@ async fn prune(
 
     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: PruneListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
-        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
+        Ok(item.backup.to_string())
     };
 
     let render_prune_action = |v: &Value, _record: &Value| -> Result<String, Error> {
@@ -18,13 +18,13 @@ use proxmox_schema::*;
 use proxmox_sys::fd::Fd;
 use proxmox_sys::sortable;
 
+use pbs_api_types::{BackupDir, BackupGroup};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_config::key_config::load_and_decrypt_key;
 use pbs_datastore::cached_chunk_reader::CachedChunkReader;
 use pbs_datastore::dynamic_index::BufferedDynamicReader;
 use pbs_datastore::index::IndexFile;
-use pbs_datastore::{BackupDir, BackupGroup};
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;
 
@@ -204,11 +204,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
         let snapshot: BackupDir = path.parse()?;
-        (
-            snapshot.group().backup_type().to_owned(),
-            snapshot.group().backup_id().to_owned(),
-            snapshot.backup_time(),
-        )
+        (snapshot.group.ty, snapshot.group.id, snapshot.time)
     };
 
     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
@@ -7,10 +7,10 @@ use proxmox_router::cli::*;
 use proxmox_schema::api;
 use proxmox_sys::fs::file_get_contents;
 
-use pbs_api_types::{CryptMode, SnapshotListItem};
+use pbs_api_types::{BackupGroup, CryptMode, SnapshotListItem};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_config::key_config::decrypt_key;
-use pbs_datastore::{BackupGroup, DataBlob};
+use pbs_datastore::DataBlob;
 use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;
 
@@ -59,8 +59,7 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
 
     let render_snapshot_path = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
-        Ok(snapshot.relative_path().to_str().unwrap().to_owned())
+        Ok(item.backup.to_string())
     };
 
     let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
@@ -126,9 +125,9 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
         .get(
             &path,
             Some(json!({
-                "backup-type": snapshot.group().backup_type(),
-                "backup-id": snapshot.group().backup_id(),
-                "backup-time": snapshot.backup_time(),
+                "backup-type": snapshot.group.ty,
+                "backup-id": snapshot.group.id,
+                "backup-time": snapshot.time,
             })),
         )
         .await?;
@@ -175,9 +174,9 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
         .delete(
             &path,
             Some(json!({
-                "backup-type": snapshot.group().backup_type(),
-                "backup-id": snapshot.group().backup_id(),
-                "backup-time": snapshot.backup_time(),
+                "backup-type": snapshot.group.ty,
+                "backup-id": snapshot.group.id,
+                "backup-time": snapshot.time,
             })),
         )
         .await?;
@@ -255,12 +254,7 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
         repo.store()
     );
 
-    let args = json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
-    });
+    let args = serde_json::to_value(&snapshot)?;
 
     let body = hyper::Body::from(raw_data);
 
     client
@@ -297,9 +291,9 @@ async fn show_notes(param: Value) -> Result<Value, Error> {
     let path = format!("api2/json/admin/datastore/{}/notes", repo.store());
 
     let args = json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
+        "backup-type": snapshot.group.ty,
+        "backup-id": snapshot.group.id,
+        "backup-time": snapshot.time,
     });
 
     let output_format = get_output_format(&param);
@@ -354,9 +348,9 @@ async fn update_notes(param: Value) -> Result<Value, Error> {
     let path = format!("api2/json/admin/datastore/{}/notes", repo.store());
 
     let args = json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
+        "backup-type": snapshot.group.ty,
+        "backup-id": snapshot.group.id,
+        "backup-time": snapshot.time,
         "notes": notes,
     });
 
@@ -394,9 +388,9 @@ async fn show_protection(param: Value) -> Result<(), Error> {
     let path = format!("api2/json/admin/datastore/{}/protected", repo.store());
 
     let args = json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
+        "backup-type": snapshot.group.ty,
+        "backup-id": snapshot.group.id,
+        "backup-time": snapshot.time,
     });
 
     let output_format = get_output_format(&param);
@@ -450,9 +444,9 @@ async fn update_protection(protected: bool, param: Value) -> Result<(), Error> {
     let path = format!("api2/json/admin/datastore/{}/protected", repo.store());
 
     let args = json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
+        "backup-type": snapshot.group.ty,
+        "backup-id": snapshot.group.id,
+        "backup-time": snapshot.time,
         "protected": protected,
     });
 
@@ -11,8 +11,8 @@ use serde_json::{json, Value};
 use proxmox_router::cli::*;
 use proxmox_schema::api;
 
+use pbs_api_types::BackupDir;
 use pbs_client::BackupRepository;
-use pbs_datastore::backup_info::BackupDir;
 use pbs_datastore::catalog::ArchiveEntry;
 use pbs_datastore::manifest::BackupManifest;
 
@@ -10,8 +10,8 @@ use serde_json::json;
 
 use proxmox_sys::fs::lock_file;
 
+use pbs_api_types::BackupDir;
 use pbs_client::{BackupRepository, VsockClient, DEFAULT_VSOCK_PORT};
-use pbs_datastore::backup_info::BackupDir;
 use pbs_datastore::catalog::ArchiveEntry;
 
 use super::block_driver::*;
@@ -16,7 +16,7 @@ use proxmox_sys::fs::{create_path, CreateOptions};
 use pxar::accessor::aio::Accessor;
 use pxar::decoder::aio::Decoder;
 
-use pbs_api_types::CryptMode;
+use pbs_api_types::{BackupDir, CryptMode};
 use pbs_client::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
 use pbs_client::tools::{
     complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
@@ -28,7 +28,6 @@ use pbs_client::tools::{
 };
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_config::key_config::decrypt_key;
-use pbs_datastore::backup_info::BackupDir;
 use pbs_datastore::catalog::{ArchiveEntry, CatalogReader, DirEntryAttribute};
 use pbs_datastore::dynamic_index::{BufferedDynamicReader, LocalDynamicReadAt};
 use pbs_datastore::index::IndexFile;
@@ -169,9 +168,9 @@ async fn list(snapshot: String, path: String, base64: bool, param: Value) -> Res
         client,
         crypt_config.clone(),
         repo.store(),
-        snapshot.group().backup_type(),
-        snapshot.group().backup_id(),
-        snapshot.backup_time(),
+        snapshot.group.ty,
+        &snapshot.group.id,
+        snapshot.time,
         true,
     )
     .await?;
@@ -346,9 +345,9 @@ async fn extract(
         client,
         crypt_config.clone(),
         repo.store(),
-        snapshot.group().backup_type(),
-        snapshot.group().backup_id(),
-        snapshot.backup_time(),
+        snapshot.group.ty,
+        &snapshot.group.id,
+        snapshot.time,
         true,
     )
     .await?;
|
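Note: the snapshot arguments in the client hunks arrive as strings and are parsed into pbs_api_types::BackupDir through its FromStr impl. A rough, self-contained stand-in for the "type/id/timestamp" layout that parse relies on (the real impl also validates each component and stores the time as an epoch i64):

    use std::str::FromStr;

    // Illustrative only: mirrors the "ty/id/time" snapshot-path convention.
    struct BackupDirSpec { ty: String, id: String, time: String }

    impl FromStr for BackupDirSpec {
        type Err = String;
        fn from_str(s: &str) -> Result<Self, Self::Err> {
            let mut parts = s.splitn(3, '/');
            match (parts.next(), parts.next(), parts.next()) {
                (Some(ty), Some(id), Some(time)) if !time.is_empty() => Ok(BackupDirSpec {
                    ty: ty.to_owned(),
                    id: id.to_owned(),
                    time: time.to_owned(),
                }),
                _ => Err(format!("invalid snapshot path '{}'", s)),
            }
        }
    }

    // usage: let snap: BackupDirSpec = "vm/100/2022-04-20T10:00:00Z".parse()?;
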
@@ -63,16 +63,16 @@ use crate::server::jobstate::Job;

 const GROUP_NOTES_FILE_NAME: &str = "notes";

-fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
+fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf {
     let mut note_path = store.base_path();
-    note_path.push(group.relative_group_path());
+    note_path.push(group.to_string());
     note_path.push(GROUP_NOTES_FILE_NAME);
     note_path
 }

 fn check_priv_or_backup_owner(
     store: &DataStore,
-    group: &BackupGroup,
+    group: &pbs_api_types::BackupGroup,
     auth_id: &Authid,
     required_privs: u64,
 ) -> Result<(), Error> {
@@ -170,7 +170,7 @@ pub fn list_groups(
         .iter_backup_groups()?
         .try_fold(Vec::new(), |mut group_info, group| {
             let group = group?;
-            let owner = match datastore.get_owner(&group) {
+            let owner = match datastore.get_owner(group.as_ref()) {
                 Ok(auth_id) => auth_id,
                 Err(err) => {
                     let id = &store;
@@ -203,7 +203,7 @@ pub fn list_groups(
                 })
                 .to_owned();

-            let note_path = get_group_note_path(&datastore, &group);
+            let note_path = get_group_note_path(&datastore, group.as_ref());
             let comment = file_read_firstline(&note_path).ok();

             group_info.push(GroupListItem {
@@ -244,7 +244,7 @@ pub fn delete_group(
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let group = BackupGroup::new(backup_type, backup_id);
+    let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

     check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
@@ -285,11 +285,11 @@ pub fn list_snapshot_files(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let snapshot = datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        snapshot.group(),
+        snapshot.as_ref(),
         &auth_id,
         PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
     )?;
@@ -328,17 +328,17 @@ pub fn delete_snapshot(
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
+    let snapshot = datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        snapshot.group(),
+        snapshot.as_ref(),
         &auth_id,
         PRIV_DATASTORE_MODIFY,
     )?;

-    datastore.remove_backup_dir(&snapshot, false)?;
+    datastore.remove_backup_dir(snapshot.as_ref(), false)?;

     Ok(Value::Null)
 }
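Note: the recurring server-side shape in these hunks is that helpers such as check_priv_or_backup_owner() and get_owner() keep taking the pure pbs_api_types spec, while call sites now hold datastore-bound instances and bridge over with as_ref(). A sketch of that arrangement (names are illustrative stand-ins, not the real pbs_datastore definitions):

    // Illustrative stand-ins, not the real pbs_datastore definitions.
    mod api {
        pub struct BackupGroup { pub ty: String, pub id: String }
    }

    // Datastore-bound instance wrapping the pure spec.
    struct DatastoreGroup { spec: api::BackupGroup /* plus datastore state */ }

    impl AsRef<api::BackupGroup> for DatastoreGroup {
        fn as_ref(&self) -> &api::BackupGroup { &self.spec }
    }

    // Permission helpers keep taking the spec type, so both callers work.
    fn check_owner(group: &api::BackupGroup) -> bool {
        !group.ty.is_empty() && !group.id.is_empty()
    }

    fn demo(instance: &DatastoreGroup, spec: &api::BackupGroup) -> bool {
        check_owner(instance.as_ref()) && check_owner(spec)
    }
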
@@ -386,7 +386,9 @@ pub fn list_snapshots(
     // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
     // backup group and provide an error free (Err -> None) accessor
     let groups = match (backup_type, backup_id) {
-        (Some(backup_type), Some(backup_id)) => vec![BackupGroup::new(backup_type, backup_id)],
+        (Some(backup_type), Some(backup_id)) => {
+            vec![datastore.backup_group(backup_type, backup_id)]
+        }
         (Some(backup_type), None) => datastore
             .iter_backup_groups_ok()?
             .filter(|group| group.backup_type() == backup_type)
@@ -471,7 +473,7 @@ pub fn list_snapshots(
     };

     groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
-        let owner = match datastore.get_owner(group) {
+        let owner = match datastore.get_owner(group.as_ref()) {
             Ok(auth_id) => auth_id,
             Err(err) => {
                 eprintln!(
@@ -502,7 +504,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
     store
         .iter_backup_groups_ok()?
         .filter(|group| {
-            let owner = match store.get_owner(group) {
+            let owner = match store.get_owner(group.as_ref()) {
                 Ok(owner) => owner,
                 Err(err) => {
                     let id = store.name();
@@ -658,20 +660,20 @@ pub fn verify(
                 "{}:{}/{}/{:08X}",
                 store, backup_type, backup_id, backup_time
             );
-            let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+            let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

-            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
+            check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;

             backup_dir = Some(dir);
             worker_type = "verify_snapshot";
         }
         (Some(backup_type), Some(backup_id), None) => {
             worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
-            let group = BackupGroup::new(backup_type, backup_id);
+            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

             check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;

-            backup_group = Some(group);
+            backup_group = Some(datastore.backup_group_from_spec(group));
             worker_type = "verify_group";
         }
         (None, None, None) => {
@@ -776,11 +778,11 @@ pub fn prune(
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let group = BackupGroup::new(backup_type, &backup_id);
-
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

-    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
+    let group = datastore.backup_group(backup_type, &backup_id);
+
+    check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;

     let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);

@@ -798,13 +800,10 @@ pub fn prune(
     for (info, mark) in prune_info {
         let keep = keep_all || mark.keep();

-        let backup_time = info.backup_dir.backup_time();
-        let group = info.backup_dir.group();
-
         prune_result.push(json!({
-            "backup-type": group.backup_type(),
-            "backup-id": group.backup_id(),
-            "backup-time": backup_time,
+            "backup-type": info.backup_dir.backup_type(),
+            "backup-id": info.backup_dir.backup_id(),
+            "backup-time": info.backup_dir.backup_time(),
             "keep": keep,
             "protected": mark.protected(),
         }));
@@ -837,28 +836,22 @@ pub fn prune(

         let backup_time = info.backup_dir.backup_time();
         let timestamp = info.backup_dir.backup_time_string();
-        let group = info.backup_dir.group();
+        let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();

-        let msg = format!(
-            "{}/{}/{} {}",
-            group.backup_type(),
-            group.backup_id(),
-            timestamp,
-            mark,
-        );
+        let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);

         task_log!(worker, "{}", msg);

         prune_result.push(json!({
-            "backup-type": group.backup_type(),
-            "backup-id": group.backup_id(),
+            "backup-type": group.ty,
+            "backup-id": group.id,
             "backup-time": backup_time,
             "keep": keep,
             "protected": mark.protected(),
         }));

         if !(dry_run || keep) {
-            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
+            if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
                 task_warn!(
                     worker,
                     "failed to remove dir {:?}: {}",
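Note: with the constructors private, every BackupDir in these handlers is minted by the datastore itself, either from loose parts or from a full api-type spec. A rough sketch of the two factory shapes used above (bodies and error type are illustrative only):

    // All names below are illustrative stand-ins, not the real definitions.
    mod api {
        #[derive(Clone)]
        pub struct BackupGroup { pub ty: String, pub id: String }
        #[derive(Clone)]
        pub struct BackupDir { pub group: BackupGroup, pub time: i64 }
    }

    struct DataStore { name: String }
    struct BackupDir { store: String, spec: api::BackupDir }

    impl DataStore {
        // from a full api-type spec, as in list_snapshot_files()/get_notes()
        fn backup_dir_from_spec(&self, spec: api::BackupDir) -> Result<BackupDir, String> {
            if spec.time < 0 {
                return Err("invalid backup time".into());
            }
            Ok(BackupDir { store: self.name.clone(), spec })
        }

        // from separate parts, as in download_file()/verify()
        fn backup_dir_from_parts(&self, ty: String, id: String, time: i64) -> Result<BackupDir, String> {
            self.backup_dir_from_spec(api::BackupDir {
                group: api::BackupGroup { ty, id },
                time,
            })
        }
    }
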
@@ -1079,14 +1072,14 @@ pub fn download_file(
     let file_name = required_string_param(&param, "file-name")?.to_owned();

     let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-    let backup_id = required_string_param(&param, "backup-id")?;
+    let backup_id = required_string_param(&param, "backup-id")?.to_owned();
     let backup_time = required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_READ,
     )?;
@@ -1162,14 +1155,14 @@ pub fn download_file_decoded(
     let file_name = required_string_param(&param, "file-name")?.to_owned();

     let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-    let backup_id = required_string_param(&param, "backup-id")?;
+    let backup_id = required_string_param(&param, "backup-id")?.to_owned();
     let backup_time = required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_READ,
     )?;
@@ -1291,10 +1284,10 @@ pub fn upload_backup_log(
     let backup_id = required_string_param(&param, "backup-id")?;
     let backup_time = required_integer_param(&param, "backup-time")?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let owner = datastore.get_owner(backup_dir.group())?;
+    let owner = datastore.get_owner(backup_dir.as_ref())?;
     check_backup_owner(&owner, &auth_id)?;

     let mut path = datastore.base_path();
@@ -1363,11 +1356,12 @@ pub fn catalog(

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir =
+        datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_READ,
     )?;
@@ -1446,11 +1440,12 @@ pub fn pxar_file_download(

     let tar = param["tar"].as_bool().unwrap_or(false);

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore
+        .backup_dir_from_spec((backup_type, backup_id.to_owned(), backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_READ,
     )?;
@@ -1637,7 +1632,7 @@ pub fn get_group_notes(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_group = BackupGroup::new(backup_type, backup_id);
+    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

     check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;

@@ -1673,7 +1668,7 @@ pub fn set_group_notes(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_group = BackupGroup::new(backup_type, backup_id);
+    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

     check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;

@@ -1707,11 +1702,12 @@ pub fn get_notes(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir =
+        datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_AUDIT,
     )?;
@@ -1753,11 +1749,12 @@ pub fn set_notes(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir =
+        datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_MODIFY,
     )?;
@@ -1795,11 +1792,12 @@ pub fn get_protection(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir =
+        datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_AUDIT,
     )?;
@@ -1837,11 +1835,12 @@ pub fn set_protection(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir =
+        datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;

     check_priv_or_backup_owner(
         &datastore,
-        backup_dir.group(),
+        backup_dir.as_ref(),
         &auth_id,
         PRIV_DATASTORE_MODIFY,
     )?;
@@ -1875,7 +1874,7 @@ pub fn set_backup_owner(
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

-    let backup_group = BackupGroup::new(backup_type, backup_id);
+    let backup_group = datastore.backup_group(backup_type, backup_id);

     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

@@ -1887,7 +1886,7 @@ pub fn set_backup_owner(
         // High-privilege user/token
         true
     } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
-        let owner = datastore.get_owner(&backup_group)?;
+        let owner = datastore.get_owner(backup_group.as_ref())?;

         match (owner.is_token(), new_owner.is_token()) {
             (true, true) => {
@@ -1935,7 +1934,7 @@ pub fn set_backup_owner(
         );
     }

-    datastore.set_owner(&backup_group, &new_owner, true)?;
+    datastore.set_owner(backup_group.as_ref(), &new_owner, true)?;

     Ok(())
 }

@@ -614,7 +614,7 @@ impl BackupEnvironment {
             .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

         if let Some(base) = &self.last_backup {
-            let path = self.datastore.snapshot_path(&base.backup_dir);
+            let path = self.datastore.snapshot_path(base.backup_dir.as_ref());
             if !path.exists() {
                 bail!(
                     "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
@@ -643,8 +643,8 @@ impl BackupEnvironment {
         let worker_id = format!(
             "{}:{}/{}/{:08X}",
             self.datastore.name(),
-            self.backup_dir.group().backup_type(),
-            self.backup_dir.group().backup_id(),
+            self.backup_dir.backup_type(),
+            self.backup_dir.backup_id(),
             self.backup_dir.backup_time()
         );

@@ -710,7 +710,8 @@ impl BackupEnvironment {
         let mut state = self.state.lock().unwrap();
         state.finished = true;

-        self.datastore.remove_backup_dir(&self.backup_dir, true)?;
+        self.datastore
+            .remove_backup_dir(self.backup_dir.as_ref(), true)?;

         Ok(())
     }

@@ -21,7 +21,6 @@ use pbs_api_types::{
     DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
 };
 use pbs_config::CachedUserInfo;
-use pbs_datastore::backup_info::{BackupDir, BackupGroup};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType};
 use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
@@ -107,7 +106,7 @@ fn upgrade_to_backup_protocol(

     let env_type = rpcenv.env_type();

-    let backup_group = BackupGroup::new(backup_type, backup_id);
+    let backup_group = datastore.backup_group(backup_type, backup_id);

     let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
         if !benchmark {
@@ -123,7 +122,7 @@ fn upgrade_to_backup_protocol(

     // lock backup group to only allow one backup per group at a time
     let (owner, _group_guard) =
-        datastore.create_locked_backup_group(&backup_group, &auth_id)?;
+        datastore.create_locked_backup_group(backup_group.as_ref(), &auth_id)?;

     // permission check
     let correct_owner =
@@ -155,7 +154,7 @@ fn upgrade_to_backup_protocol(
         }
     };

-    let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
+    let backup_dir = backup_group.backup_dir(backup_time)?;

     let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {
@@ -163,7 +162,7 @@ fn upgrade_to_backup_protocol(
         }

         // lock last snapshot to prevent forgetting/pruning it during backup
-        let full_path = datastore.snapshot_path(&last.backup_dir);
+        let full_path = datastore.snapshot_path(last.backup_dir.as_ref());
         Some(lock_dir_noblock_shared(
             &full_path,
             "snapshot",
@@ -173,7 +172,7 @@ fn upgrade_to_backup_protocol(
         None
     };

-    let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
+    let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(backup_dir.as_ref())?;
     if !is_new {
         bail!("backup directory already exists.");
     }
@@ -812,7 +811,7 @@ fn download_previous(
         None => bail!("no valid previous backup"),
     };

-    let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
+    let mut path = env.datastore.snapshot_path(last_backup.backup_dir.as_ref());
     path.push(&archive_name);

     {

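Note: in the backup-protocol hunks above, BackupDir::with_group(backup_group, backup_time) becomes backup_group.backup_dir(backup_time), i.e. the datastore-bound group mints its own snapshot instance. A sketch of that method shape (illustrative stand-ins, not the real types):

    // Illustrative stand-ins: a datastore-bound group can mint its snapshot.
    struct BackupGroup { store: String, ty: String, id: String }
    struct BackupDir { store: String, ty: String, id: String, time: i64 }

    impl BackupGroup {
        fn backup_dir(&self, time: i64) -> Result<BackupDir, String> {
            if time < 0 {
                return Err(format!("invalid backup time {}", time));
            }
            Ok(BackupDir {
                store: self.store.clone(),
                ty: self.ty.clone(),
                id: self.id.clone(),
                time,
            })
        }
    }
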
@@ -21,7 +21,6 @@ use pbs_api_types::{
     PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
 };
 use pbs_config::CachedUserInfo;
-use pbs_datastore::backup_info::BackupDir;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType};
 use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
@@ -113,9 +112,9 @@ fn upgrade_to_backup_reader_protocol(

     let env_type = rpcenv.env_type();

-    let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
     if !priv_read {
-        let owner = datastore.get_owner(backup_dir.group())?;
+        let owner = datastore.get_owner(backup_dir.as_ref())?;
         let correct_owner = owner == auth_id
             || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
         if !correct_owner {
@@ -124,7 +123,7 @@ fn upgrade_to_backup_reader_protocol(
     }

     let _guard = lock_dir_noblock_shared(
-        &datastore.snapshot_path(&backup_dir),
+        &backup_dir.full_path(datastore.base_path()),
         "snapshot",
         "locked by another operation",
     )?;

@@ -576,7 +576,7 @@ pub fn backup_snapshot(
 ) -> Result<bool, Error> {
     task_log!(worker, "backup snapshot {}", snapshot);

-    let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
+    let snapshot_reader = match SnapshotReader::new(datastore.clone(), (&snapshot).into()) {
         Ok(reader) => reader,
         Err(err) => {
             // ignore missing snapshots and continue

@@ -13,7 +13,6 @@ use pbs_api_types::{
     MEDIA_POOL_NAME_SCHEMA, MEDIA_UUID_SCHEMA, PRIV_TAPE_AUDIT, VAULT_NAME_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
-use pbs_datastore::backup_info::BackupDir;

 use crate::tape::{
     changer::update_online_status, media_catalog_snapshot_list, Inventory, MediaCatalog, MediaPool,
@@ -439,15 +438,15 @@ pub fn list_content(
         .unwrap_or_else(|_| set.uuid.to_string());

     for (store, snapshot) in media_catalog_snapshot_list(status_path, &media_id)? {
-        let backup_dir: BackupDir = snapshot.parse()?;
+        let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

         if let Some(backup_type) = filter.backup_type {
-            if backup_dir.group().backup_type() != backup_type {
+            if backup_dir.ty() != backup_type {
                 continue;
             }
         }
         if let Some(ref backup_id) = filter.backup_id {
-            if backup_dir.group().backup_id() != backup_id {
+            if backup_dir.id() != backup_id {
                 continue;
             }
         }
@@ -462,7 +461,7 @@ pub fn list_content(
             seq_nr: set.seq_nr,
             snapshot: snapshot.to_owned(),
             store: store.to_owned(),
-            backup_time: backup_dir.backup_time(),
+            backup_time: backup_dir.time,
         });
     }
 }

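Note: the tape hunks above use ty()/id() conveniences on the api BackupDir instead of going through group(). A plausible shape for those accessors, assumed from the usage here rather than copied from the source:

    // Assumed accessor shape on the api BackupDir, mirroring the usage above.
    #[derive(Clone, Copy, PartialEq)]
    enum BackupType { Vm, Ct, Host }
    struct BackupGroup { ty: BackupType, id: String }
    struct BackupDir { group: BackupGroup, time: i64 }

    impl BackupDir {
        fn ty(&self) -> BackupType { self.group.ty }
        fn id(&self) -> &str { &self.group.id }
    }

    // e.g. the content filter above reduces to:
    fn matches(dir: &BackupDir, want_ty: BackupType, want_id: &str) -> bool {
        dir.ty() == want_ty && dir.id() == want_id
    }
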
@@ -22,7 +22,6 @@ use pbs_api_types::{
     TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
 };
 use pbs_config::CachedUserInfo;
-use pbs_datastore::backup_info::BackupDir;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
 use pbs_datastore::index::IndexFile;
@@ -423,7 +422,7 @@ fn restore_list_worker(
             let snapshot = split
                 .next()
                 .ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
-            let backup_dir: BackupDir = snapshot.parse()?;
+            let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

             let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
                 format_err!(
@@ -433,7 +432,7 @@ fn restore_list_worker(
             })?;

             let (owner, _group_lock) =
-                datastore.create_locked_backup_group(backup_dir.group(), restore_owner)?;
+                datastore.create_locked_backup_group(backup_dir.as_ref(), restore_owner)?;
             if restore_owner != &owner {
                 // only the owner is allowed to create additional snapshots
                 bail!(
@@ -577,7 +576,7 @@ fn restore_list_worker(
             let snapshot = split
                 .next()
                 .ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
-            let backup_dir: BackupDir = snapshot.parse()?;
+            let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

             let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
                 format_err!("unexpected source datastore: {}", source_datastore)
@@ -1037,12 +1036,12 @@ fn restore_archive<'a>(
                 snapshot
             );

-            let backup_dir: BackupDir = snapshot.parse()?;
+            let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

             if let Some((store_map, authid)) = target.as_ref() {
                 if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                     let (owner, _group_lock) =
-                        datastore.create_locked_backup_group(backup_dir.group(), authid)?;
+                        datastore.create_locked_backup_group(backup_dir.as_ref(), authid)?;
                     if *authid != &owner {
                         // only the owner is allowed to create additional snapshots
                         bail!(
@@ -1054,7 +1053,7 @@ fn restore_archive<'a>(
                     }

                     let (rel_path, is_new, _snap_lock) =
-                        datastore.create_locked_backup_dir(&backup_dir)?;
+                        datastore.create_locked_backup_dir(backup_dir.as_ref())?;
                     let mut path = datastore.base_path();
                     path.push(rel_path);

@@ -328,7 +328,7 @@ pub fn verify_backup_dir(
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
     let snap_lock = lock_dir_noblock_shared(
-        &verify_worker.datastore.snapshot_path(backup_dir),
+        &verify_worker.datastore.snapshot_path(backup_dir.as_ref()),
         "snapshot",
         "locked by another operation",
     );
@@ -514,7 +514,7 @@ pub fn verify_all_backups(
     }

     let filter_by_owner = |group: &BackupGroup| {
-        match (verify_worker.datastore.get_owner(group), &owner) {
+        match (verify_worker.datastore.get_owner(group.as_ref()), &owner) {
             (Ok(ref group_owner), Some(owner)) => {
                 group_owner == owner
                     || (group_owner.is_token()
@@ -530,7 +530,7 @@ pub fn verify_all_backups(
             }
             (Err(err), None) => {
                 // we don't filter by owner, but we want to log the error
-                task_log!(worker, "Failed to get owner of group '{} - {}", group, err,);
+                task_log!(worker, "Failed to get owner of group '{} - {}", group, err);
                 errors.push(group.to_string());
                 true
             }

@@ -46,7 +46,7 @@ pub fn prune_datastore(
         let group = group?;
         let list = group.list_backups(&datastore.base_path())?;

-        if !has_privs && !datastore.owns_backup(&group, &auth_id)? {
+        if !has_privs && !datastore.owns_backup(group.as_ref(), &auth_id)? {
             continue;
         }

@@ -72,7 +72,7 @@ pub fn prune_datastore(
             info.backup_dir.backup_time_string()
         );
         if !keep && !dry_run {
-            if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
+            if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
                 task_warn!(
                     worker,
                     "failed to remove dir {:?}: {}",

@@ -28,7 +28,7 @@ use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{
     archive_type, ArchiveType, BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
 };
-use pbs_datastore::{BackupDir, BackupGroup, DataStore, StoreProgress};
+use pbs_datastore::{DataStore, StoreProgress};
 use pbs_tools::sha::sha256;
 use proxmox_rest_server::WorkerTask;

@@ -223,13 +223,13 @@ async fn pull_single_archive(
     reader: &BackupReader,
     chunk_reader: &mut RemoteChunkReader,
     tgt_store: Arc<DataStore>,
-    snapshot: &BackupDir,
+    snapshot: &pbs_api_types::BackupDir,
     archive_info: &FileInfo,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), Error> {
     let archive_name = &archive_info.filename;
     let mut path = tgt_store.base_path();
-    path.push(snapshot.relative_path());
+    path.push(snapshot.to_string());
     path.push(archive_name);

     let mut tmp_path = path.clone();
@@ -321,15 +321,17 @@ async fn pull_snapshot(
     worker: &WorkerTask,
     reader: Arc<BackupReader>,
     tgt_store: Arc<DataStore>,
-    snapshot: &BackupDir,
+    snapshot: &pbs_api_types::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), Error> {
+    let snapshot_relative_path = snapshot.to_string();

     let mut manifest_name = tgt_store.base_path();
-    manifest_name.push(snapshot.relative_path());
+    manifest_name.push(&snapshot_relative_path);
     manifest_name.push(MANIFEST_BLOB_NAME);

     let mut client_log_name = tgt_store.base_path();
-    client_log_name.push(snapshot.relative_path());
+    client_log_name.push(&snapshot_relative_path);
     client_log_name.push(CLIENT_LOG_BLOB_NAME);

     let mut tmp_manifest_name = manifest_name.clone();
@@ -396,7 +398,7 @@ async fn pull_snapshot(

     for item in manifest.files() {
         let mut path = tgt_store.base_path();
-        path.push(snapshot.relative_path());
+        path.push(&snapshot_relative_path);
         path.push(&item.filename);

         if path.exists() {
@@ -471,13 +473,14 @@ pub async fn pull_snapshot_from(
     worker: &WorkerTask,
     reader: Arc<BackupReader>,
     tgt_store: Arc<DataStore>,
-    snapshot: &BackupDir,
+    snapshot: &pbs_api_types::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), Error> {
     let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(snapshot)?;

+    let snapshot_path = snapshot.to_string();
     if is_new {
-        task_log!(worker, "sync snapshot {:?}", snapshot.relative_path());
+        task_log!(worker, "sync snapshot {:?}", snapshot_path);

         if let Err(err) = pull_snapshot(
             worker,
@@ -493,9 +496,9 @@ pub async fn pull_snapshot_from(
             }
             return Err(err);
         }
-        task_log!(worker, "sync snapshot {:?} done", snapshot.relative_path());
+        task_log!(worker, "sync snapshot {:?} done", snapshot_path);
     } else {
-        task_log!(worker, "re-sync snapshot {:?}", snapshot.relative_path());
+        task_log!(worker, "re-sync snapshot {:?}", snapshot_path);
         pull_snapshot(
             worker,
             reader,
@@ -504,11 +507,7 @@ pub async fn pull_snapshot_from(
             downloaded_chunks,
         )
         .await?;
-        task_log!(
-            worker,
-            "re-sync snapshot {:?} done",
-            snapshot.relative_path()
-        );
+        task_log!(worker, "re-sync snapshot {:?} done", snapshot_path);
     }

     Ok(())
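Note: the pull hunks swap snapshot.relative_path() (a PathBuf) for snapshot.to_string(), which works because the api type's Display impl renders the same "type/id/timestamp" layout. A runnable stand-in for that convention:

    use std::fmt;

    // Stand-in for the api BackupDir; Display yields the relative path.
    struct BackupDir { ty: &'static str, id: &'static str, rfc3339: &'static str }

    impl fmt::Display for BackupDir {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{}/{}/{}", self.ty, self.id, self.rfc3339)
        }
    }

    fn main() {
        let snap = BackupDir { ty: "vm", id: "100", rfc3339: "2022-04-20T10:00:00Z" };
        let mut path = std::path::PathBuf::from("/datastore/base");
        path.push(snap.to_string()); // .../vm/100/2022-04-20T10:00:00Z
        println!("{}", path.display());
    }
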
@@ -561,7 +560,7 @@ pub async fn pull_group(
     worker: &WorkerTask,
     client: &HttpClient,
     params: &PullParameters,
-    group: &BackupGroup,
+    group: &pbs_api_types::BackupGroup,
     progress: &mut StoreProgress,
 ) -> Result<(), Error> {
     let path = format!(
@@ -570,8 +569,8 @@ pub async fn pull_group(
     );

     let args = json!({
-        "backup-type": group.backup_type(),
-        "backup-id": group.backup_id(),
+        "backup-type": group.ty,
+        "backup-id": group.id,
     });

     let mut result = client.get(&path, Some(args)).await?;
@@ -599,7 +598,7 @@ pub async fn pull_group(
     };

     for (pos, item) in list.into_iter().enumerate() {
-        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
+        let snapshot = item.backup;

         // in-progress backups can't be synced
         if item.size.is_none() {
@@ -611,7 +610,7 @@ pub async fn pull_group(
             continue;
         }

-        let backup_time = snapshot.backup_time();
+        let backup_time = snapshot.time;

         remote_snapshots.insert(backup_time);

@@ -640,8 +639,8 @@ pub async fn pull_group(
             new_client,
             None,
             params.source.store(),
-            snapshot.group().backup_type(),
-            snapshot.group().backup_id(),
+            snapshot.group.ty,
+            &snapshot.group.id,
             backup_time,
             true,
         )
@@ -663,6 +662,7 @@ pub async fn pull_group(
     }

     if params.remove_vanished {
+        let group = params.store.backup_group_from_spec(group.clone());
         let local_list = group.list_backups(&params.store.base_path())?;
         for info in local_list {
             let backup_time = info.backup_dir.backup_time();
@@ -682,7 +682,9 @@ pub async fn pull_group(
                 "delete vanished snapshot {:?}",
                 info.backup_dir.relative_path()
             );
-            params.store.remove_backup_dir(&info.backup_dir, false)?;
+            params
+                .store
+                .remove_backup_dir(info.backup_dir.as_ref(), false)?;
         }
     }

@@ -720,18 +722,15 @@ pub async fn pull_store(
         }
     });

-    let apply_filters = |group: &BackupGroup, filters: &[GroupFilter]| -> bool {
+    let apply_filters = |group: &pbs_api_types::BackupGroup, filters: &[GroupFilter]| -> bool {
         filters.iter().any(|filter| group.matches(filter))
     };

-    let list: Vec<BackupGroup> = list
-        .into_iter()
-        .map(|item| BackupGroup::new(item.backup.ty, item.backup.id))
-        .collect();
+    let list: Vec<pbs_api_types::BackupGroup> = list.into_iter().map(|item| item.backup).collect();

     let list = if let Some(ref group_filter) = &params.group_filter {
         let unfiltered_count = list.len();
-        let list: Vec<BackupGroup> = list
+        let list: Vec<pbs_api_types::BackupGroup> = list
             .into_iter()
             .filter(|group| apply_filters(group, group_filter))
             .collect();
@@ -799,11 +798,11 @@ pub async fn pull_store(
     let result: Result<(), Error> = proxmox_lang::try_block!({
         for local_group in params.store.iter_backup_groups()? {
             let local_group = local_group?;
-            if new_groups.contains(&local_group) {
+            if new_groups.contains(local_group.as_ref()) {
                 continue;
             }
             if let Some(ref group_filter) = &params.group_filter {
-                if !apply_filters(&local_group, group_filter) {
+                if !apply_filters(local_group.as_ref(), group_filter) {
                     continue;
                 }
             }
@@ -813,7 +812,7 @@ pub async fn pull_store(
                 local_group.backup_type(),
                 local_group.backup_id()
             );
-            match params.store.remove_backup_group(&local_group) {
+            match params.store.remove_backup_group(local_group.as_ref()) {
                 Ok(true) => {}
                 Ok(false) => {
                     task_log!(

@@ -8,7 +8,6 @@ use std::path::{Path, PathBuf};
 use anyhow::{bail, format_err, Error};
 use endian_trait::Endian;

-use pbs_datastore::backup_info::BackupDir;
 use proxmox_sys::fs::read_subdir;

 use proxmox_io::{ReadExt, WriteExt};
@@ -682,7 +681,7 @@ impl MediaCatalog {
             );
         }

-        if let Err(err) = snapshot.parse::<BackupDir>() {
+        if let Err(err) = snapshot.parse::<pbs_api_types::BackupDir>() {
             bail!(
                 "register_snapshot failed: unable to parse snapshot '{}' - {}",
                 snapshot,
@@ -29,7 +29,7 @@ fn get_prune_list(
 }

 fn create_info(snapshot: &str, partial: bool) -> BackupInfo {
-    let backup_dir: BackupDir = snapshot.parse().unwrap();
+    let backup_dir = BackupDir::new_test(snapshot.parse().unwrap());

     let mut files = Vec::new();

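Note: the prune tests above sidestep the now-private constructor through a dedicated test-only factory that wraps a parsed api spec without touching a datastore. Its exact signature is assumed from the hunk; a sketch:

    // Illustrative shape only; api::BackupDir stands in for the real spec.
    mod api {
        pub struct BackupDir { pub ty: String, pub id: String, pub time: i64 }
    }

    struct BackupDir { spec: api::BackupDir }

    impl BackupDir {
        /// Test helper: build an instance without a datastore.
        fn new_test(spec: api::BackupDir) -> Self {
            BackupDir { spec }
        }
    }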