api-types: add namespace to BackupGroup

Make it easier by adding a helper that accepts either a group or a
directory.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Wolfgang Bumiller 2022-04-21 15:04:59 +02:00 committed by Thomas Lamprecht
parent 42103c467d
commit 8c74349b08
22 changed files with 431 additions and 319 deletions

View File

@@ -2,7 +2,7 @@ use std::io::Write;

 use anyhow::Error;
-use pbs_api_types::{Authid, BackupType};
+use pbs_api_types::{Authid, BackupNamespace, BackupType};
 use pbs_client::{BackupReader, HttpClient, HttpClientOptions};

 pub struct DummyWriter {
@@ -37,9 +37,13 @@ async fn run() -> Result<(), Error> {
         client,
         None,
         "store2",
-        BackupType::Host,
-        "elsa",
-        backup_time,
+        &(
+            BackupNamespace::root(),
+            BackupType::Host,
+            "elsa".to_string(),
+            backup_time,
+        )
+            .into(),
         true,
     )
     .await?;

View File

@@ -1,6 +1,6 @@
 use anyhow::Error;

-use pbs_api_types::{Authid, BackupType};
+use pbs_api_types::{Authid, BackupNamespace, BackupType};
 use pbs_client::{BackupWriter, HttpClient, HttpClientOptions};

 async fn upload_speed() -> Result<f64, Error> {
@@ -21,9 +21,13 @@ async fn upload_speed() -> Result<f64, Error> {
         client,
         None,
         datastore,
-        BackupType::Host,
-        "speedtest",
-        backup_time,
+        &(
+            BackupNamespace::root(),
+            BackupType::Host,
+            "speedtest".to_string(),
+            backup_time,
+        )
+            .into(),
         false,
         true,
     )

View File

@@ -1,5 +1,5 @@
 use std::fmt;
-use std::path::{Path, PathBuf};
+use std::path::PathBuf;

 use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
@@ -16,19 +16,24 @@ use crate::{
 };

 const_regex! {
+    pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
+
     pub BACKUP_TYPE_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), r")$");

     pub BACKUP_ID_REGEX = concat!(r"^", BACKUP_ID_RE!(), r"$");

     pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");

-    pub GROUP_PATH_REGEX = concat!(r"^(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), r")$");
+    pub GROUP_PATH_REGEX = concat!(
+        r"^(", BACKUP_NS_PATH_RE!(), r")?",
+        r"(", BACKUP_TYPE_RE!(), ")/",
+        r"(", BACKUP_ID_RE!(), r")$",
+    );

     pub BACKUP_FILE_REGEX = r"^.*\.([fd]idx|blob)$";

     pub SNAPSHOT_PATH_REGEX = concat!(r"^", SNAPSHOT_PATH_REGEX_STR!(), r"$");
+    pub GROUP_OR_SNAPSHOT_PATH_REGEX = concat!(r"^", GROUP_OR_SNAPSHOT_PATH_REGEX_STR!(), r"$");

-    pub BACKUP_NAMESPACE_REGEX = concat!(r"^", BACKUP_NS_RE!(), r"$");
     pub DATASTORE_MAP_REGEX = concat!(r"(:?", PROXMOX_SAFE_ID_REGEX_STR!(), r"=)?", PROXMOX_SAFE_ID_REGEX_STR!());
 }
@@ -640,7 +645,7 @@ impl BackupNamespace {
     /// Return an adapter which [`Display`]s as a path with `"ns/"` prefixes in front of every
     /// component.
-    fn display_as_path(&self) -> BackupNamespacePath {
+    pub fn display_as_path(&self) -> BackupNamespacePath {
         BackupNamespacePath(self)
     }
@@ -775,6 +780,7 @@ impl std::cmp::PartialOrd for BackupType {
 #[api(
     properties: {
+        "backup-ns": { type: BackupNamespace },
         "backup-type": { type: BackupType },
         "backup-id": { schema: BACKUP_ID_SCHEMA },
     },
@@ -783,6 +789,14 @@ impl std::cmp::PartialOrd for BackupType {
 #[serde(rename_all = "kebab-case")]
 /// A backup group (without a data store).
 pub struct BackupGroup {
+    /// An optional namespace this backup belongs to.
+    #[serde(
+        rename = "backup-ns",
+        skip_serializing_if = "BackupNamespace::is_root",
+        default
+    )]
+    pub ns: BackupNamespace,
+
     /// Backup type.
     #[serde(rename = "backup-type")]
     pub ty: BackupType,
@@ -793,8 +807,12 @@ pub struct BackupGroup {
 }

 impl BackupGroup {
-    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
-        Self { ty, id: id.into() }
+    pub fn new<T: Into<String>>(ns: BackupNamespace, ty: BackupType, id: T) -> Self {
+        Self {
+            ns,
+            ty,
+            id: id.into(),
+        }
     }

     pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
@@ -820,21 +838,29 @@ impl AsRef<BackupGroup> for BackupGroup {
     }
 }

-impl From<(BackupType, String)> for BackupGroup {
-    fn from(data: (BackupType, String)) -> Self {
+impl From<(BackupNamespace, BackupType, String)> for BackupGroup {
+    #[inline]
+    fn from(data: (BackupNamespace, BackupType, String)) -> Self {
         Self {
-            ty: data.0,
-            id: data.1,
+            ns: data.0,
+            ty: data.1,
+            id: data.2,
         }
     }
 }

 impl std::cmp::Ord for BackupGroup {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        let ns_order = self.ns.cmp(&other.ns);
+        if ns_order != std::cmp::Ordering::Equal {
+            return ns_order;
+        }
+
         let type_order = self.ty.cmp(&other.ty);
         if type_order != std::cmp::Ordering::Equal {
             return type_order;
         }

         // try to compare IDs numerically
         let id_self = self.id.parse::<u64>();
         let id_other = other.id.parse::<u64>();
@@ -855,7 +881,11 @@ impl std::cmp::PartialOrd for BackupGroup {
 impl fmt::Display for BackupGroup {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}/{}", self.ty, self.id)
+        if self.ns.is_root() {
+            write!(f, "{}/{}", self.ty, self.id)
+        } else {
+            write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id)
+        }
     }
 }
@@ -871,8 +901,9 @@ impl std::str::FromStr for BackupGroup {
             .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

         Ok(Self {
-            ty: cap.get(1).unwrap().as_str().parse()?,
-            id: cap.get(2).unwrap().as_str().to_owned(),
+            ns: BackupNamespace::from_path(cap.get(1).unwrap().as_str())?,
+            ty: cap.get(2).unwrap().as_str().parse()?,
+            id: cap.get(3).unwrap().as_str().to_owned(),
         })
     }
 }
@@ -921,32 +952,44 @@ impl From<(BackupGroup, i64)> for BackupDir {
     }
 }

-impl From<(BackupType, String, i64)> for BackupDir {
-    fn from(data: (BackupType, String, i64)) -> Self {
+impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir {
+    fn from(data: (BackupNamespace, BackupType, String, i64)) -> Self {
         Self {
-            group: (data.0, data.1).into(),
-            time: data.2,
+            group: (data.0, data.1, data.2).into(),
+            time: data.3,
         }
     }
 }

 impl BackupDir {
-    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
+    pub fn with_rfc3339<T>(
+        ns: BackupNamespace,
+        ty: BackupType,
+        id: T,
+        backup_time_string: &str,
+    ) -> Result<Self, Error>
     where
         T: Into<String>,
     {
         let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
-        let group = BackupGroup::new(ty, id.into());
+        let group = BackupGroup::new(ns, ty, id.into());
         Ok(Self { group, time })
     }

+    #[inline]
     pub fn ty(&self) -> BackupType {
         self.group.ty
     }

+    #[inline]
     pub fn id(&self) -> &str {
         &self.group.id
     }
+
+    #[inline]
+    pub fn ns(&self) -> &BackupNamespace {
+        &self.group.ns
+    }
 }

 impl std::str::FromStr for BackupDir {
@@ -960,22 +1003,56 @@ impl std::str::FromStr for BackupDir {
             .captures(path)
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

+        let ns = match cap.get(1) {
+            Some(cap) => BackupNamespace::from_path(cap.as_str())?,
+            None => BackupNamespace::root(),
+        };
+
         BackupDir::with_rfc3339(
-            cap.get(1).unwrap().as_str().parse()?,
-            cap.get(2).unwrap().as_str(),
+            ns,
+            cap.get(2).unwrap().as_str().parse()?,
             cap.get(3).unwrap().as_str(),
+            cap.get(4).unwrap().as_str(),
         )
     }
 }

-impl std::fmt::Display for BackupDir {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // FIXME: log error?
         let time = proxmox_time::epoch_to_rfc3339_utc(self.time).map_err(|_| fmt::Error)?;
         write!(f, "{}/{}", self.group, time)
     }
 }

+/// Used when both a backup group or a directory can be valid.
+pub enum BackupPart {
+    Group(BackupGroup),
+    Dir(BackupDir),
+}
+
+impl std::str::FromStr for BackupPart {
+    type Err = Error;
+
+    /// Parse a path which can be either a backup group or a snapshot dir.
+    fn from_str(path: &str) -> Result<Self, Error> {
+        let cap = GROUP_OR_SNAPSHOT_PATH_REGEX
+            .captures(path)
+            .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
+
+        let ns = match cap.get(1) {
+            Some(cap) => BackupNamespace::from_path(cap.as_str())?,
+            None => BackupNamespace::root(),
+        };
+
+        let ty = cap.get(2).unwrap().as_str().parse()?;
+        let id = cap.get(3).unwrap().as_str().to_string();
+
+        Ok(match cap.get(4) {
+            Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?),
+            None => BackupPart::Group((ns, ty, id).into()),
+        })
+    }
+}
+
 #[api(
     properties: {
         "backup": { type: BackupDir },

View File

@@ -34,14 +34,32 @@ macro_rules! BACKUP_NS_RE {
     );
 }

+#[rustfmt::skip]
+#[macro_export]
+macro_rules! BACKUP_NS_PATH_RE {
+    () => (
+        concat!(r"(:?ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/")
+    );
+}
+
 #[rustfmt::skip]
 #[macro_export]
 macro_rules! SNAPSHOT_PATH_REGEX_STR {
     () => (
-        concat!(r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")")
+        concat!(
+            r"(", BACKUP_NS_PATH_RE!(), ")?",
+            r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")",
+        )
     );
 }

+#[macro_export]
+macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
+    () => {
+        concat!(SNAPSHOT_PATH_REGEX_STR!(), "?")
+    };
+}
+
 mod acl;
 pub use acl::*;
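
For illustration, paths the composed patterns are meant to accept after this change (a sketch, assuming the compiled regexes are re-exported from the crate root like the other statics; up to eight `ns/<id>` levels follow from the `{0,7}` repetition plus the mandatory final segment):

```rust
use pbs_api_types::{GROUP_PATH_REGEX, SNAPSHOT_PATH_REGEX};

fn main() {
    // Plain group paths still match; the namespace prefix is optional.
    assert!(GROUP_PATH_REGEX.is_match("vm/100"));
    // Nested namespaces prefix the group with one "ns/<id>/" per level.
    assert!(GROUP_PATH_REGEX.is_match("ns/dev/ns/qa/vm/100"));
    assert!(SNAPSHOT_PATH_REGEX.is_match("ns/dev/vm/100/2022-04-21T15:04:59Z"));
}
```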

View File

@@ -7,7 +7,7 @@ use std::sync::Arc;
 use futures::future::AbortHandle;
 use serde_json::{json, Value};

-use pbs_api_types::BackupType;
+use pbs_api_types::BackupDir;
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
@@ -47,15 +47,14 @@ impl BackupReader {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: BackupType,
-        backup_id: &str,
-        backup_time: i64,
+        backup: &BackupDir,
         debug: bool,
     ) -> Result<Arc<BackupReader>, Error> {
         let param = json!({
-            "backup-type": backup_type,
-            "backup-id": backup_id,
-            "backup-time": backup_time,
+            "backup-ns": backup.ns(),
+            "backup-type": backup.ty(),
+            "backup-id": backup.id(),
+            "backup-time": backup.time,
            "store": datastore,
            "debug": debug,
        });

View File

@@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;

-use pbs_api_types::{BackupType, HumanByte};
+use pbs_api_types::{BackupDir, HumanByte};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
@@ -86,16 +86,15 @@ impl BackupWriter {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
-        backup_type: BackupType,
-        backup_id: &str,
-        backup_time: i64,
+        backup: &BackupDir,
         debug: bool,
         benchmark: bool,
     ) -> Result<Arc<BackupWriter>, Error> {
         let param = json!({
-            "backup-type": backup_type,
-            "backup-id": backup_id,
-            "backup-time": backup_time,
+            "backup-ns": backup.ns(),
+            "backup-type": backup.ty(),
+            "backup-id": backup.id(),
+            "backup-time": backup.time,
            "store": datastore,
            "debug": debug,
            "benchmark": benchmark

View File

@@ -293,6 +293,7 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
     };

     let query = json_object_to_query(json!({
+        "backup-ns": snapshot.group.ns,
         "backup-type": snapshot.group.ty,
         "backup-id": snapshot.group.id,
         "backup-time": snapshot.time,

View File

@@ -12,7 +12,7 @@ fn run() -> Result<(), Error> {
     let store = unsafe { DataStore::open_path("", &base, None)? };

-    for group in store.iter_backup_groups()? {
+    for group in store.iter_backup_groups(Default::default())? {
         let group = group?;
         println!("found group {}", group);

View File

@@ -217,11 +217,10 @@ impl From<BackupGroup> for pbs_api_types::BackupGroup {
     }
 }

-impl std::fmt::Display for BackupGroup {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        let backup_type = self.backup_type();
-        let id = self.backup_id();
-        write!(f, "{}/{}", backup_type, id)
+impl fmt::Display for BackupGroup {
+    #[inline]
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        fmt::Display::fmt(&self.group, f)
     }
 }
@@ -446,8 +445,8 @@ impl From<BackupDir> for pbs_api_types::BackupDir {
     }
 }

-impl std::fmt::Display for BackupDir {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+impl fmt::Display for BackupDir {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "{}/{}", self.dir.group, self.backup_time_string)
     }
 }

View File

@@ -17,8 +17,8 @@ use proxmox_sys::WorkerTaskContext;
 use proxmox_sys::{task_log, task_warn};

 use pbs_api_types::{
-    Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
-    HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
+    Authid, BackupNamespace, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning,
+    GarbageCollectionStatus, HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
 };
 use pbs_config::ConfigVersionCache;
@@ -348,6 +348,16 @@ impl DataStore {
         self.inner.chunk_store.base_path()
     }

+    pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
+        let mut path = self.base_path();
+        path.reserve(ns.path_len());
+        for part in ns.components() {
+            path.push("ns");
+            path.push(part);
+        }
+        path
+    }
+
     /// Cleanup a backup directory
     ///
     /// Removes all files not mentioned in the manifest.
@@ -517,6 +527,10 @@
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
         let mut full_path = self.base_path();
+        for ns in backup_group.ns.components() {
+            full_path.push("ns");
+            full_path.push(ns);
+        }
         full_path.push(backup_group.ty.as_str());
         std::fs::create_dir_all(&full_path)?;
@@ -579,8 +593,11 @@
     ///
     /// The iterated item is still a Result that can contain errors from rather unexptected FS or
     /// parsing errors.
-    pub fn iter_backup_groups(self: &Arc<DataStore>) -> Result<ListGroups, Error> {
-        ListGroups::new(Arc::clone(self))
+    pub fn iter_backup_groups(
+        self: &Arc<DataStore>,
+        ns: BackupNamespace,
+    ) -> Result<ListGroups, Error> {
+        ListGroups::new(Arc::clone(self), ns)
     }

     /// Get a streaming iter over top-level backup groups of a datatstore, filtered by Ok results
@@ -589,10 +606,11 @@
     /// logged. Can be useful in iterator chain commands
     pub fn iter_backup_groups_ok(
         self: &Arc<DataStore>,
+        ns: BackupNamespace,
     ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
         let this = Arc::clone(self);
         Ok(
-            ListGroups::new(Arc::clone(&self))?.filter_map(move |group| match group {
+            ListGroups::new(Arc::clone(&self), ns)?.filter_map(move |group| match group {
                 Ok(group) => Some(group),
                 Err(err) => {
                     log::error!("list groups error on datastore {} - {}", this.name(), err);
@@ -605,8 +623,11 @@
     /// Get a in-memory vector for all top-level backup groups of a datatstore
     ///
     /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
-    pub fn list_backup_groups(self: &Arc<DataStore>) -> Result<Vec<BackupGroup>, Error> {
-        ListGroups::new(Arc::clone(self))?.collect()
+    pub fn list_backup_groups(
+        self: &Arc<DataStore>,
+        ns: BackupNamespace,
+    ) -> Result<Vec<BackupGroup>, Error> {
+        ListGroups::new(Arc::clone(self), ns)?.collect()
     }

     pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
@@ -1047,11 +1068,16 @@
     }

     /// Open a backup group from this datastore.
-    pub fn backup_group_from_parts<T>(self: &Arc<Self>, ty: BackupType, id: T) -> BackupGroup
+    pub fn backup_group_from_parts<T>(
+        self: &Arc<Self>,
+        ns: BackupNamespace,
+        ty: BackupType,
+        id: T,
+    ) -> BackupGroup
     where
         T: Into<String>,
     {
-        self.backup_group((ty, id.into()).into())
+        self.backup_group((ns, ty, id.into()).into())
     }

     /// Open a backup group from this datastore by backup group path such as `vm/100`.
@@ -1069,6 +1095,7 @@
     /// Open a snapshot (backup directory) from this datastore.
     pub fn backup_dir_from_parts<T>(
         self: &Arc<Self>,
+        ns: BackupNamespace,
         ty: BackupType,
         id: T,
         time: i64,
@@ -1076,7 +1103,7 @@
     where
         T: Into<String>,
     {
-        self.backup_dir((ty, id.into(), time).into())
+        self.backup_dir((ns, ty, id.into(), time).into())
     }

     /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
@@ -1143,15 +1170,19 @@ impl Iterator for ListSnapshots {
 /// A iterator for a (single) level of Backup Groups
 pub struct ListGroups {
     store: Arc<DataStore>,
+    ns: BackupNamespace,
     type_fd: proxmox_sys::fs::ReadDir,
     id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
 }

 impl ListGroups {
-    pub fn new(store: Arc<DataStore>) -> Result<Self, Error> {
+    pub fn new(store: Arc<DataStore>, ns: BackupNamespace) -> Result<Self, Error> {
+        let mut base_path = store.base_path().to_owned();
+        base_path.push(ns.path());
         Ok(ListGroups {
-            type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.base_path())?,
+            type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &base_path)?,
             store,
+            ns,
             id_state: None,
         })
     }
@@ -1183,7 +1214,7 @@ impl Iterator for ListGroups {
                     if BACKUP_ID_REGEX.is_match(name) {
                         return Some(Ok(BackupGroup::new(
                             Arc::clone(&self.store),
-                            (group_type, name.to_owned()).into(),
+                            (self.ns.clone(), group_type, name.to_owned()).into(),
                         )));
                     }
                 }
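
A sketch of the adjusted listing API, assuming an already opened datastore; the root namespace is the `Default`, matching the `Default::default()` call sites elsewhere in this commit:

```rust
use std::sync::Arc;

use pbs_api_types::BackupNamespace;
use pbs_datastore::DataStore;

fn list_root_groups(store: &Arc<DataStore>) -> Result<(), anyhow::Error> {
    // Only groups directly inside the given namespace level are yielded.
    for group in store.iter_backup_groups(BackupNamespace::root())? {
        println!("found group {}", group?);
    }
    Ok(())
}
```

On disk, `namespace_path` maps each namespace level to an `ns/<name>` directory, so a group `vm/100` in namespace `dev` would live under `<base>/ns/dev/vm/100`.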

View File

@@ -14,7 +14,7 @@ use proxmox_router::{
 };
 use proxmox_schema::{api, ApiType, ReturnType};

-use pbs_api_types::BackupType;
+use pbs_api_types::{BackupNamespace, BackupType};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupRepository, BackupWriter};
 use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
@@ -242,9 +242,13 @@ async fn test_upload_speed(
         client,
         crypt_config.clone(),
         repo.store(),
-        BackupType::Host,
-        "benchmark",
-        backup_time,
+        &(
+            BackupNamespace::root(),
+            BackupType::Host,
+            "benchmark".to_string(),
+            backup_time,
+        )
+            .into(),
         false,
         true,
     )

View File

@@ -14,9 +14,9 @@ use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;

 use crate::{
-    api_datastore_latest_snapshot, complete_backup_snapshot, complete_group_or_snapshot,
-    complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
-    extract_repository_from_value, format_key_source, record_repository, BackupDir, BackupGroup,
+    complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name,
+    complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group,
+    extract_repository_from_value, format_key_source, record_repository, BackupDir,
     BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
     Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
 };
@@ -68,16 +68,8 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {

     let client = connect(&repo)?;

-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        snapshot.group.ty,
-        &snapshot.group.id,
-        snapshot.time,
-        true,
-    )
-    .await?;
+    let client =
+        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;

     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -153,13 +145,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let path = required_string_param(&param, "snapshot")?;
     let archive_name = required_string_param(&param, "archive-name")?;

-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group.ty, snapshot.group.id, snapshot.time)
-    };
+    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;

     let crypto = crypto_parameters(&param)?;
@@ -186,9 +172,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        &backup_id,
-        backup_time,
+        &backup_dir,
         true,
     )
     .await?;

View File

@@ -7,6 +7,7 @@ use std::task::Context;

 use anyhow::{bail, format_err, Error};
 use futures::stream::{StreamExt, TryStreamExt};
+use serde::Deserialize;
 use serde_json::{json, Value};
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
@@ -22,10 +23,10 @@ use proxmox_time::{epoch_i64, strftime_local};
 use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};

 use pbs_api_types::{
-    Authid, BackupDir, BackupGroup, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte,
-    PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus,
-    BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
-    TRAFFIC_CONTROL_RATE_SCHEMA,
+    Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
+    Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions, RateLimitConfig,
+    SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
 };
 use pbs_client::catalog_shell::Shell;
 use pbs_client::tools::{
@@ -148,7 +149,7 @@ pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
     group: BackupGroup,
-) -> Result<(BackupType, String, i64), Error> {
+) -> Result<BackupDir, Error> {
     let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
@@ -158,7 +159,20 @@ pub async fn api_datastore_latest_snapshot(
     list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));

-    Ok((group.ty, group.id, list[0].backup.time))
+    Ok((group, list[0].backup.time).into())
+}
+
+pub async fn dir_or_last_from_group(
+    client: &HttpClient,
+    repo: &BackupRepository,
+    path: &str,
+) -> Result<BackupDir, Error> {
+    match path.parse::<BackupPart>()? {
+        BackupPart::Dir(dir) => Ok(dir),
+        BackupPart::Group(group) => {
+            api_datastore_latest_snapshot(&client, repo.store(), group).await
+        }
+    }
 }

 async fn backup_directory<P: AsRef<Path>>(
@@ -251,13 +265,12 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
     record_repository(&repo);

     let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
-        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
-        let group = BackupGroup::new(item.backup.ty, item.backup.id);
-        Ok(group.to_string())
+        let item = GroupListItem::deserialize(record)?;
+        Ok(item.backup.to_string())
     };

     let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
-        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+        let item = GroupListItem::deserialize(record)?;
         let snapshot = BackupDir {
             group: item.backup,
             time: item.last_backup,
@@ -266,7 +279,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
     };

     let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
-        let item: GroupListItem = serde_json::from_value(record.to_owned())?;
+        let item = GroupListItem::deserialize(record)?;
         Ok(pbs_tools::format::render_backup_file_list(&item.files))
     };
@@ -560,6 +573,10 @@ fn spawn_catalog_upload(
                optional: true,
                default: false,
            },
+           "backup-ns": {
+               schema: BACKUP_NAMESPACE_SCHEMA,
+               optional: true,
+           },
            "backup-type": {
                schema: BACKUP_TYPE_SCHEMA,
                optional: true,
@@ -653,6 +670,14 @@ async fn create_backup(
         .as_str()
         .unwrap_or(proxmox_sys::nodename());

+    let backup_namespace: BackupNamespace = match param.get("backup-ns") {
+        Some(ns) => ns
+            .as_str()
+            .ok_or_else(|| format_err!("bad namespace {:?}", ns))?
+            .parse()?,
+        None => BackupNamespace::root(),
+    };
+
     let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;

     let include_dev = param["include-dev"].as_array();
@@ -775,12 +800,13 @@ async fn create_backup(
     let client = connect_rate_limited(&repo, rate_limit)?;
     record_repository(&repo);

-    println!(
-        "Starting backup: {}/{}/{}",
-        backup_type,
-        backup_id,
-        pbs_datastore::BackupDir::backup_time_to_string(backup_time)?
-    );
+    let snapshot = BackupDir::from((
+        backup_namespace,
+        backup_type,
+        backup_id.to_owned(),
+        backup_time,
+    ));
+    println!("Starting backup: {snapshot}");

     println!("Client name: {}", proxmox_sys::nodename());
@@ -827,9 +853,7 @@ async fn create_backup(
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        backup_id,
-        backup_time,
+        &snapshot,
         verbose,
         false,
     )
@@ -873,7 +897,6 @@ async fn create_backup(
         None
     };

-    let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
     let mut manifest = BackupManifest::new(snapshot);

     let mut catalog = None;
@@ -1182,13 +1205,7 @@ async fn restore(param: Value) -> Result<Value, Error> {

     let path = json::required_string_param(&param, "snapshot")?;

-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group.ty, snapshot.group.id, snapshot.time)
-    };
+    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;

     let target = json::required_string_param(&param, "target")?;
     let target = if target == "-" { None } else { Some(target) };
@@ -1211,9 +1228,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        &backup_id,
-        backup_time,
+        &backup_dir,
         true,
     )
     .await?;
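
The parameter plumbing above leans on the serde attributes introduced in pbs-api-types: for the root namespace, `backup-ns` is skipped entirely, so requests from updated clients against existing servers stay unchanged. A sketch (illustrative values, assuming serde_json is available):

```rust
use pbs_api_types::{BackupGroup, BackupNamespace, BackupType};

fn main() -> Result<(), anyhow::Error> {
    let group = BackupGroup::new(BackupNamespace::root(), BackupType::Host, "elsa");
    // The root namespace is omitted thanks to skip_serializing_if.
    assert_eq!(
        serde_json::to_value(&group)?,
        serde_json::json!({ "backup-type": "host", "backup-id": "elsa" })
    );
    Ok(())
}
```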

View File

@@ -18,7 +18,6 @@ use proxmox_schema::*;
 use proxmox_sys::fd::Fd;
 use proxmox_sys::sortable;

-use pbs_api_types::{BackupDir, BackupGroup};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_config::key_config::load_and_decrypt_key;
@@ -29,8 +28,8 @@ use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;

 use crate::{
-    api_datastore_latest_snapshot, complete_group_or_snapshot, complete_img_archive_name,
-    complete_pxar_archive_name, complete_repository, connect, extract_repository_from_value,
+    complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name,
+    complete_repository, connect, dir_or_last_from_group, extract_repository_from_value,
     record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
 };
@@ -199,13 +198,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
     record_repository(&repo);

     let path = required_string_param(&param, "snapshot")?;
-    let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
-        let group: BackupGroup = path.parse()?;
-        api_datastore_latest_snapshot(&client, repo.store(), group).await?
-    } else {
-        let snapshot: BackupDir = path.parse()?;
-        (snapshot.group.ty, snapshot.group.id, snapshot.time)
-    };
+    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;

     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
     let crypt_config = match keyfile {
@@ -236,9 +229,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
-        backup_type,
-        &backup_id,
-        backup_time,
+        &backup_dir,
         true,
     )
     .await?;

View File

@@ -102,16 +102,8 @@ async fn list_files(
     driver: Option<BlockDriverType>,
 ) -> Result<Vec<ArchiveEntry>, Error> {
     let client = connect(&repo)?;
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        snapshot.group.ty,
-        &snapshot.group.id,
-        snapshot.time,
-        true,
-    )
-    .await?;
+    let client =
+        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;

     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -409,16 +401,8 @@ async fn extract(
     };

     let client = connect(&repo)?;
-    let client = BackupReader::start(
-        client,
-        crypt_config.clone(),
-        repo.store(),
-        snapshot.group.ty,
-        &snapshot.group.id,
-        snapshot.time,
-        true,
-    )
-    .await?;
+    let client =
+        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;

     let (manifest, _) = client.download_manifest().await?;

     match path {

View File

@ -10,6 +10,7 @@ use anyhow::{bail, format_err, Error};
use futures::*; use futures::*;
use hyper::http::request::Parts; use hyper::http::request::Parts;
use hyper::{header, Body, Response, StatusCode}; use hyper::{header, Body, Response, StatusCode};
use serde::Deserialize;
use serde_json::{json, Value}; use serde_json::{json, Value};
use tokio_stream::wrappers::ReceiverStream; use tokio_stream::wrappers::ReceiverStream;
@ -31,12 +32,13 @@ use pxar::accessor::aio::Accessor;
use pxar::EntryKind; use pxar::EntryKind;
use pbs_api_types::{ use pbs_api_types::{
Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
}; };
use pbs_client::pxar::{create_tar, create_zip}; use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
@ -54,7 +56,7 @@ use pbs_datastore::{
check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader, check_backup_owner, task_tracking, BackupDir, BackupGroup, DataStore, LocalChunkReader,
StoreProgress, CATALOG_NAME, StoreProgress, CATALOG_NAME,
}; };
use pbs_tools::json::{required_integer_param, required_string_param}; use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask}; use proxmox_rest_server::{formatter, WorkerTask};
use crate::api2::node::rrd::create_value_from_rrd; use crate::api2::node::rrd::create_value_from_rrd;
@ -168,7 +170,7 @@ pub fn list_groups(
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
datastore datastore
.iter_backup_groups()? .iter_backup_groups(Default::default())? // FIXME: Namespaces and recursion parameters!
.try_fold(Vec::new(), |mut group_info, group| { .try_fold(Vec::new(), |mut group_info, group| {
let group = group?; let group = group?;
let owner = match datastore.get_owner(group.as_ref()) { let owner = match datastore.get_owner(group.as_ref()) {
@ -224,8 +226,10 @@ pub fn list_groups(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-type": { type: BackupType }, group: {
"backup-id": { schema: BACKUP_ID_SCHEMA }, type: pbs_api_types::BackupGroup,
flatten: true,
},
}, },
}, },
access: { access: {
@ -238,14 +242,12 @@ pub fn list_groups(
/// Delete backup group including all snapshots. /// Delete backup group including all snapshots.
pub fn delete_group( pub fn delete_group(
store: String, store: String,
backup_type: BackupType, group: pbs_api_types::BackupGroup,
backup_id: String,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?; check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
@ -261,9 +263,10 @@ pub fn delete_group(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-type": { type: BackupType }, backup_dir: {
"backup-id": { schema: BACKUP_ID_SCHEMA }, type: pbs_api_types::BackupDir,
"backup-time": { schema: BACKUP_TIME_SCHEMA }, flatten: true,
},
}, },
}, },
returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE, returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
@ -277,16 +280,14 @@ pub fn delete_group(
/// List snapshot files. /// List snapshot files.
pub fn list_snapshot_files( pub fn list_snapshot_files(
store: String, store: String,
backup_type: BackupType, backup_dir: pbs_api_types::BackupDir,
backup_id: String,
backup_time: i64,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> { ) -> Result<Vec<BackupContent>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; let snapshot = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
@ -306,9 +307,10 @@ pub fn list_snapshot_files(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-type": { type: BackupType }, backup_dir: {
"backup-id": { schema: BACKUP_ID_SCHEMA }, type: pbs_api_types::BackupDir,
"backup-time": { schema: BACKUP_TIME_SCHEMA }, flatten: true,
},
}, },
}, },
access: { access: {
@ -321,16 +323,14 @@ pub fn list_snapshot_files(
/// Delete backup snapshot. /// Delete backup snapshot.
pub fn delete_snapshot( pub fn delete_snapshot(
store: String, store: String,
backup_type: BackupType, backup_dir: pbs_api_types::BackupDir,
backup_id: String,
backup_time: i64,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let snapshot = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; let snapshot = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
@ -349,6 +349,10 @@ pub fn delete_snapshot(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
"backup-type": { "backup-type": {
optional: true, optional: true,
type: BackupType, type: BackupType,
@ -370,6 +374,7 @@ pub fn delete_snapshot(
/// List backup snapshots. /// List backup snapshots.
pub fn list_snapshots( pub fn list_snapshots(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_type: Option<BackupType>, backup_type: Option<BackupType>,
backup_id: Option<String>, backup_id: Option<String>,
_param: Value, _param: Value,
@ -384,21 +389,26 @@ pub fn list_snapshots(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_ns = backup_ns.unwrap_or_default();
// FIXME: filter also owner before collecting, for doing that nicely the owner should move into // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
// backup group and provide an error free (Err -> None) accessor // backup group and provide an error free (Err -> None) accessor
let groups = match (backup_type, backup_id) { let groups = match (backup_type, backup_id) {
(Some(backup_type), Some(backup_id)) => { (Some(backup_type), Some(backup_id)) => {
vec![datastore.backup_group_from_parts(backup_type, backup_id)] vec![datastore.backup_group_from_parts(backup_ns, backup_type, backup_id)]
} }
// FIXME: Recursion
(Some(backup_type), None) => datastore (Some(backup_type), None) => datastore
.iter_backup_groups_ok()? .iter_backup_groups_ok(backup_ns)?
.filter(|group| group.backup_type() == backup_type) .filter(|group| group.backup_type() == backup_type)
.collect(), .collect(),
// FIXME: Recursion
(None, Some(backup_id)) => datastore (None, Some(backup_id)) => datastore
.iter_backup_groups_ok()? .iter_backup_groups_ok(backup_ns)?
.filter(|group| group.backup_id() == backup_id) .filter(|group| group.backup_id() == backup_id)
.collect(), .collect(),
_ => datastore.list_backup_groups()?, // FIXME: Recursion
(None, None) => datastore.list_backup_groups(backup_ns)?,
}; };
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| { let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
@ -506,7 +516,7 @@ fn get_snapshots_count(
filter_owner: Option<&Authid>, filter_owner: Option<&Authid>,
) -> Result<Counts, Error> { ) -> Result<Counts, Error> {
store store
.iter_backup_groups_ok()? .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
.filter(|group| { .filter(|group| {
let owner = match store.get_owner(group.as_ref()) { let owner = match store.get_owner(group.as_ref()) {
Ok(owner) => owner, Ok(owner) => owner,
@ -606,6 +616,10 @@ pub fn status(
store: { store: {
schema: DATASTORE_SCHEMA, schema: DATASTORE_SCHEMA,
}, },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
"backup-type": { "backup-type": {
type: BackupType, type: BackupType,
optional: true, optional: true,
@ -641,6 +655,7 @@ pub fn status(
/// or all backups in the datastore. /// or all backups in the datastore.
pub fn verify( pub fn verify(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_type: Option<BackupType>, backup_type: Option<BackupType>,
backup_id: Option<String>, backup_id: Option<String>,
backup_time: Option<i64>, backup_time: Option<i64>,
@ -658,13 +673,22 @@ pub fn verify(
let mut backup_group = None; let mut backup_group = None;
let mut worker_type = "verify"; let mut worker_type = "verify";
// FIXME: Recursion
// FIXME: Namespaces and worker ID, could this be an issue?
let backup_ns = backup_ns.unwrap_or_default();
match (backup_type, backup_id, backup_time) { match (backup_type, backup_id, backup_time) {
(Some(backup_type), Some(backup_id), Some(backup_time)) => { (Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!( worker_id = format!(
"{}:{}/{}/{:08X}", "{}:{}/{}/{}/{:08X}",
store, backup_type, backup_id, backup_time store,
backup_ns.display_as_path(),
backup_type,
backup_id,
backup_time
); );
let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; let dir =
datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?; check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;
@ -672,8 +696,14 @@ pub fn verify(
worker_type = "verify_snapshot"; worker_type = "verify_snapshot";
} }
(Some(backup_type), Some(backup_id), None) => { (Some(backup_type), Some(backup_id), None) => {
worker_id = format!("{}:{}/{}", store, backup_type, backup_id); worker_id = format!(
let group = pbs_api_types::BackupGroup::from((backup_type, backup_id)); "{}:{}/{}/{}",
store,
backup_ns.display_as_path(),
backup_type,
backup_id
);
let group = pbs_api_types::BackupGroup::from((backup_ns, backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?; check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
@ -748,8 +778,10 @@ pub fn verify(
#[api( #[api(
input: { input: {
properties: { properties: {
"backup-id": { schema: BACKUP_ID_SCHEMA }, group: {
"backup-type": { type: BackupType }, type: pbs_api_types::BackupGroup,
flatten: true,
},
"dry-run": { "dry-run": {
optional: true, optional: true,
type: bool, type: bool,
@ -772,8 +804,7 @@ pub fn verify(
)] )]
/// Prune a group on the datastore /// Prune a group on the datastore
pub fn prune( pub fn prune(
backup_id: String, group: pbs_api_types::BackupGroup,
backup_type: BackupType,
dry_run: bool, dry_run: bool,
prune_options: PruneOptions, prune_options: PruneOptions,
store: String, store: String,
@ -784,11 +815,11 @@ pub fn prune(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let group = datastore.backup_group_from_parts(backup_type, &backup_id); let group = datastore.backup_group(group);
check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?; check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;
let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id); let worker_id = format!("{}:{}", store, group);
let mut prune_result = Vec::new(); let mut prune_result = Vec::new();
@ -828,10 +859,9 @@ pub fn prune(
); );
task_log!( task_log!(
worker, worker,
"Starting prune on store \"{}\" group \"{}/{}\"", "Starting prune on store \"{}\" group \"{}\"",
store, store,
backup_type, group,
backup_id
); );
} }
@ -1076,11 +1106,7 @@ pub fn download_file(
let file_name = required_string_param(&param, "file-name")?.to_owned(); let file_name = required_string_param(&param, "file-name")?.to_owned();
let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?; let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
let backup_id = required_string_param(&param, "backup-id")?.to_owned();
let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
@ -1159,11 +1185,7 @@ pub fn download_file_decoded(
let file_name = required_string_param(&param, "file-name")?.to_owned(); let file_name = required_string_param(&param, "file-name")?.to_owned();
let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?; let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
let backup_id = required_string_param(&param, "backup-id")?.to_owned();
let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
@ -1285,11 +1307,7 @@ pub fn upload_backup_log(
let file_name = CLIENT_LOG_BLOB_NAME; let file_name = CLIENT_LOG_BLOB_NAME;
let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?; let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let owner = datastore.get_owner(backup_dir.as_ref())?; let owner = datastore.get_owner(backup_dir.as_ref())?;
@ -1303,14 +1321,7 @@ pub fn upload_backup_log(
bail!("backup already contains a log."); bail!("backup already contains a log.");
} }
println!( println!("Upload backup log to {store}/{backup_dir}/{file_name}");
"Upload backup log to {}/{}/{}/{}/{}",
store,
backup_type,
backup_id,
backup_dir.backup_time_string(),
file_name
);
let data = req_body let data = req_body
.map_err(Error::from) .map_err(Error::from)
@ -1335,9 +1346,10 @@ pub fn upload_backup_log(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-type": { type: BackupType }, backup_dir: {
"backup-id": { schema: BACKUP_ID_SCHEMA }, type: pbs_api_types::BackupDir,
"backup-time": { schema: BACKUP_TIME_SCHEMA }, flatten: true,
},
"filepath": { "filepath": {
description: "Base64 encoded path.", description: "Base64 encoded path.",
type: String, type: String,
@ -1351,9 +1363,7 @@ pub fn upload_backup_log(
/// Get the entries of the given path of the catalog /// Get the entries of the given path of the catalog
pub fn catalog( pub fn catalog(
store: String, store: String,
backup_type: BackupType, backup_dir: pbs_api_types::BackupDir,
backup_id: String,
backup_time: i64,
filepath: String, filepath: String,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> { ) -> Result<Vec<ArchiveEntry>, Error> {
@ -1361,7 +1371,7 @@ pub fn catalog(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; let backup_dir = datastore.backup_dir(backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
@ -1438,13 +1448,9 @@ pub fn pxar_file_download(
let filepath = required_string_param(&param, "filepath")?.to_owned(); let filepath = required_string_param(&param, "filepath")?.to_owned();
let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
let tar = param["tar"].as_bool().unwrap_or(false); let tar = param["tar"].as_bool().unwrap_or(false);
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?; let backup_dir = datastore.backup_dir(Deserialize::deserialize(&param)?)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
@ -1617,8 +1623,10 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-type": { type: BackupType }, backup_group: {
"backup-id": { schema: BACKUP_ID_SCHEMA }, type: pbs_api_types::BackupGroup,
flatten: true,
},
}, },
}, },
access: { access: {
@@ -1628,14 +1636,12 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
 /// Get "notes" for a backup group
 pub fn get_group_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
+    backup_group: pbs_api_types::BackupGroup,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
-
     check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
@@ -1647,8 +1653,10 @@ pub fn get_group_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            backup_group: {
+                type: pbs_api_types::BackupGroup,
+                flatten: true,
+            },
             notes: {
                 description: "A multiline text.",
             },
@@ -1663,15 +1671,13 @@ pub fn get_group_notes(
 /// Set "notes" for a backup group
 pub fn set_group_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
+    backup_group: pbs_api_types::BackupGroup,
     notes: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
-
     check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
@@ -1685,9 +1691,10 @@ pub fn set_group_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
         },
     },
     access: {
@@ -1697,15 +1704,13 @@ pub fn set_group_notes(
 /// Get "notes" for a specific backup
 pub fn get_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1725,9 +1730,10 @@ pub fn get_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
             notes: {
                 description: "A multiline text.",
             },
@@ -1742,16 +1748,14 @@ pub fn get_notes(
 /// Set "notes" for a specific backup
 pub fn set_notes(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     notes: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1773,9 +1777,10 @@ pub fn set_notes(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
         },
     },
     access: {
@@ -1785,15 +1790,13 @@ pub fn set_notes(
 /// Query protection for a specific backup
 pub fn get_protection(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<bool, Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1809,9 +1812,10 @@ pub fn get_protection(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
-            "backup-time": { schema: BACKUP_TIME_SCHEMA },
+            backup_dir: {
+                type: pbs_api_types::BackupDir,
+                flatten: true,
+            },
             protected: {
                 description: "Enable/disable protection.",
             },
@@ -1826,16 +1830,14 @@ pub fn get_protection(
 /// En- or disable protection for a specific backup
 pub fn set_protection(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
-    backup_time: i64,
+    backup_dir: pbs_api_types::BackupDir,
     protected: bool,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
 
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     check_priv_or_backup_owner(
         &datastore,
@@ -1851,8 +1853,10 @@ pub fn set_protection(
     input: {
         properties: {
             store: { schema: DATASTORE_SCHEMA },
-            "backup-type": { type: BackupType },
-            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            backup_group: {
+                type: pbs_api_types::BackupGroup,
+                flatten: true,
+            },
             "new-owner": {
                 type: Authid,
             },
@@ -1866,14 +1870,13 @@ pub fn set_protection(
 /// Change owner of a backup group
 pub fn set_backup_owner(
     store: String,
-    backup_type: BackupType,
-    backup_id: String,
+    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-    let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);
+    let backup_group = datastore.backup_group(backup_group);
 
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
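All of these handlers funnel through the new datastore helpers: `backup_group()` wraps a plain api-type group with the store it belongs to, and `backup_dir()` does the same for a snapshot. A self-contained sketch of the wrapping pattern (the types here are stand-ins, not the real pbs-datastore definitions):

use std::sync::Arc;

// Minimal stand-ins (assumptions, not the real pbs types) to show the
// pattern: a store-bound group/dir wrapping the plain api-type values.
struct ApiGroup { ty: String, id: String }
struct ApiDir { group: ApiGroup, time: i64 }

struct DataStore { name: String }

struct BackupGroup { store: Arc<DataStore>, group: ApiGroup }
struct BackupDir { group: BackupGroup, time: i64 }

impl DataStore {
    fn backup_group(self: &Arc<Self>, group: ApiGroup) -> BackupGroup {
        BackupGroup { store: Arc::clone(self), group }
    }
    fn backup_dir(self: &Arc<Self>, dir: ApiDir) -> BackupDir {
        BackupDir { group: self.backup_group(dir.group), time: dir.time }
    }
}

fn main() {
    let store = Arc::new(DataStore { name: "store2".into() });
    let dir = store.backup_dir(ApiDir {
        group: ApiGroup { ty: "host".into(), id: "elsa".into() },
        time: 1_650_500_000,
    });
    // Later calls (path lookups, listings) need no extra store argument.
    println!("{}:{}/{}/{}", dir.group.store.name, dir.group.group.ty, dir.group.group.id, dir.time);
}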

View File

@@ -6,6 +6,7 @@ use hex::FromHex;
 use hyper::header::{HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
 use hyper::{Body, Request, Response, StatusCode};
+use serde::Deserialize;
 use serde_json::{json, Value};
 
 use proxmox_router::list_subdirs_api_method;
@@ -81,9 +82,7 @@ fn upgrade_to_backup_protocol(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
 
-    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-    let backup_id = required_string_param(&param, "backup-id")?;
-    let backup_time = required_integer_param(&param, "backup-time")?;
+    let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;
 
     let protocols = parts
         .headers
@@ -102,13 +101,15 @@ fn upgrade_to_backup_protocol(
         );
     }
 
-    let worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
+    let worker_id = format!("{}:{}/{}", store, backup_dir_arg.ty(), backup_dir_arg.id());
 
     let env_type = rpcenv.env_type();
 
-    let backup_group = datastore.backup_group_from_parts(backup_type, backup_id);
+    let backup_group = datastore.backup_group(backup_dir_arg.group.clone());
 
-    let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
+    let worker_type = if backup_group.backup_type() == BackupType::Host
+        && backup_group.backup_id() == "benchmark"
+    {
         if !benchmark {
             bail!("unable to run benchmark without --benchmark flags");
         }
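Note the two accessor vocabularies in play above: the plain api-type snapshot exposes `ty()`/`id()`, while the store-bound group uses `backup_type()`/`backup_id()`. Both ultimately read the same group fields; a plausible delegation, shown with local stand-in types (assumed, for illustration only):

// Assumed delegation: the snapshot-level accessors reach into its group.
struct Group { ty: String, id: String }
struct Dir { group: Group }

impl Dir {
    fn ty(&self) -> &str { &self.group.ty }
    fn id(&self) -> &str { &self.group.id }
}

fn main() {
    let d = Dir { group: Group { ty: "host".into(), id: "benchmark".into() } };
    // Same shape as the worker_id built in the hunk above.
    println!("store2:{}/{}", d.ty(), d.id());
}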
@@ -152,7 +153,7 @@ fn upgrade_to_backup_protocol(
         }
     };
 
-    let backup_dir = backup_group.backup_dir(backup_time)?;
+    let backup_dir = backup_group.backup_dir(backup_dir_arg.time)?;
 
     let _last_guard = if let Some(last) = &last_backup {
         if backup_dir.backup_time() <= last.backup_dir.backup_time() {

View File

@@ -6,6 +6,7 @@ use hex::FromHex;
 use hyper::header::{self, HeaderValue, UPGRADE};
 use hyper::http::request::Parts;
 use hyper::{Body, Request, Response, StatusCode};
+use serde::Deserialize;
 use serde_json::Value;
 
 use proxmox_router::{
@@ -16,15 +17,15 @@ use proxmox_schema::{BooleanSchema, ObjectSchema};
 use proxmox_sys::sortable;
 
 use pbs_api_types::{
-    Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
-    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
-    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
+    Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_READ,
 };
 use pbs_config::CachedUserInfo;
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType};
 use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
-use pbs_tools::json::{required_integer_param, required_string_param};
+use pbs_tools::json::required_string_param;
 
 use proxmox_rest_server::{H2Service, WorkerTask};
 use proxmox_sys::fs::lock_dir_noblock_shared;
@@ -89,9 +90,7 @@ fn upgrade_to_backup_reader_protocol(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
-    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
-    let backup_id = required_string_param(&param, "backup-id")?;
-    let backup_time = required_integer_param(&param, "backup-time")?;
+    let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?;
 
     let protocols = parts
         .headers
@@ -112,7 +111,7 @@ fn upgrade_to_backup_reader_protocol(
     let env_type = rpcenv.env_type();
 
-    let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
+    let backup_dir = datastore.backup_dir(backup_dir)?;
 
     if !priv_read {
         let owner = datastore.get_owner(backup_dir.as_ref())?;
         let correct_owner = owner == auth_id
@@ -135,9 +134,9 @@ fn upgrade_to_backup_reader_protocol(
     let worker_id = format!(
         "{}:{}/{}/{:08X}",
         store,
-        backup_type,
-        backup_id,
-        backup_dir.backup_time()
+        backup_dir.backup_type(),
+        backup_dir.backup_id(),
+        backup_dir.backup_time(),
     );
 
     WorkerTask::spawn(

View File

@@ -408,7 +408,8 @@ fn backup_worker(
     let mut pool_writer = PoolWriter::new(pool, &setup.drive, worker, email, force_media_set)?;
 
-    let mut group_list = datastore.list_backup_groups()?;
+    // FIXME: Namespaces! Probably just recurse for now? Not sure about the usage here...
+    let mut group_list = datastore.list_backup_groups(Default::default())?;
 
     group_list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
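The call sites updated in this and the following hunks all pass `Default::default()` as the new namespace argument, which (assuming `BackupNamespace` implements `Default` as the empty, i.e. root, namespace) keeps the old behavior of operating only on the datastore root. A quick sketch of that assumption with a local stand-in type:

// Assumption: the default namespace is the root (empty-path) namespace,
// so list_backup_groups(Default::default()) behaves like the old
// argument-less call did.
#[derive(Default, PartialEq, Debug)]
struct Namespace {
    inner: Vec<String>, // path components; empty == root
}

impl Namespace {
    fn root() -> Self {
        Self::default()
    }
    fn is_root(&self) -> bool {
        self.inner.is_empty()
    }
}

fn main() {
    let ns: Namespace = Default::default();
    assert_eq!(ns, Namespace::root());
    assert!(ns.is_root());
}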

View File

@@ -533,7 +533,11 @@ pub fn verify_all_backups(
         }
     };
 
-    let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
+    // FIXME: This should probably simply enable recursion (or the call have a recursion parameter)
+    let mut list = match verify_worker
+        .datastore
+        .iter_backup_groups_ok(Default::default())
+    {
         Ok(list) => list
             .filter(|group| {
                 !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")

View File

@@ -42,7 +42,8 @@ pub fn prune_datastore(
     let privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
     let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;
 
-    for group in datastore.iter_backup_groups()? {
+    // FIXME: Namespaces and recursion!
+    for group in datastore.iter_backup_groups(Default::default())? {
         let group = group?;
         let list = group.list_backups()?;

View File

@@ -651,13 +651,11 @@ async fn pull_group(
             continue;
         }
 
-        let backup_time = snapshot.time;
-
-        remote_snapshots.insert(backup_time);
+        remote_snapshots.insert(snapshot.time);
 
         if let Some(last_sync_time) = last_sync {
-            if last_sync_time > backup_time {
-                skip_info.update(backup_time);
+            if last_sync_time > snapshot.time {
+                skip_info.update(snapshot.time);
                 continue;
             }
         }
@@ -676,16 +674,8 @@ async fn pull_group(
             options,
         )?;
 
-        let reader = BackupReader::start(
-            new_client,
-            None,
-            params.source.store(),
-            snapshot.group.ty,
-            &snapshot.group.id,
-            backup_time,
-            true,
-        )
-        .await?;
+        let reader =
+            BackupReader::start(new_client, None, params.source.store(), &snapshot, true).await?;
 
         let result = pull_snapshot_from(
             worker,
@@ -757,6 +747,8 @@ pub async fn pull_store(
     // explicit create shared lock to prevent GC on newly created chunks
     let _shared_store_lock = params.store.try_shared_chunk_store_lock()?;
 
+    // FIXME: Namespaces! AND: If we make this API call recurse down namespaces we need to do the
+    // same down in the `remove_vanished` case!
     let path = format!("api2/json/admin/datastore/{}/groups", params.source.store());
 
     let mut result = client
@@ -850,7 +842,8 @@ pub async fn pull_store(
     if params.remove_vanished {
         let result: Result<(), Error> = proxmox_lang::try_block!({
-            for local_group in params.store.iter_backup_groups()? {
+            // FIXME: See above comment about namespaces & recursion
+            for local_group in params.store.iter_backup_groups(Default::default())? {
                 let local_group = local_group?;
 
                 if new_groups.contains(local_group.as_ref()) {
                     continue;
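The `new_groups.contains(local_group.as_ref())` test in the last hunk works because the store-bound group can be viewed as the plain api-type it wraps, presumably via an `AsRef` impl. A self-contained sketch of that pattern (type names are assumptions):

use std::collections::HashSet;

// Assumed pattern: the wrapper as_refs to the plain api-type, so a
// HashSet of plain groups can be queried with a store-bound group.
#[derive(PartialEq, Eq, Hash)]
struct ApiGroup {
    ty: String,
    id: String,
}

struct StoreGroup {
    group: ApiGroup, // plus a datastore handle in the real type
}

impl AsRef<ApiGroup> for StoreGroup {
    fn as_ref(&self) -> &ApiGroup {
        &self.group
    }
}

fn main() {
    let mut new_groups = HashSet::new();
    new_groups.insert(ApiGroup { ty: "host".into(), id: "elsa".into() });

    let local = StoreGroup {
        group: ApiGroup { ty: "host".into(), id: "elsa".into() },
    };
    // Vanished-group check: still present remotely, so skip removal.
    assert!(new_groups.contains(local.as_ref()));
}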