split the namespace out of BackupGroup/Dir api types

We decided to go this route because it'll most likely be
safer in the API, as we need to explicitly add namespace
support to the various API endpoints this way.

For example, 'pull' should have 2 namespaces: local and
remote, and the GroupFilter (which would otherwise contain
exactly *one* namespace parameter) needs to be applied for
both sides (to decide what to pull from the remote, and what
to *remove* locally as cleanup).

The *datastore* types still contain the namespace and have a
`.backup_ns()` getter.

Note that the datastore's `Display` implementations are no
longer safe to use as a deserializable string.

Additionally, some datastore-based methods have now been
exposed via the BackupGroup/BackupDir types to avoid a
"round trip" in code.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
Wolfgang Bumiller 2022-05-09 15:39:29 +02:00 committed by Thomas Lamprecht
parent 1baf9030ad
commit 133d718fe4
25 changed files with 800 additions and 509 deletions

View File

@ -37,13 +37,8 @@ async fn run() -> Result<(), Error> {
client, client,
None, None,
"store2", "store2",
&( &BackupNamespace::root(),
BackupNamespace::root(), &(BackupType::Host, "elsa".to_string(), backup_time).into(),
BackupType::Host,
"elsa".to_string(),
backup_time,
)
.into(),
true, true,
) )
.await?; .await?;

View File

@ -21,13 +21,8 @@ async fn upload_speed() -> Result<f64, Error> {
client, client,
None, None,
datastore, datastore,
&( &BackupNamespace::root(),
BackupNamespace::root(), &(BackupType::Host, "speedtest".to_string(), backup_time).into(),
BackupType::Host,
"speedtest".to_string(),
backup_time,
)
.into(),
false, false,
true, true,
) )

View File

@ -25,8 +25,7 @@ const_regex! {
pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$"); pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");
pub GROUP_PATH_REGEX = concat!( pub GROUP_PATH_REGEX = concat!(
r"^(", BACKUP_NS_PATH_RE!(), r"/)?", r"^(", BACKUP_TYPE_RE!(), ")/",
r"(", BACKUP_TYPE_RE!(), ")/",
r"(", BACKUP_ID_RE!(), r")$", r"(", BACKUP_ID_RE!(), r")$",
); );
@ -848,7 +847,6 @@ impl std::cmp::PartialOrd for BackupType {
#[api( #[api(
properties: { properties: {
"backup-ns": { type: BackupNamespace, optional: true },
"backup-type": { type: BackupType }, "backup-type": { type: BackupType },
"backup-id": { schema: BACKUP_ID_SCHEMA }, "backup-id": { schema: BACKUP_ID_SCHEMA },
}, },
@ -857,14 +855,6 @@ impl std::cmp::PartialOrd for BackupType {
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
/// A backup group (without a data store). /// A backup group (without a data store).
pub struct BackupGroup { pub struct BackupGroup {
/// An optional namespace this backup belongs to.
#[serde(
rename = "backup-ns",
skip_serializing_if = "BackupNamespace::is_root",
default
)]
pub ns: BackupNamespace,
/// Backup type. /// Backup type.
#[serde(rename = "backup-type")] #[serde(rename = "backup-type")]
pub ty: BackupType, pub ty: BackupType,
@ -875,12 +865,8 @@ pub struct BackupGroup {
} }
impl BackupGroup { impl BackupGroup {
pub fn new<T: Into<String>>(ns: BackupNamespace, ty: BackupType, id: T) -> Self { pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
Self { Self { ty, id: id.into() }
ns,
ty,
id: id.into(),
}
} }
pub fn matches(&self, filter: &crate::GroupFilter) -> bool { pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
@ -906,24 +892,18 @@ impl AsRef<BackupGroup> for BackupGroup {
} }
} }
impl From<(BackupNamespace, BackupType, String)> for BackupGroup { impl From<(BackupType, String)> for BackupGroup {
#[inline] #[inline]
fn from(data: (BackupNamespace, BackupType, String)) -> Self { fn from(data: (BackupType, String)) -> Self {
Self { Self {
ns: data.0, ty: data.0,
ty: data.1, id: data.1,
id: data.2,
} }
} }
} }
impl std::cmp::Ord for BackupGroup { impl std::cmp::Ord for BackupGroup {
fn cmp(&self, other: &Self) -> std::cmp::Ordering { fn cmp(&self, other: &Self) -> std::cmp::Ordering {
let ns_order = self.ns.cmp(&other.ns);
if ns_order != std::cmp::Ordering::Equal {
return ns_order;
}
let type_order = self.ty.cmp(&other.ty); let type_order = self.ty.cmp(&other.ty);
if type_order != std::cmp::Ordering::Equal { if type_order != std::cmp::Ordering::Equal {
return type_order; return type_order;
@ -949,11 +929,7 @@ impl std::cmp::PartialOrd for BackupGroup {
impl fmt::Display for BackupGroup { impl fmt::Display for BackupGroup {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.ns.is_root() {
write!(f, "{}/{}", self.ty, self.id) write!(f, "{}/{}", self.ty, self.id)
} else {
write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id)
}
} }
} }
@ -969,9 +945,8 @@ impl std::str::FromStr for BackupGroup {
.ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?; .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;
Ok(Self { Ok(Self {
ns: BackupNamespace::from_path(cap.get(1).unwrap().as_str())?, ty: cap.get(1).unwrap().as_str().parse()?,
ty: cap.get(2).unwrap().as_str().parse()?, id: cap.get(2).unwrap().as_str().to_owned(),
id: cap.get(3).unwrap().as_str().to_owned(),
}) })
} }
} }
@ -1020,27 +995,22 @@ impl From<(BackupGroup, i64)> for BackupDir {
} }
} }
impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir { impl From<(BackupType, String, i64)> for BackupDir {
fn from(data: (BackupNamespace, BackupType, String, i64)) -> Self { fn from(data: (BackupType, String, i64)) -> Self {
Self { Self {
group: (data.0, data.1, data.2).into(), group: (data.0, data.1).into(),
time: data.3, time: data.2,
} }
} }
} }
impl BackupDir { impl BackupDir {
pub fn with_rfc3339<T>( pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
ns: BackupNamespace,
ty: BackupType,
id: T,
backup_time_string: &str,
) -> Result<Self, Error>
where where
T: Into<String>, T: Into<String>,
{ {
let time = proxmox_time::parse_rfc3339(&backup_time_string)?; let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
let group = BackupGroup::new(ns, ty, id.into()); let group = BackupGroup::new(ty, id.into());
Ok(Self { group, time }) Ok(Self { group, time })
} }
@ -1053,11 +1023,6 @@ impl BackupDir {
pub fn id(&self) -> &str { pub fn id(&self) -> &str {
&self.group.id &self.group.id
} }
#[inline]
pub fn ns(&self) -> &BackupNamespace {
&self.group.ns
}
} }
impl std::str::FromStr for BackupDir { impl std::str::FromStr for BackupDir {
@ -1071,15 +1036,10 @@ impl std::str::FromStr for BackupDir {
.captures(path) .captures(path)
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
let ns = match cap.get(1) {
Some(cap) => BackupNamespace::from_path(cap.as_str())?,
None => BackupNamespace::root(),
};
BackupDir::with_rfc3339( BackupDir::with_rfc3339(
ns, cap.get(1).unwrap().as_str().parse()?,
cap.get(2).unwrap().as_str().parse()?, cap.get(2).unwrap().as_str(),
cap.get(3).unwrap().as_str(), cap.get(3).unwrap().as_str(),
cap.get(4).unwrap().as_str(),
) )
} }
} }
@ -1107,16 +1067,12 @@ impl std::str::FromStr for BackupPart {
.captures(path) .captures(path)
.ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?; .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;
let ns = match cap.get(1) { let ty = cap.get(1).unwrap().as_str().parse()?;
Some(cap) => BackupNamespace::from_path(cap.as_str())?, let id = cap.get(2).unwrap().as_str().to_string();
None => BackupNamespace::root(),
};
let ty = cap.get(2).unwrap().as_str().parse()?;
let id = cap.get(3).unwrap().as_str().to_string();
Ok(match cap.get(4) { Ok(match cap.get(3) {
Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?), Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?),
None => BackupPart::Group((ns, ty, id).into()), None => BackupPart::Group((ty, id).into()),
}) })
} }
} }

View File

@ -34,20 +34,11 @@ macro_rules! BACKUP_NS_RE {
); );
} }
#[rustfmt::skip]
#[macro_export]
macro_rules! BACKUP_NS_PATH_RE {
() => (
concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!())
);
}
#[rustfmt::skip] #[rustfmt::skip]
#[macro_export] #[macro_export]
macro_rules! SNAPSHOT_PATH_REGEX_STR { macro_rules! SNAPSHOT_PATH_REGEX_STR {
() => ( () => (
concat!( concat!(
r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?",
r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")", r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")",
) )
); );
@ -58,7 +49,6 @@ macro_rules! SNAPSHOT_PATH_REGEX_STR {
macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR { macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
() => { () => {
concat!( concat!(
r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?",
r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?", r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?",
) )
}; };

View File

@ -7,7 +7,7 @@ use std::sync::Arc;
use futures::future::AbortHandle; use futures::future::AbortHandle;
use serde_json::{json, Value}; use serde_json::{json, Value};
use pbs_api_types::BackupDir; use pbs_api_types::{BackupDir, BackupNamespace};
use pbs_datastore::data_blob::DataBlob; use pbs_datastore::data_blob::DataBlob;
use pbs_datastore::data_blob_reader::DataBlobReader; use pbs_datastore::data_blob_reader::DataBlobReader;
use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::dynamic_index::DynamicIndexReader;
@ -47,6 +47,7 @@ impl BackupReader {
client: HttpClient, client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>, crypt_config: Option<Arc<CryptConfig>>,
datastore: &str, datastore: &str,
ns: &BackupNamespace,
backup: &BackupDir, backup: &BackupDir,
debug: bool, debug: bool,
) -> Result<Arc<BackupReader>, Error> { ) -> Result<Arc<BackupReader>, Error> {
@ -58,7 +59,6 @@ impl BackupReader {
"debug": debug, "debug": debug,
}); });
let ns = backup.ns();
if !ns.is_root() { if !ns.is_root() {
param["backup-ns"] = serde_json::to_value(ns)?; param["backup-ns"] = serde_json::to_value(ns)?;
} }

View File

@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt;
use tokio::sync::{mpsc, oneshot}; use tokio::sync::{mpsc, oneshot};
use tokio_stream::wrappers::ReceiverStream; use tokio_stream::wrappers::ReceiverStream;
use pbs_api_types::{BackupDir, HumanByte}; use pbs_api_types::{BackupDir, BackupNamespace, HumanByte};
use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder}; use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader; use pbs_datastore::fixed_index::FixedIndexReader;
@ -86,6 +86,7 @@ impl BackupWriter {
client: HttpClient, client: HttpClient,
crypt_config: Option<Arc<CryptConfig>>, crypt_config: Option<Arc<CryptConfig>>,
datastore: &str, datastore: &str,
ns: &BackupNamespace,
backup: &BackupDir, backup: &BackupDir,
debug: bool, debug: bool,
benchmark: bool, benchmark: bool,
@ -99,7 +100,6 @@ impl BackupWriter {
"benchmark": benchmark "benchmark": benchmark
}); });
let ns = backup.ns();
if !ns.is_root() { if !ns.is_root() {
param["backup-ns"] = serde_json::to_value(ns)?; param["backup-ns"] = serde_json::to_value(ns)?;
} }

View File

@ -292,8 +292,16 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
_ => return result, _ => return result,
}; };
let ns: pbs_api_types::BackupNamespace = match param.get("ns") {
Some(ns) => match ns.parse() {
Ok(v) => v,
_ => return result,
},
_ => return result,
};
let query = json_object_to_query(json!({ let query = json_object_to_query(json!({
"backup-ns": snapshot.group.ns, "backup-ns": ns,
"backup-type": snapshot.group.ty, "backup-type": snapshot.group.ty,
"backup-id": snapshot.group.id, "backup-id": snapshot.group.id,
"backup-time": snapshot.time, "backup-time": snapshot.time,

View File

@ -1,3 +1,4 @@
use std::convert::TryFrom;
use std::fmt; use std::fmt;
use std::os::unix::io::RawFd; use std::os::unix::io::RawFd;
use std::path::PathBuf; use std::path::PathBuf;
@ -8,11 +9,11 @@ use anyhow::{bail, format_err, Error};
use proxmox_sys::fs::lock_dir_noblock; use proxmox_sys::fs::lock_dir_noblock;
use pbs_api_types::{ use pbs_api_types::{
BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX, Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX,
}; };
use pbs_config::{open_backup_lockfile, BackupLockGuard}; use pbs_config::{open_backup_lockfile, BackupLockGuard};
use crate::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME}; use crate::manifest::{BackupManifest, MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME};
use crate::{DataBlob, DataStore}; use crate::{DataBlob, DataStore};
/// BackupGroup is a directory containing a list of BackupDir /// BackupGroup is a directory containing a list of BackupDir
@ -20,6 +21,7 @@ use crate::{DataBlob, DataStore};
pub struct BackupGroup { pub struct BackupGroup {
store: Arc<DataStore>, store: Arc<DataStore>,
ns: BackupNamespace,
group: pbs_api_types::BackupGroup, group: pbs_api_types::BackupGroup,
} }
@ -33,8 +35,12 @@ impl fmt::Debug for BackupGroup {
} }
impl BackupGroup { impl BackupGroup {
pub(crate) fn new(store: Arc<DataStore>, group: pbs_api_types::BackupGroup) -> Self { pub(crate) fn new(
Self { store, group } store: Arc<DataStore>,
ns: BackupNamespace,
group: pbs_api_types::BackupGroup,
) -> Self {
Self { store, ns, group }
} }
/// Access the underlying [`BackupGroup`](pbs_api_types::BackupGroup). /// Access the underlying [`BackupGroup`](pbs_api_types::BackupGroup).
@ -45,7 +51,7 @@ impl BackupGroup {
#[inline] #[inline]
pub fn backup_ns(&self) -> &BackupNamespace { pub fn backup_ns(&self) -> &BackupNamespace {
&self.group.ns &self.ns
} }
#[inline] #[inline]
@ -59,11 +65,14 @@ impl BackupGroup {
} }
pub fn full_group_path(&self) -> PathBuf { pub fn full_group_path(&self) -> PathBuf {
self.store.base_path().join(self.group.to_string()) self.store.group_path(&self.ns, &self.group)
} }
pub fn relative_group_path(&self) -> PathBuf { pub fn relative_group_path(&self) -> PathBuf {
self.group.to_string().into() let mut path = self.store.namespace_path(&self.ns);
path.push(self.group.ty.as_str());
path.push(&self.group.id);
path
} }
pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> { pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
@ -205,6 +214,26 @@ impl BackupGroup {
Ok(removed_all_snaps) Ok(removed_all_snaps)
} }
/// Returns the backup owner.
///
/// The backup owner is the entity who first created the backup group.
pub fn get_owner(&self) -> Result<Authid, Error> {
self.store.get_owner(&self.ns, self.as_ref())
}
/// Set the backup owner.
pub fn set_owner(&self, auth_id: &Authid, force: bool) -> Result<(), Error> {
self.store
.set_owner(&self.ns, &self.as_ref(), auth_id, force)
}
}
impl AsRef<pbs_api_types::BackupNamespace> for BackupGroup {
#[inline]
fn as_ref(&self) -> &pbs_api_types::BackupNamespace {
&self.ns
}
} }
impl AsRef<pbs_api_types::BackupGroup> for BackupGroup { impl AsRef<pbs_api_types::BackupGroup> for BackupGroup {
@ -229,7 +258,11 @@ impl From<BackupGroup> for pbs_api_types::BackupGroup {
impl fmt::Display for BackupGroup { impl fmt::Display for BackupGroup {
#[inline] #[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.ns.is_root() {
fmt::Display::fmt(&self.group, f) fmt::Display::fmt(&self.group, f)
} else {
write!(f, "[{}]:{}", self.ns, self.group)
}
} }
} }
@ -237,6 +270,7 @@ impl From<BackupDir> for BackupGroup {
fn from(dir: BackupDir) -> BackupGroup { fn from(dir: BackupDir) -> BackupGroup {
BackupGroup { BackupGroup {
store: dir.store, store: dir.store,
ns: dir.ns,
group: dir.dir.group, group: dir.dir.group,
} }
} }
@ -246,6 +280,7 @@ impl From<&BackupDir> for BackupGroup {
fn from(dir: &BackupDir) -> BackupGroup { fn from(dir: &BackupDir) -> BackupGroup {
BackupGroup { BackupGroup {
store: Arc::clone(&dir.store), store: Arc::clone(&dir.store),
ns: dir.ns.clone(),
group: dir.dir.group.clone(), group: dir.dir.group.clone(),
} }
} }
@ -257,6 +292,7 @@ impl From<&BackupDir> for BackupGroup {
#[derive(Clone)] #[derive(Clone)]
pub struct BackupDir { pub struct BackupDir {
store: Arc<DataStore>, store: Arc<DataStore>,
ns: BackupNamespace,
dir: pbs_api_types::BackupDir, dir: pbs_api_types::BackupDir,
// backup_time as rfc3339 // backup_time as rfc3339
backup_time_string: String, backup_time_string: String,
@ -279,6 +315,7 @@ impl BackupDir {
Self { Self {
store: unsafe { DataStore::new_test() }, store: unsafe { DataStore::new_test() },
backup_time_string: Self::backup_time_to_string(dir.time).unwrap(), backup_time_string: Self::backup_time_to_string(dir.time).unwrap(),
ns: BackupNamespace::root(),
dir, dir,
} }
} }
@ -287,6 +324,7 @@ impl BackupDir {
let backup_time_string = Self::backup_time_to_string(backup_time)?; let backup_time_string = Self::backup_time_to_string(backup_time)?;
Ok(Self { Ok(Self {
store: group.store, store: group.store,
ns: group.ns,
dir: (group.group, backup_time).into(), dir: (group.group, backup_time).into(),
backup_time_string, backup_time_string,
}) })
@ -299,6 +337,7 @@ impl BackupDir {
let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?; let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
Ok(Self { Ok(Self {
store: group.store, store: group.store,
ns: group.ns,
dir: (group.group, backup_time).into(), dir: (group.group, backup_time).into(),
backup_time_string, backup_time_string,
}) })
@ -306,7 +345,7 @@ impl BackupDir {
#[inline] #[inline]
pub fn backup_ns(&self) -> &BackupNamespace { pub fn backup_ns(&self) -> &BackupNamespace {
&self.dir.group.ns &self.ns
} }
#[inline] #[inline]
@ -329,20 +368,16 @@ impl BackupDir {
} }
pub fn relative_path(&self) -> PathBuf { pub fn relative_path(&self) -> PathBuf {
format!("{}/{}", self.dir.group, self.backup_time_string).into() let mut path = self.store.namespace_path(&self.ns);
path.push(self.dir.group.ty.as_str());
path.push(&self.dir.group.id);
path.push(&self.backup_time_string);
path
} }
/// Returns the absolute path for backup_dir, using the cached formatted time string. /// Returns the absolute path for backup_dir, using the cached formatted time string.
pub fn full_path(&self) -> PathBuf { pub fn full_path(&self) -> PathBuf {
let mut base_path = self.store.base_path(); self.store.snapshot_path(&self.ns, &self.dir)
for ns in self.dir.group.ns.components() {
base_path.push("ns");
base_path.push(ns);
}
base_path.push(self.dir.group.ty.as_str());
base_path.push(&self.dir.group.id);
base_path.push(&self.backup_time_string);
base_path
} }
pub fn protected_file(&self) -> PathBuf { pub fn protected_file(&self) -> PathBuf {
@ -425,6 +460,46 @@ impl BackupDir {
Ok(()) Ok(())
} }
/// Get the datastore.
pub fn datastore(&self) -> &Arc<DataStore> {
&self.store
}
/// Returns the backup owner.
///
/// The backup owner is the entity who first created the backup group.
pub fn get_owner(&self) -> Result<Authid, Error> {
self.store.get_owner(&self.ns, self.as_ref())
}
/// Lock the snapshot and open a reader.
pub fn locked_reader(&self) -> Result<crate::SnapshotReader, Error> {
crate::SnapshotReader::new_do(self.clone())
}
/// Load the manifest without a lock. Must not be written back.
pub fn load_manifest(&self) -> Result<(BackupManifest, u64), Error> {
let blob = self.load_blob(MANIFEST_BLOB_NAME)?;
let raw_size = blob.raw_size();
let manifest = BackupManifest::try_from(blob)?;
Ok((manifest, raw_size))
}
/// Update the manifest of the specified snapshot. Never write a manifest directly,
/// only use this method - anything else may break locking guarantees.
pub fn update_manifest(
&self,
update_fn: impl FnOnce(&mut BackupManifest),
) -> Result<(), Error> {
self.store.update_manifest(self, update_fn)
}
}
impl AsRef<pbs_api_types::BackupNamespace> for BackupDir {
fn as_ref(&self) -> &pbs_api_types::BackupNamespace {
&self.ns
}
} }
impl AsRef<pbs_api_types::BackupDir> for BackupDir { impl AsRef<pbs_api_types::BackupDir> for BackupDir {
@ -465,7 +540,15 @@ impl From<BackupDir> for pbs_api_types::BackupDir {
impl fmt::Display for BackupDir { impl fmt::Display for BackupDir {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.ns.is_root() {
write!(f, "{}/{}", self.dir.group, self.backup_time_string) write!(f, "{}/{}", self.dir.group, self.backup_time_string)
} else {
write!(
f,
"[{}]:{}/{}",
self.ns, self.dir.group, self.backup_time_string
)
}
} }
} }

View File

@ -1,5 +1,4 @@
use std::collections::{HashMap, HashSet}; use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::io::{self, Write}; use std::io::{self, Write};
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@ -350,6 +349,7 @@ impl DataStore {
self.inner.chunk_store.base_path() self.inner.chunk_store.base_path()
} }
/// Returns the absolute path for a backup namespace on this datastore
pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf { pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
let mut path = self.base_path(); let mut path = self.base_path();
path.reserve(ns.path_len()); path.reserve(ns.path_len());
@ -409,23 +409,24 @@ impl DataStore {
Ok(()) Ok(())
} }
/// Returns the absolute path for a backup namespace on this datastore
pub fn ns_path(&self, ns: &BackupNamespace) -> PathBuf {
let mut full_path = self.base_path();
full_path.push(ns.path());
full_path
}
/// Returns the absolute path for a backup_group /// Returns the absolute path for a backup_group
pub fn group_path(&self, backup_group: &pbs_api_types::BackupGroup) -> PathBuf { pub fn group_path(
let mut full_path = self.base_path(); &self,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup,
) -> PathBuf {
let mut full_path = self.namespace_path(ns);
full_path.push(backup_group.to_string()); full_path.push(backup_group.to_string());
full_path full_path
} }
/// Returns the absolute path for backup_dir /// Returns the absolute path for backup_dir
pub fn snapshot_path(&self, backup_dir: &pbs_api_types::BackupDir) -> PathBuf { pub fn snapshot_path(
let mut full_path = self.base_path(); &self,
ns: &BackupNamespace,
backup_dir: &pbs_api_types::BackupDir,
) -> PathBuf {
let mut full_path = self.namespace_path(ns);
full_path.push(backup_dir.to_string()); full_path.push(backup_dir.to_string());
full_path full_path
} }
@ -537,9 +538,10 @@ impl DataStore {
/// Returns true if all snapshots were removed, and false if some were protected /// Returns true if all snapshots were removed, and false if some were protected
pub fn remove_backup_group( pub fn remove_backup_group(
self: &Arc<Self>, self: &Arc<Self>,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup, backup_group: &pbs_api_types::BackupGroup,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
let backup_group = self.backup_group(backup_group.clone()); let backup_group = self.backup_group(ns.clone(), backup_group.clone());
backup_group.destroy() backup_group.destroy()
} }
@ -547,10 +549,11 @@ impl DataStore {
/// Remove a backup directory including all content /// Remove a backup directory including all content
pub fn remove_backup_dir( pub fn remove_backup_dir(
self: &Arc<Self>, self: &Arc<Self>,
ns: &BackupNamespace,
backup_dir: &pbs_api_types::BackupDir, backup_dir: &pbs_api_types::BackupDir,
force: bool, force: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let backup_dir = self.backup_dir(backup_dir.clone())?; let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;
backup_dir.destroy(force) backup_dir.destroy(force)
} }
@ -560,9 +563,10 @@ impl DataStore {
/// Or None if there is no backup in the group (or the group dir does not exist). /// Or None if there is no backup in the group (or the group dir does not exist).
pub fn last_successful_backup( pub fn last_successful_backup(
self: &Arc<Self>, self: &Arc<Self>,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup, backup_group: &pbs_api_types::BackupGroup,
) -> Result<Option<i64>, Error> { ) -> Result<Option<i64>, Error> {
let backup_group = self.backup_group(backup_group.clone()); let backup_group = self.backup_group(ns.clone(), backup_group.clone());
let group_path = backup_group.full_group_path(); let group_path = backup_group.full_group_path();
@ -573,23 +577,31 @@ impl DataStore {
} }
} }
/// Return the path of the 'owner' file.
fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
self.group_path(ns, group).join("owner")
}
/// Returns the backup owner. /// Returns the backup owner.
/// ///
/// The backup owner is the entity who first created the backup group. /// The backup owner is the entity who first created the backup group.
pub fn get_owner(&self, backup_group: &pbs_api_types::BackupGroup) -> Result<Authid, Error> { pub fn get_owner(
let mut full_path = self.base_path(); &self,
full_path.push(backup_group.to_string()); ns: &BackupNamespace,
full_path.push("owner"); backup_group: &pbs_api_types::BackupGroup,
) -> Result<Authid, Error> {
let full_path = self.owner_path(ns, backup_group);
let owner = proxmox_sys::fs::file_read_firstline(full_path)?; let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
owner.trim_end().parse() // remove trailing newline owner.trim_end().parse() // remove trailing newline
} }
pub fn owns_backup( pub fn owns_backup(
&self, &self,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup, backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid, auth_id: &Authid,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
let owner = self.get_owner(backup_group)?; let owner = self.get_owner(ns, backup_group)?;
Ok(check_backup_owner(&owner, auth_id).is_ok()) Ok(check_backup_owner(&owner, auth_id).is_ok())
} }
@ -597,13 +609,12 @@ impl DataStore {
/// Set the backup owner. /// Set the backup owner.
pub fn set_owner( pub fn set_owner(
&self, &self,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup, backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid, auth_id: &Authid,
force: bool, force: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut path = self.base_path(); let path = self.owner_path(ns, backup_group);
path.push(backup_group.to_string());
path.push("owner");
let mut open_options = std::fs::OpenOptions::new(); let mut open_options = std::fs::OpenOptions::new();
open_options.write(true); open_options.write(true);
@ -633,12 +644,13 @@ impl DataStore {
/// This also acquires an exclusive lock on the directory and returns the lock guard. /// This also acquires an exclusive lock on the directory and returns the lock guard.
pub fn create_locked_backup_group( pub fn create_locked_backup_group(
&self, &self,
ns: &BackupNamespace,
backup_group: &pbs_api_types::BackupGroup, backup_group: &pbs_api_types::BackupGroup,
auth_id: &Authid, auth_id: &Authid,
) -> Result<(Authid, DirLockGuard), Error> { ) -> Result<(Authid, DirLockGuard), Error> {
// create intermediate path first: // create intermediate path first:
let mut full_path = self.base_path(); let mut full_path = self.base_path();
for ns in backup_group.ns.components() { for ns in ns.components() {
full_path.push("ns"); full_path.push("ns");
full_path.push(ns); full_path.push(ns);
} }
@ -655,8 +667,8 @@ impl DataStore {
"backup group", "backup group",
"another backup is already running", "another backup is already running",
)?; )?;
self.set_owner(backup_group, auth_id, false)?; self.set_owner(ns, backup_group, auth_id, false)?;
let owner = self.get_owner(backup_group)?; // just to be sure let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard)) Ok((owner, guard))
} }
Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => { Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
@ -665,7 +677,7 @@ impl DataStore {
"backup group", "backup group",
"another backup is already running", "another backup is already running",
)?; )?;
let owner = self.get_owner(backup_group)?; // just to be sure let owner = self.get_owner(ns, backup_group)?; // just to be sure
Ok((owner, guard)) Ok((owner, guard))
} }
Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err), Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
@ -677,11 +689,15 @@ impl DataStore {
/// The BackupGroup directory needs to exist. /// The BackupGroup directory needs to exist.
pub fn create_locked_backup_dir( pub fn create_locked_backup_dir(
&self, &self,
ns: &BackupNamespace,
backup_dir: &pbs_api_types::BackupDir, backup_dir: &pbs_api_types::BackupDir,
) -> Result<(PathBuf, bool, DirLockGuard), Error> { ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
let relative_path = PathBuf::from(backup_dir.to_string()); let full_path = self.snapshot_path(ns, backup_dir);
let mut full_path = self.base_path(); let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
full_path.push(&relative_path); format_err!(
"failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
)
})?;
let lock = || { let lock = || {
lock_dir_noblock( lock_dir_noblock(
@ -692,9 +708,9 @@ impl DataStore {
}; };
match std::fs::create_dir(&full_path) { match std::fs::create_dir(&full_path) {
Ok(_) => Ok((relative_path, true, lock()?)), Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => { Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
Ok((relative_path, false, lock()?)) Ok((relative_path.to_owned(), false, lock()?))
} }
Err(e) => Err(e.into()), Err(e) => Err(e.into()),
} }
@ -1135,10 +1151,7 @@ impl DataStore {
/// Load the manifest without a lock. Must not be written back. /// Load the manifest without a lock. Must not be written back.
pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> { pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
let blob = backup_dir.load_blob(MANIFEST_BLOB_NAME)?; backup_dir.load_manifest()
let raw_size = blob.raw_size();
let manifest = BackupManifest::try_from(blob)?;
Ok((manifest, raw_size))
} }
/// Update the manifest of the specified snapshot. Never write a manifest directly, /// Update the manifest of the specified snapshot. Never write a manifest directly,
@ -1240,8 +1253,12 @@ impl DataStore {
} }
/// Open a backup group from this datastore. /// Open a backup group from this datastore.
pub fn backup_group(self: &Arc<Self>, group: pbs_api_types::BackupGroup) -> BackupGroup { pub fn backup_group(
BackupGroup::new(Arc::clone(&self), group) self: &Arc<Self>,
ns: BackupNamespace,
group: pbs_api_types::BackupGroup,
) -> BackupGroup {
BackupGroup::new(Arc::clone(&self), ns, group)
} }
/// Open a backup group from this datastore. /// Open a backup group from this datastore.
@ -1254,19 +1271,25 @@ impl DataStore {
where where
T: Into<String>, T: Into<String>,
{ {
self.backup_group((ns, ty, id.into()).into()) self.backup_group(ns, (ty, id.into()).into())
} }
/*
/// Open a backup group from this datastore by backup group path such as `vm/100`. /// Open a backup group from this datastore by backup group path such as `vm/100`.
/// ///
/// Convenience method for `store.backup_group(path.parse()?)` /// Convenience method for `store.backup_group(path.parse()?)`
pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> { pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> {
Ok(self.backup_group(path.parse()?)) todo!("split out the namespace");
} }
*/
/// Open a snapshot (backup directory) from this datastore. /// Open a snapshot (backup directory) from this datastore.
pub fn backup_dir(self: &Arc<Self>, dir: pbs_api_types::BackupDir) -> Result<BackupDir, Error> { pub fn backup_dir(
BackupDir::with_group(self.backup_group(dir.group), dir.time) self: &Arc<Self>,
ns: BackupNamespace,
dir: pbs_api_types::BackupDir,
) -> Result<BackupDir, Error> {
BackupDir::with_group(self.backup_group(ns, dir.group), dir.time)
} }
/// Open a snapshot (backup directory) from this datastore. /// Open a snapshot (backup directory) from this datastore.
@ -1280,7 +1303,7 @@ impl DataStore {
where where
T: Into<String>, T: Into<String>,
{ {
self.backup_dir((ns, ty, id.into(), time).into()) self.backup_dir(ns, (ty, id.into(), time).into())
} }
/// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string. /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
@ -1292,10 +1315,12 @@ impl DataStore {
BackupDir::with_rfc3339(group, time_string.into()) BackupDir::with_rfc3339(group, time_string.into())
} }
/*
/// Open a snapshot (backup directory) from this datastore by a snapshot path. /// Open a snapshot (backup directory) from this datastore by a snapshot path.
pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> { pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> {
self.backup_dir(path.parse()?) todo!("split out the namespace");
} }
*/
} }
/// A iterator for all BackupDir's (Snapshots) in a BackupGroup /// A iterator for all BackupDir's (Snapshots) in a BackupGroup
@ -1391,7 +1416,8 @@ impl Iterator for ListGroups {
if BACKUP_ID_REGEX.is_match(name) { if BACKUP_ID_REGEX.is_match(name) {
return Some(Ok(BackupGroup::new( return Some(Ok(BackupGroup::new(
Arc::clone(&self.store), Arc::clone(&self.store),
(self.ns.clone(), group_type, name.to_owned()).into(), self.ns.clone(),
(group_type, name.to_owned()).into(),
))); )));
} }
} }

View File

@ -8,13 +8,14 @@ use nix::dir::Dir;
use proxmox_sys::fs::lock_dir_noblock_shared; use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_api_types::{BackupNamespace, Operation};
use crate::backup_info::BackupDir; use crate::backup_info::BackupDir;
use crate::dynamic_index::DynamicIndexReader; use crate::dynamic_index::DynamicIndexReader;
use crate::fixed_index::FixedIndexReader; use crate::fixed_index::FixedIndexReader;
use crate::index::IndexFile; use crate::index::IndexFile;
use crate::manifest::{archive_type, ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME}; use crate::manifest::{archive_type, ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
use crate::DataStore; use crate::DataStore;
use pbs_api_types::Operation;
/// Helper to access the contents of a datastore backup snapshot /// Helper to access the contents of a datastore backup snapshot
/// ///
@ -30,10 +31,14 @@ impl SnapshotReader {
/// Lock snapshot, reads the manifest and returns a new instance /// Lock snapshot, reads the manifest and returns a new instance
pub fn new( pub fn new(
datastore: Arc<DataStore>, datastore: Arc<DataStore>,
ns: BackupNamespace,
snapshot: pbs_api_types::BackupDir, snapshot: pbs_api_types::BackupDir,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let snapshot = datastore.backup_dir(snapshot)?; Self::new_do(datastore.backup_dir(ns, snapshot)?)
}
pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
let datastore = snapshot.datastore();
let snapshot_path = snapshot.full_path(); let snapshot_path = snapshot.full_path();
let locked_dir = let locked_dir =

View File

@ -242,13 +242,8 @@ async fn test_upload_speed(
client, client,
crypt_config.clone(), crypt_config.clone(),
repo.store(), repo.store(),
&( &BackupNamespace::root(),
BackupNamespace::root(), &(BackupType::Host, "benchmark".to_string(), backup_time).into(),
BackupType::Host,
"benchmark".to_string(),
backup_time,
)
.into(),
false, false,
true, true,
) )

View File

@ -8,6 +8,7 @@ use serde_json::Value;
use proxmox_router::cli::*; use proxmox_router::cli::*;
use proxmox_schema::api; use proxmox_schema::api;
use pbs_api_types::BackupNamespace;
use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_tools::crypt_config::CryptConfig; use pbs_tools::crypt_config::CryptConfig;
@ -16,9 +17,9 @@ use pbs_tools::json::required_string_param;
use crate::{ use crate::{
complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name, complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name,
complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group, complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group,
extract_repository_from_value, format_key_source, record_repository, BackupDir, extract_repository_from_value, format_key_source, optional_ns_param, record_repository,
BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile, BackupDir, BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader,
Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA, IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
}; };
#[api( #[api(
@ -28,6 +29,10 @@ use crate::{
schema: REPO_URL_SCHEMA, schema: REPO_URL_SCHEMA,
optional: true, optional: true,
}, },
ns: {
type: BackupNamespace,
optional: true,
},
snapshot: { snapshot: {
type: String, type: String,
description: "Snapshot path.", description: "Snapshot path.",
@ -48,6 +53,7 @@ use crate::{
async fn dump_catalog(param: Value) -> Result<Value, Error> { async fn dump_catalog(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?; let repo = extract_repository_from_value(&param)?;
let backup_ns = optional_ns_param(&param)?;
let path = required_string_param(&param, "snapshot")?; let path = required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = path.parse()?; let snapshot: BackupDir = path.parse()?;
@ -68,8 +74,15 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
let client = connect(&repo)?; let client = connect(&repo)?;
let client = let client = BackupReader::start(
BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?; client,
crypt_config.clone(),
repo.store(),
&backup_ns,
&snapshot,
true,
)
.await?;
let (manifest, _) = client.download_manifest().await?; let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@ -114,6 +127,10 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
#[api( #[api(
input: { input: {
properties: { properties: {
ns: {
type: BackupNamespace,
optional: true,
},
"snapshot": { "snapshot": {
type: String, type: String,
description: "Group/Snapshot path.", description: "Group/Snapshot path.",
@ -142,10 +159,11 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
async fn catalog_shell(param: Value) -> Result<(), Error> { async fn catalog_shell(param: Value) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?; let repo = extract_repository_from_value(&param)?;
let client = connect(&repo)?; let client = connect(&repo)?;
let backup_ns = optional_ns_param(&param)?;
let path = required_string_param(&param, "snapshot")?; let path = required_string_param(&param, "snapshot")?;
let archive_name = required_string_param(&param, "archive-name")?; let archive_name = required_string_param(&param, "archive-name")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?; let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
let crypto = crypto_parameters(&param)?; let crypto = crypto_parameters(&param)?;
@ -172,6 +190,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
client, client,
crypt_config.clone(), crypt_config.clone(),
repo.store(), repo.store(),
&backup_ns,
&backup_dir, &backup_dir,
true, true,
) )

View File

@ -127,24 +127,21 @@ fn record_repository(repo: &BackupRepository) {
); );
} }
enum List {
Any,
Group(BackupGroup),
Namespace(BackupNamespace),
}
async fn api_datastore_list_snapshots( async fn api_datastore_list_snapshots(
client: &HttpClient, client: &HttpClient,
store: &str, store: &str,
list: List, ns: &BackupNamespace,
group: Option<&BackupGroup>,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let path = format!("api2/json/admin/datastore/{}/snapshots", store); let path = format!("api2/json/admin/datastore/{}/snapshots", store);
let args = match list { let mut args = match group {
List::Group(group) => serde_json::to_value(group)?, Some(group) => serde_json::to_value(group)?,
List::Namespace(ns) => json!({ "backup-ns": ns }), None => json!({}),
List::Any => json!({}),
}; };
if !ns.is_root() {
args["backup-ns"] = serde_json::to_value(ns)?;
}
let mut result = client.get(&path, Some(args)).await?; let mut result = client.get(&path, Some(args)).await?;
@ -154,9 +151,10 @@ async fn api_datastore_list_snapshots(
pub async fn api_datastore_latest_snapshot( pub async fn api_datastore_latest_snapshot(
client: &HttpClient, client: &HttpClient,
store: &str, store: &str,
ns: &BackupNamespace,
group: BackupGroup, group: BackupGroup,
) -> Result<BackupDir, Error> { ) -> Result<BackupDir, Error> {
let list = api_datastore_list_snapshots(client, store, List::Group(group.clone())).await?; let list = api_datastore_list_snapshots(client, store, ns, Some(&group)).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?; let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
if list.is_empty() { if list.is_empty() {
@ -171,12 +169,13 @@ pub async fn api_datastore_latest_snapshot(
pub async fn dir_or_last_from_group( pub async fn dir_or_last_from_group(
client: &HttpClient, client: &HttpClient,
repo: &BackupRepository, repo: &BackupRepository,
ns: &BackupNamespace,
path: &str, path: &str,
) -> Result<BackupDir, Error> { ) -> Result<BackupDir, Error> {
match path.parse::<BackupPart>()? { match path.parse::<BackupPart>()? {
BackupPart::Dir(dir) => Ok(dir), BackupPart::Dir(dir) => Ok(dir),
BackupPart::Group(group) => { BackupPart::Group(group) => {
api_datastore_latest_snapshot(&client, repo.store(), group).await api_datastore_latest_snapshot(&client, repo.store(), ns, group).await
} }
} }
} }
@ -242,6 +241,14 @@ async fn backup_image<P: AsRef<Path>>(
Ok(stats) Ok(stats)
} }
pub fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
Ok(match param.get("ns") {
Some(Value::String(ns)) => ns.parse()?,
Some(_) => bail!("invalid namespace parameter"),
None => BackupNamespace::root(),
})
}
#[api( #[api(
input: { input: {
properties: { properties: {
@ -270,10 +277,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
let path = format!("api2/json/admin/datastore/{}/groups", repo.store()); let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
let backup_ns: BackupNamespace = match &param["ns"] { let backup_ns = optional_ns_param(&param)?;
Value::String(s) => s.parse()?,
_ => BackupNamespace::root(),
};
let mut result = client let mut result = client
.get(&path, Some(json!({ "backup-ns": backup_ns }))) .get(&path, Some(json!({ "backup-ns": backup_ns })))
.await?; .await?;
@ -692,7 +696,7 @@ async fn create_backup(
.as_str() .as_str()
.unwrap_or(proxmox_sys::nodename()); .unwrap_or(proxmox_sys::nodename());
let backup_namespace: BackupNamespace = match param.get("backup-ns") { let backup_ns: BackupNamespace = match param.get("backup-ns") {
Some(ns) => ns Some(ns) => ns
.as_str() .as_str()
.ok_or_else(|| format_err!("bad namespace {:?}", ns))? .ok_or_else(|| format_err!("bad namespace {:?}", ns))?
@ -822,13 +826,12 @@ async fn create_backup(
let client = connect_rate_limited(&repo, rate_limit)?; let client = connect_rate_limited(&repo, rate_limit)?;
record_repository(&repo); record_repository(&repo);
let snapshot = BackupDir::from(( let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
backup_namespace, if backup_ns.is_root() {
backup_type,
backup_id.to_owned(),
backup_time,
));
println!("Starting backup: {snapshot}"); println!("Starting backup: {snapshot}");
} else {
println!("Starting backup: [{backup_ns}]:{snapshot}");
}
println!("Client name: {}", proxmox_sys::nodename()); println!("Client name: {}", proxmox_sys::nodename());
@ -875,6 +878,7 @@ async fn create_backup(
client, client,
crypt_config.clone(), crypt_config.clone(),
repo.store(), repo.store(),
&backup_ns,
&snapshot, &snapshot,
verbose, verbose,
false, false,
@ -1157,6 +1161,10 @@ fn parse_archive_type(name: &str) -> (String, ArchiveType) {
schema: REPO_URL_SCHEMA, schema: REPO_URL_SCHEMA,
optional: true, optional: true,
}, },
ns: {
type: BackupNamespace,
optional: true,
},
snapshot: { snapshot: {
type: String, type: String,
description: "Group/Snapshot path.", description: "Group/Snapshot path.",
@ -1225,9 +1233,14 @@ async fn restore(param: Value) -> Result<Value, Error> {
let client = connect_rate_limited(&repo, rate_limit)?; let client = connect_rate_limited(&repo, rate_limit)?;
record_repository(&repo); record_repository(&repo);
let ns = match param.get("ns") {
Some(Value::String(ns)) => ns.parse()?,
Some(_) => bail!("invalid namespace parameter"),
None => BackupNamespace::root(),
};
let path = json::required_string_param(&param, "snapshot")?; let path = json::required_string_param(&param, "snapshot")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?; let backup_dir = dir_or_last_from_group(&client, &repo, &ns, &path).await?;
let target = json::required_string_param(&param, "target")?; let target = json::required_string_param(&param, "target")?;
let target = if target == "-" { None } else { Some(target) }; let target = if target == "-" { None } else { Some(target) };
@ -1250,6 +1263,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
client, client,
crypt_config.clone(), crypt_config.clone(),
repo.store(), repo.store(),
&ns,
&backup_dir, &backup_dir,
true, true,
) )

View File

@ -18,6 +18,7 @@ use proxmox_schema::*;
use proxmox_sys::fd::Fd; use proxmox_sys::fd::Fd;
use proxmox_sys::sortable; use proxmox_sys::sortable;
use pbs_api_types::BackupNamespace;
use pbs_client::tools::key_source::get_encryption_key_password; use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader}; use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_config::key_config::load_and_decrypt_key; use pbs_config::key_config::load_and_decrypt_key;
@ -30,7 +31,7 @@ use pbs_tools::json::required_string_param;
use crate::{ use crate::{
complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name, complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name,
complete_repository, connect, dir_or_last_from_group, extract_repository_from_value, complete_repository, connect, dir_or_last_from_group, extract_repository_from_value,
record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA, optional_ns_param, record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
}; };
#[sortable] #[sortable]
@ -39,6 +40,7 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
&ObjectSchema::new( &ObjectSchema::new(
"Mount pxar archive.", "Mount pxar archive.",
&sorted!([ &sorted!([
("ns", true, &BackupNamespace::API_SCHEMA,),
( (
"snapshot", "snapshot",
false, false,
@ -197,8 +199,9 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
record_repository(&repo); record_repository(&repo);
let backup_ns = optional_ns_param(&param)?;
let path = required_string_param(&param, "snapshot")?; let path = required_string_param(&param, "snapshot")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?; let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
let keyfile = param["keyfile"].as_str().map(PathBuf::from); let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let crypt_config = match keyfile { let crypt_config = match keyfile {
@ -229,6 +232,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
client, client,
crypt_config.clone(), crypt_config.clone(),
repo.store(), repo.store(),
&backup_ns,
&backup_dir, &backup_dir,
true, true,
) )

View File

@ -1,6 +1,6 @@
use std::sync::Arc; use std::sync::Arc;
use anyhow::{bail, Error}; use anyhow::Error;
use serde_json::{json, Value}; use serde_json::{json, Value};
use proxmox_router::cli::*; use proxmox_router::cli::*;
@ -17,7 +17,7 @@ use pbs_tools::json::required_string_param;
use crate::{ use crate::{
api_datastore_list_snapshots, complete_backup_group, complete_backup_snapshot, api_datastore_list_snapshots, complete_backup_group, complete_backup_snapshot,
complete_repository, connect, crypto_parameters, extract_repository_from_value, complete_repository, connect, crypto_parameters, extract_repository_from_value,
record_repository, BackupDir, List, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA, optional_ns_param, record_repository, BackupDir, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
}; };
#[api( #[api(
@ -56,17 +56,10 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
.map(|group| group.parse()) .map(|group| group.parse())
.transpose()?; .transpose()?;
let backup_ns: Option<BackupNamespace> = let backup_ns = optional_ns_param(&param)?;
param["ns"].as_str().map(|ns| ns.parse()).transpose()?;
let list = match (group, backup_ns) { let mut data =
(Some(group), None) => List::Group(group), api_datastore_list_snapshots(&client, repo.store(), &backup_ns, group.as_ref()).await?;
(None, Some(ns)) => List::Namespace(ns),
(None, None) => List::Any,
(Some(_), Some(_)) => bail!("'ns' and 'group' parameters are mutually exclusive"),
};
let mut data = api_datastore_list_snapshots(&client, repo.store(), list).await?;
record_repository(&repo); record_repository(&repo);

View File

@ -17,7 +17,7 @@ use proxmox_sys::fs::{create_path, CreateOptions};
use pxar::accessor::aio::Accessor; use pxar::accessor::aio::Accessor;
use pxar::decoder::aio::Decoder; use pxar::decoder::aio::Decoder;
use pbs_api_types::{BackupDir, CryptMode}; use pbs_api_types::{BackupDir, BackupNamespace, CryptMode};
use pbs_client::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq}; use pbs_client::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
use pbs_client::tools::{ use pbs_client::tools::{
complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value, complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
@ -95,6 +95,7 @@ fn keyfile_path(param: &Value) -> Option<String> {
async fn list_files( async fn list_files(
repo: BackupRepository, repo: BackupRepository,
ns: BackupNamespace,
snapshot: BackupDir, snapshot: BackupDir,
path: ExtractPath, path: ExtractPath,
crypt_config: Option<Arc<CryptConfig>>, crypt_config: Option<Arc<CryptConfig>>,
@ -102,8 +103,15 @@ async fn list_files(
driver: Option<BlockDriverType>, driver: Option<BlockDriverType>,
) -> Result<Vec<ArchiveEntry>, Error> { ) -> Result<Vec<ArchiveEntry>, Error> {
let client = connect(&repo)?; let client = connect(&repo)?;
let client = let client = BackupReader::start(
BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?; client,
crypt_config.clone(),
repo.store(),
&ns,
&snapshot,
true,
)
.await?;
let (manifest, _) = client.download_manifest().await?; let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?; manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@ -170,6 +178,10 @@ async fn list_files(
schema: REPO_URL_SCHEMA, schema: REPO_URL_SCHEMA,
optional: true, optional: true,
}, },
ns: {
type: BackupNamespace,
optional: true,
},
snapshot: { snapshot: {
type: String, type: String,
description: "Group/Snapshot path.", description: "Group/Snapshot path.",
@ -228,6 +240,7 @@ async fn list_files(
)] )]
/// List a directory from a backup snapshot. /// List a directory from a backup snapshot.
async fn list( async fn list(
ns: Option<BackupNamespace>,
snapshot: String, snapshot: String,
path: String, path: String,
base64: bool, base64: bool,
@ -236,6 +249,7 @@ async fn list(
param: Value, param: Value,
) -> Result<(), Error> { ) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?; let repo = extract_repository_from_value(&param)?;
let ns = ns.unwrap_or_default();
let snapshot: BackupDir = snapshot.parse()?; let snapshot: BackupDir = snapshot.parse()?;
let path = parse_path(path, base64)?; let path = parse_path(path, base64)?;
@ -261,7 +275,7 @@ async fn list(
let result = if let Some(timeout) = timeout { let result = if let Some(timeout) = timeout {
match tokio::time::timeout( match tokio::time::timeout(
std::time::Duration::from_secs(timeout), std::time::Duration::from_secs(timeout),
list_files(repo, snapshot, path, crypt_config, keyfile, driver), list_files(repo, ns, snapshot, path, crypt_config, keyfile, driver),
) )
.await .await
{ {
@ -269,7 +283,7 @@ async fn list(
Err(_) => Err(http_err!(SERVICE_UNAVAILABLE, "list not finished in time")), Err(_) => Err(http_err!(SERVICE_UNAVAILABLE, "list not finished in time")),
} }
} else { } else {
list_files(repo, snapshot, path, crypt_config, keyfile, driver).await list_files(repo, ns, snapshot, path, crypt_config, keyfile, driver).await
}; };
let output_format = get_output_format(&param); let output_format = get_output_format(&param);
@ -322,6 +336,10 @@ async fn list(
schema: REPO_URL_SCHEMA, schema: REPO_URL_SCHEMA,
optional: true, optional: true,
}, },
ns: {
type: BackupNamespace,
optional: true,
},
snapshot: { snapshot: {
type: String, type: String,
description: "Group/Snapshot path.", description: "Group/Snapshot path.",
@ -368,6 +386,7 @@ async fn list(
)] )]
/// Restore files from a backup snapshot. /// Restore files from a backup snapshot.
async fn extract( async fn extract(
ns: Option<BackupNamespace>,
snapshot: String, snapshot: String,
path: String, path: String,
base64: bool, base64: bool,
@ -376,6 +395,7 @@ async fn extract(
param: Value, param: Value,
) -> Result<(), Error> { ) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?; let repo = extract_repository_from_value(&param)?;
let ns = ns.unwrap_or_default();
let snapshot: BackupDir = snapshot.parse()?; let snapshot: BackupDir = snapshot.parse()?;
let orig_path = path; let orig_path = path;
let path = parse_path(orig_path.clone(), base64)?; let path = parse_path(orig_path.clone(), base64)?;
@ -401,8 +421,15 @@ async fn extract(
}; };
let client = connect(&repo)?; let client = connect(&repo)?;
let client = let client = BackupReader::start(
BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?; client,
crypt_config.clone(),
repo.store(),
&ns,
&snapshot,
true,
)
.await?;
let (manifest, _) = client.download_manifest().await?; let (manifest, _) = client.download_manifest().await?;
match path { match path {

View File

@ -59,6 +59,7 @@ use pbs_datastore::{
use pbs_tools::json::required_string_param; use pbs_tools::json::required_string_param;
use proxmox_rest_server::{formatter, WorkerTask}; use proxmox_rest_server::{formatter, WorkerTask};
use crate::api2::backup::optional_ns_param;
use crate::api2::node::rrd::create_value_from_rrd; use crate::api2::node::rrd::create_value_from_rrd;
use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter}; use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};
@ -66,29 +67,35 @@ use crate::server::jobstate::Job;
const GROUP_NOTES_FILE_NAME: &str = "notes"; const GROUP_NOTES_FILE_NAME: &str = "notes";
fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf { fn get_group_note_path(
let mut note_path = store.base_path(); store: &DataStore,
note_path.push(group.to_string()); ns: &BackupNamespace,
group: &pbs_api_types::BackupGroup,
) -> PathBuf {
let mut note_path = store.group_path(ns, group);
note_path.push(GROUP_NOTES_FILE_NAME); note_path.push(GROUP_NOTES_FILE_NAME);
note_path note_path
} }
fn check_priv_or_backup_owner( fn check_priv_or_backup_owner(
// FIXME: We could probably switch to pbs-datastore::BackupGroup here to replace all of store,
// ns and group.
store: &DataStore, store: &DataStore,
ns: &BackupNamespace,
group: &pbs_api_types::BackupGroup, group: &pbs_api_types::BackupGroup,
auth_id: &Authid, auth_id: &Authid,
required_privs: u64, required_privs: u64,
) -> Result<(), Error> { ) -> Result<(), Error> {
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let privs = if group.ns.is_root() { let privs = if ns.is_root() {
user_info.lookup_privs(auth_id, &["datastore", store.name()]) user_info.lookup_privs(auth_id, &["datastore", store.name()])
} else { } else {
user_info.lookup_privs(auth_id, &["datastore", store.name(), &group.ns.to_string()]) user_info.lookup_privs(auth_id, &["datastore", store.name(), &ns.to_string()])
}; };
if privs & required_privs == 0 { if privs & required_privs == 0 {
let owner = store.get_owner(group)?; let owner = store.get_owner(ns, group)?;
check_backup_owner(&owner, auth_id)?; check_backup_owner(&owner, auth_id)?;
} }
Ok(()) Ok(())
@ -212,10 +219,10 @@ pub fn list_groups(
let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0; let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
datastore datastore
.iter_backup_groups(backup_ns)? // FIXME: Namespaces and recursion parameters! .iter_backup_groups(backup_ns.clone())? // FIXME: Namespaces and recursion parameters!
.try_fold(Vec::new(), |mut group_info, group| { .try_fold(Vec::new(), |mut group_info, group| {
let group = group?; let group = group?;
let owner = match datastore.get_owner(group.as_ref()) { let owner = match datastore.get_owner(&backup_ns, group.as_ref()) {
Ok(auth_id) => auth_id, Ok(auth_id) => auth_id,
Err(err) => { Err(err) => {
let id = &store; let id = &store;
@ -248,7 +255,7 @@ pub fn list_groups(
}) })
.to_owned(); .to_owned();
let note_path = get_group_note_path(&datastore, group.as_ref()); let note_path = get_group_note_path(&datastore, &backup_ns, group.as_ref());
let comment = file_read_firstline(&note_path).ok(); let comment = file_read_firstline(&note_path).ok();
group_info.push(GroupListItem { group_info.push(GroupListItem {
@ -268,6 +275,10 @@ pub fn list_groups(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
group: { group: {
type: pbs_api_types::BackupGroup, type: pbs_api_types::BackupGroup,
flatten: true, flatten: true,
@ -283,24 +294,33 @@ pub fn list_groups(
/// Delete backup group including all snapshots. /// Delete backup group including all snapshots.
pub fn delete_group( pub fn delete_group(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
group: pbs_api_types::BackupGroup, group: pbs_api_types::BackupGroup,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?; check_priv_or_backup_owner(
&datastore,
&backup_ns,
&group,
&auth_id,
PRIV_DATASTORE_MODIFY,
)?;
if !datastore.remove_backup_group(&group)? { if !datastore.remove_backup_group(&backup_ns, &group)? {
bail!("group only partially deleted due to protected snapshots"); bail!("group only partially deleted due to protected snapshots");
} }
@ -311,6 +331,10 @@ pub fn delete_group(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_dir: { backup_dir: {
type: pbs_api_types::BackupDir, type: pbs_api_types::BackupDir,
flatten: true, flatten: true,
@ -327,25 +351,29 @@ pub fn delete_group(
/// List snapshot files. /// List snapshot files.
pub fn list_snapshot_files( pub fn list_snapshot_files(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_dir: pbs_api_types::BackupDir, backup_dir: pbs_api_types::BackupDir,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> { ) -> Result<Vec<BackupContent>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let snapshot = datastore.backup_dir(backup_dir)?; let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
snapshot.backup_ns(),
snapshot.as_ref(), snapshot.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
@ -362,6 +390,10 @@ pub fn list_snapshot_files(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_dir: { backup_dir: {
type: pbs_api_types::BackupDir, type: pbs_api_types::BackupDir,
flatten: true, flatten: true,
@ -377,30 +409,34 @@ pub fn list_snapshot_files(
/// Delete backup snapshot. /// Delete backup snapshot.
pub fn delete_snapshot( pub fn delete_snapshot(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_dir: pbs_api_types::BackupDir, backup_dir: pbs_api_types::BackupDir,
_info: &ApiMethod, _info: &ApiMethod,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let snapshot = datastore.backup_dir(backup_dir)?; let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
snapshot.backup_ns(),
snapshot.as_ref(), snapshot.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_MODIFY,
)?; )?;
datastore.remove_backup_dir(snapshot.as_ref(), false)?; snapshot.destroy(false)?;
Ok(Value::Null) Ok(Value::Null)
} }
@ -549,7 +585,7 @@ pub fn list_snapshots(
}; };
groups.iter().try_fold(Vec::new(), |mut snapshots, group| { groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
let owner = match datastore.get_owner(group.as_ref()) { let owner = match group.get_owner() {
Ok(auth_id) => auth_id, Ok(auth_id) => auth_id,
Err(err) => { Err(err) => {
eprintln!( eprintln!(
@ -583,7 +619,8 @@ fn get_snapshots_count(
store store
.iter_backup_groups_ok(Default::default())? // FIXME: Recurse! .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
.filter(|group| { .filter(|group| {
let owner = match store.get_owner(group.as_ref()) { // FIXME: namespace:
let owner = match store.get_owner(&BackupNamespace::root(), group.as_ref()) {
Ok(owner) => owner, Ok(owner) => owner,
Err(err) => { Err(err) => {
let id = store.name(); let id = store.name();
@ -763,7 +800,13 @@ pub fn verify(
let dir = let dir =
datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?; datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?; check_priv_or_backup_owner(
&datastore,
dir.backup_ns(),
dir.as_ref(),
&auth_id,
PRIV_DATASTORE_VERIFY,
)?;
backup_dir = Some(dir); backup_dir = Some(dir);
worker_type = "verify_snapshot"; worker_type = "verify_snapshot";
@ -776,11 +819,17 @@ pub fn verify(
backup_type, backup_type,
backup_id backup_id
); );
let group = pbs_api_types::BackupGroup::from((backup_ns, backup_type, backup_id)); let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?; check_priv_or_backup_owner(
&datastore,
&backup_ns,
&group,
&auth_id,
PRIV_DATASTORE_VERIFY,
)?;
backup_group = Some(datastore.backup_group(group)); backup_group = Some(datastore.backup_group(backup_ns, group));
worker_type = "verify_group"; worker_type = "verify_group";
} }
(None, None, None) => { (None, None, None) => {
@ -851,6 +900,10 @@ pub fn verify(
#[api( #[api(
input: { input: {
properties: { properties: {
"backup-ns": {
type: BackupNamespace,
optional: true,
},
group: { group: {
type: pbs_api_types::BackupGroup, type: pbs_api_types::BackupGroup,
flatten: true, flatten: true,
@ -879,6 +932,7 @@ pub fn verify(
)] )]
/// Prune a group on the datastore /// Prune a group on the datastore
pub fn prune( pub fn prune(
backup_ns: Option<BackupNamespace>,
group: pbs_api_types::BackupGroup, group: pbs_api_types::BackupGroup,
dry_run: bool, dry_run: bool,
prune_options: PruneOptions, prune_options: PruneOptions,
@ -887,18 +941,27 @@ pub fn prune(
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let group = datastore.backup_group(group); let group = datastore.backup_group(backup_ns, group);
check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?; check_priv_or_backup_owner(
&datastore,
group.backup_ns(),
group.as_ref(),
&auth_id,
PRIV_DATASTORE_MODIFY,
)?;
let worker_id = format!("{}:{}", store, group); let worker_id = format!("{}:{}", store, group);
@ -962,20 +1025,16 @@ pub fn prune(
task_log!(worker, "{}", msg); task_log!(worker, "{}", msg);
let mut result = json!({ prune_result.push(json!({
"backup-type": group.ty, "backup-type": group.ty,
"backup-id": group.id, "backup-id": group.id,
"backup-time": backup_time, "backup-time": backup_time,
"keep": keep, "keep": keep,
"protected": mark.protected(), "protected": mark.protected(),
}); }));
if !group.ns.is_root() {
result["backup-ns"] = serde_json::to_value(&group.ns)?;
}
prune_result.push(result);
if !(dry_run || keep) { if !(dry_run || keep) {
if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) { if let Err(err) = info.backup_dir.destroy(false) {
task_warn!( task_warn!(
worker, worker,
"failed to remove dir {:?}: {}", "failed to remove dir {:?}: {}",
@ -1231,20 +1290,22 @@ pub fn download_file(
async move { async move {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?; let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?; let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
let file_name = required_string_param(&param, "file-name")?.to_owned(); let file_name = required_string_param(&param, "file-name")?.to_owned();
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_READ, PRIV_DATASTORE_READ,
@ -1315,20 +1376,22 @@ pub fn download_file_decoded(
async move { async move {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?; let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?; let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
let file_name = required_string_param(&param, "file-name")?.to_owned(); let file_name = required_string_param(&param, "file-name")?.to_owned();
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_READ, PRIV_DATASTORE_READ,
@ -1445,23 +1508,18 @@ pub fn upload_backup_log(
async move { async move {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?; let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?; let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
get_ns_privs_checked( get_ns_privs_checked(&store, &backup_ns, &auth_id, PRIV_DATASTORE_BACKUP)?;
&store,
&backup_dir.group.ns,
&auth_id,
PRIV_DATASTORE_BACKUP,
)?;
let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
let file_name = CLIENT_LOG_BLOB_NAME; let file_name = CLIENT_LOG_BLOB_NAME;
let owner = datastore.get_owner(backup_dir.as_ref())?; let owner = backup_dir.get_owner()?;
check_backup_owner(&owner, &auth_id)?; check_backup_owner(&owner, &auth_id)?;
let mut path = datastore.base_path(); let mut path = backup_dir.full_path();
path.push(backup_dir.relative_path());
path.push(&file_name); path.push(&file_name);
if path.exists() { if path.exists() {
@ -1493,6 +1551,10 @@ pub fn upload_backup_log(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_dir: { backup_dir: {
type: pbs_api_types::BackupDir, type: pbs_api_types::BackupDir,
flatten: true, flatten: true,
@ -1512,23 +1574,26 @@ pub fn upload_backup_log(
/// Get the entries of the given path of the catalog /// Get the entries of the given path of the catalog
pub fn catalog( pub fn catalog(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_dir: pbs_api_types::BackupDir, backup_dir: pbs_api_types::BackupDir,
filepath: String, filepath: String,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> { ) -> Result<Vec<ArchiveEntry>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_READ, PRIV_DATASTORE_READ,
@ -1600,15 +1665,16 @@ pub fn pxar_file_download(
async move { async move {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?; let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?; let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
let filepath = required_string_param(&param, "filepath")?.to_owned(); let filepath = required_string_param(&param, "filepath")?.to_owned();
@ -1616,6 +1682,7 @@ pub fn pxar_file_download(
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_READ, PRIV_DATASTORE_READ,
@ -1786,6 +1853,10 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_group: { backup_group: {
type: pbs_api_types::BackupGroup, type: pbs_api_types::BackupGroup,
flatten: true, flatten: true,
@ -1801,21 +1872,29 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
/// Get "notes" for a backup group /// Get "notes" for a backup group
pub fn get_group_notes( pub fn get_group_notes(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_group: pbs_api_types::BackupGroup, backup_group: pbs_api_types::BackupGroup,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> { ) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?; check_priv_or_backup_owner(
&datastore,
&backup_ns,
&backup_group,
&auth_id,
PRIV_DATASTORE_AUDIT,
)?;
let note_path = get_group_note_path(&datastore, &backup_group); let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned())) Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
} }
@ -1823,6 +1902,10 @@ pub fn get_group_notes(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_group: { backup_group: {
type: pbs_api_types::BackupGroup, type: pbs_api_types::BackupGroup,
flatten: true, flatten: true,
@ -1841,22 +1924,30 @@ pub fn get_group_notes(
/// Set "notes" for a backup group /// Set "notes" for a backup group
pub fn set_group_notes( pub fn set_group_notes(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_group: pbs_api_types::BackupGroup, backup_group: pbs_api_types::BackupGroup,
notes: String, notes: String,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> { ) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?; check_priv_or_backup_owner(
&datastore,
&backup_ns,
&backup_group,
&auth_id,
PRIV_DATASTORE_MODIFY,
)?;
let note_path = get_group_note_path(&datastore, &backup_group); let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?; replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
Ok(()) Ok(())
@ -1866,6 +1957,10 @@ pub fn set_group_notes(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_dir: { backup_dir: {
type: pbs_api_types::BackupDir, type: pbs_api_types::BackupDir,
flatten: true, flatten: true,
@ -1881,28 +1976,31 @@ pub fn set_group_notes(
/// Get "notes" for a specific backup /// Get "notes" for a specific backup
pub fn get_notes( pub fn get_notes(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_dir: pbs_api_types::BackupDir, backup_dir: pbs_api_types::BackupDir,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> { ) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_AUDIT,
)?; )?;
let (manifest, _) = datastore.load_manifest(&backup_dir)?; let (manifest, _) = backup_dir.load_manifest()?;
let notes = manifest.unprotected["notes"].as_str().unwrap_or(""); let notes = manifest.unprotected["notes"].as_str().unwrap_or("");
@ -1913,6 +2011,10 @@ pub fn get_notes(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_dir: { backup_dir: {
type: pbs_api_types::BackupDir, type: pbs_api_types::BackupDir,
flatten: true, flatten: true,
@ -1931,30 +2033,33 @@ pub fn get_notes(
/// Set "notes" for a specific backup /// Set "notes" for a specific backup
pub fn set_notes( pub fn set_notes(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_dir: pbs_api_types::BackupDir, backup_dir: pbs_api_types::BackupDir,
notes: String, notes: String,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> { ) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_MODIFY,
)?; )?;
datastore backup_dir
.update_manifest(&backup_dir, |manifest| { .update_manifest(|manifest| {
manifest.unprotected["notes"] = notes.into(); manifest.unprotected["notes"] = notes.into();
}) })
.map_err(|err| format_err!("unable to update manifest blob - {}", err))?; .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
@ -1966,6 +2071,10 @@ pub fn set_notes(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_dir: { backup_dir: {
type: pbs_api_types::BackupDir, type: pbs_api_types::BackupDir,
flatten: true, flatten: true,
@ -1981,22 +2090,25 @@ pub fn set_notes(
/// Query protection for a specific backup /// Query protection for a specific backup
pub fn get_protection( pub fn get_protection(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_dir: pbs_api_types::BackupDir, backup_dir: pbs_api_types::BackupDir,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_AUDIT,
@ -2009,6 +2121,10 @@ pub fn get_protection(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_dir: { backup_dir: {
type: pbs_api_types::BackupDir, type: pbs_api_types::BackupDir,
flatten: true, flatten: true,
@ -2027,23 +2143,26 @@ pub fn get_protection(
/// En- or disable protection for a specific backup /// En- or disable protection for a specific backup
pub fn set_protection( pub fn set_protection(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_dir: pbs_api_types::BackupDir, backup_dir: pbs_api_types::BackupDir,
protected: bool, protected: bool,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> { ) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_ns = backup_ns.unwrap_or_default();
get_ns_privs_checked( get_ns_privs_checked(
&store, &store,
&backup_dir.group.ns, &backup_ns,
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
)?; )?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
check_priv_or_backup_owner( check_priv_or_backup_owner(
&datastore, &datastore,
backup_dir.backup_ns(),
backup_dir.as_ref(), backup_dir.as_ref(),
&auth_id, &auth_id,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_MODIFY,
@ -2056,6 +2175,10 @@ pub fn set_protection(
input: { input: {
properties: { properties: {
store: { schema: DATASTORE_SCHEMA }, store: { schema: DATASTORE_SCHEMA },
"backup-ns": {
type: BackupNamespace,
optional: true,
},
backup_group: { backup_group: {
type: pbs_api_types::BackupGroup, type: pbs_api_types::BackupGroup,
flatten: true, flatten: true,
@ -2074,6 +2197,7 @@ pub fn set_protection(
/// Change owner of a backup group /// Change owner of a backup group
pub fn set_backup_owner( pub fn set_backup_owner(
store: String, store: String,
backup_ns: Option<BackupNamespace>,
backup_group: pbs_api_types::BackupGroup, backup_group: pbs_api_types::BackupGroup,
new_owner: Authid, new_owner: Authid,
rpcenv: &mut dyn RpcEnvironment, rpcenv: &mut dyn RpcEnvironment,
@ -2081,13 +2205,14 @@ pub fn set_backup_owner(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let privs = get_ns_privs(&store, &backup_group.ns, &auth_id)?; let backup_ns = backup_ns.unwrap_or_default();
let backup_group = datastore.backup_group(backup_group); let privs = get_ns_privs(&store, &backup_ns, &auth_id)?;
let backup_group = datastore.backup_group(backup_ns, backup_group);
let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 { let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
true // High-privilege user/token true // High-privilege user/token
} else if (privs & PRIV_DATASTORE_BACKUP) != 0 { } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
let owner = datastore.get_owner(backup_group.as_ref())?; let owner = backup_group.get_owner()?;
match (owner.is_token(), new_owner.is_token()) { match (owner.is_token(), new_owner.is_token()) {
(true, true) => { (true, true) => {
@ -2137,7 +2262,7 @@ pub fn set_backup_owner(
); );
} }
datastore.set_owner(backup_group.as_ref(), &new_owner, true)?; backup_group.set_owner(&new_owner, true)?;
Ok(()) Ok(())
} }

View File

@ -614,7 +614,7 @@ impl BackupEnvironment {
.map_err(|err| format_err!("unable to update manifest blob - {}", err))?; .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
if let Some(base) = &self.last_backup { if let Some(base) = &self.last_backup {
let path = self.datastore.snapshot_path(base.backup_dir.as_ref()); let path = base.backup_dir.full_path();
if !path.exists() { if !path.exists() {
bail!( bail!(
"base snapshot {} was removed during backup, cannot finish as chunks might be missing", "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
@ -710,8 +710,11 @@ impl BackupEnvironment {
let mut state = self.state.lock().unwrap(); let mut state = self.state.lock().unwrap();
state.finished = true; state.finished = true;
self.datastore self.datastore.remove_backup_dir(
.remove_backup_dir(self.backup_dir.as_ref(), true)?; self.backup_dir.backup_ns(),
self.backup_dir.as_ref(),
true,
)?;
Ok(()) Ok(())
} }

View File

@ -17,9 +17,9 @@ use proxmox_schema::*;
use proxmox_sys::sortable; use proxmox_sys::sortable;
use pbs_api_types::{ use pbs_api_types::{
Authid, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState,
BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
}; };
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile; use pbs_datastore::index::IndexFile;
@ -58,6 +58,14 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
&Permission::Anybody &Permission::Anybody
); );
pub(crate) fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
match param.get("backup-ns") {
Some(Value::String(ns)) => ns.parse(),
None => Ok(BackupNamespace::root()),
_ => bail!("invalid backup-ns parameter"),
}
}
fn upgrade_to_backup_protocol( fn upgrade_to_backup_protocol(
parts: Parts, parts: Parts,
req_body: Body, req_body: Body,
@ -72,9 +80,9 @@ fn upgrade_to_backup_protocol(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?; let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?.to_owned(); let store = required_string_param(&param, "store")?.to_owned();
let backup_ns = optional_ns_param(&param)?;
let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?; let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;
let backup_ns = &backup_dir_arg.group.ns;
let user_info = CachedUserInfo::new()?; let user_info = CachedUserInfo::new()?;
let privs = if backup_ns.is_root() { let privs = if backup_ns.is_root() {
@ -105,7 +113,7 @@ fn upgrade_to_backup_protocol(
); );
} }
if !datastore.ns_path(&backup_ns).exists() { if !datastore.namespace_path(&backup_ns).exists() {
proxmox_router::http_bail!(NOT_FOUND, "namespace not found"); proxmox_router::http_bail!(NOT_FOUND, "namespace not found");
} }
@ -113,7 +121,7 @@ fn upgrade_to_backup_protocol(
let env_type = rpcenv.env_type(); let env_type = rpcenv.env_type();
let backup_group = datastore.backup_group(backup_dir_arg.group.clone()); let backup_group = datastore.backup_group(backup_ns, backup_dir_arg.group.clone());
let worker_type = if backup_group.backup_type() == BackupType::Host let worker_type = if backup_group.backup_type() == BackupType::Host
&& backup_group.backup_id() == "benchmark" && backup_group.backup_id() == "benchmark"
@ -130,8 +138,11 @@ fn upgrade_to_backup_protocol(
}; };
// lock backup group to only allow one backup per group at a time // lock backup group to only allow one backup per group at a time
let (owner, _group_guard) = let (owner, _group_guard) = datastore.create_locked_backup_group(
datastore.create_locked_backup_group(backup_group.as_ref(), &auth_id)?; backup_group.backup_ns(),
backup_group.as_ref(),
&auth_id,
)?;
// permission check // permission check
let correct_owner = let correct_owner =
@ -169,7 +180,7 @@ fn upgrade_to_backup_protocol(
} }
// lock last snapshot to prevent forgetting/pruning it during backup // lock last snapshot to prevent forgetting/pruning it during backup
let full_path = datastore.snapshot_path(last.backup_dir.as_ref()); let full_path = last.backup_dir.full_path();
Some(lock_dir_noblock_shared( Some(lock_dir_noblock_shared(
&full_path, &full_path,
"snapshot", "snapshot",
@ -179,7 +190,8 @@ fn upgrade_to_backup_protocol(
None None
}; };
let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(backup_dir.as_ref())?; let (path, is_new, snap_guard) =
datastore.create_locked_backup_dir(backup_dir.backup_ns(), backup_dir.as_ref())?;
if !is_new { if !is_new {
bail!("backup directory already exists."); bail!("backup directory already exists.");
} }
@ -818,7 +830,7 @@ fn download_previous(
None => bail!("no valid previous backup"), None => bail!("no valid previous backup"),
}; };
let mut path = env.datastore.snapshot_path(last_backup.backup_dir.as_ref()); let mut path = last_backup.backup_dir.full_path();
path.push(&archive_name); path.push(&archive_name);
{ {

View File

@ -29,6 +29,7 @@ use pbs_tools::json::required_string_param;
use proxmox_rest_server::{H2Service, WorkerTask}; use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared; use proxmox_sys::fs::lock_dir_noblock_shared;
use crate::api2::backup::optional_ns_param;
use crate::api2::helpers; use crate::api2::helpers;
mod environment; mod environment;
@ -91,6 +92,7 @@ fn upgrade_to_backup_reader_protocol(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?; let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let backup_ns = optional_ns_param(&param)?;
let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?; let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?;
let protocols = parts let protocols = parts
@ -112,9 +114,9 @@ fn upgrade_to_backup_reader_protocol(
let env_type = rpcenv.env_type(); let env_type = rpcenv.env_type();
let backup_dir = datastore.backup_dir(backup_dir)?; let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
if !priv_read { if !priv_read {
let owner = datastore.get_owner(backup_dir.as_ref())?; let owner = backup_dir.get_owner()?;
let correct_owner = owner == auth_id let correct_owner = owner == auth_id
|| (owner.is_token() && Authid::from(owner.user().clone()) == auth_id); || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
if !correct_owner { if !correct_owner {

View File

@ -17,7 +17,7 @@ use pbs_api_types::{
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo}; use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::{DataStore, SnapshotReader, StoreProgress}; use pbs_datastore::{DataStore, StoreProgress};
use proxmox_rest_server::WorkerTask; use proxmox_rest_server::WorkerTask;
use crate::{ use crate::{
@ -577,7 +577,7 @@ pub fn backup_snapshot(
) -> Result<bool, Error> { ) -> Result<bool, Error> {
task_log!(worker, "backup snapshot {}", snapshot); task_log!(worker, "backup snapshot {}", snapshot);
let snapshot_reader = match SnapshotReader::new(datastore.clone(), (&snapshot).into()) { let snapshot_reader = match snapshot.locked_reader() {
Ok(reader) => reader, Ok(reader) => reader,
Err(err) => { Err(err) => {
// ignore missing snapshots and continue // ignore missing snapshots and continue

View File

@ -17,9 +17,9 @@ use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use proxmox_uuid::Uuid; use proxmox_uuid::Uuid;
use pbs_api_types::{ use pbs_api_types::{
Authid, CryptMode, Operation, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA, Authid, BackupNamespace, CryptMode, Operation, Userid, DATASTORE_MAP_ARRAY_SCHEMA,
DRIVE_NAME_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA, PRIV_TAPE_READ, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
}; };
use pbs_config::CachedUserInfo; use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader; use pbs_datastore::dynamic_index::DynamicIndexReader;
@ -401,6 +401,10 @@ fn restore_list_worker(
restore_owner: &Authid, restore_owner: &Authid,
email: Option<String>, email: Option<String>,
) -> Result<(), Error> { ) -> Result<(), Error> {
// FIXME: Namespace needs to come from somewhere, `snapshots` is just a snapshot string list
// here.
let ns = BackupNamespace::root();
let base_path: PathBuf = format!("{}/{}", RESTORE_TMP_DIR, media_set_uuid).into(); let base_path: PathBuf = format!("{}/{}", RESTORE_TMP_DIR, media_set_uuid).into();
std::fs::create_dir_all(&base_path)?; std::fs::create_dir_all(&base_path)?;
@ -430,7 +434,7 @@ fn restore_list_worker(
})?; })?;
let (owner, _group_lock) = let (owner, _group_lock) =
datastore.create_locked_backup_group(backup_dir.as_ref(), restore_owner)?; datastore.create_locked_backup_group(&ns, backup_dir.as_ref(), restore_owner)?;
if restore_owner != &owner { if restore_owner != &owner {
// only the owner is allowed to create additional snapshots // only the owner is allowed to create additional snapshots
task_warn!( task_warn!(
@ -458,7 +462,8 @@ fn restore_list_worker(
continue; continue;
}; };
let (_rel_path, is_new, snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?; let (_rel_path, is_new, snap_lock) =
datastore.create_locked_backup_dir(&ns, &backup_dir)?;
if !is_new { if !is_new {
task_log!( task_log!(
@ -586,7 +591,7 @@ fn restore_list_worker(
tmp_path.push(&source_datastore); tmp_path.push(&source_datastore);
tmp_path.push(snapshot); tmp_path.push(snapshot);
let path = datastore.snapshot_path(&backup_dir); let path = datastore.snapshot_path(&ns, &backup_dir);
for entry in std::fs::read_dir(tmp_path)? { for entry in std::fs::read_dir(tmp_path)? {
let entry = entry?; let entry = entry?;
@ -1036,12 +1041,17 @@ fn restore_archive<'a>(
snapshot snapshot
); );
// FIXME: Namespace
let backup_ns = BackupNamespace::root();
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?; let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
if let Some((store_map, authid)) = target.as_ref() { if let Some((store_map, authid)) = target.as_ref() {
if let Some(datastore) = store_map.get_datastore(&datastore_name) { if let Some(datastore) = store_map.get_datastore(&datastore_name) {
let (owner, _group_lock) = let (owner, _group_lock) = datastore.create_locked_backup_group(
datastore.create_locked_backup_group(backup_dir.as_ref(), authid)?; &backup_ns,
backup_dir.as_ref(),
authid,
)?;
if *authid != &owner { if *authid != &owner {
// only the owner is allowed to create additional snapshots // only the owner is allowed to create additional snapshots
bail!( bail!(
@ -1053,7 +1063,7 @@ fn restore_archive<'a>(
} }
let (rel_path, is_new, _snap_lock) = let (rel_path, is_new, _snap_lock) =
datastore.create_locked_backup_dir(backup_dir.as_ref())?; datastore.create_locked_backup_dir(&backup_ns, backup_dir.as_ref())?;
let mut path = datastore.base_path(); let mut path = datastore.base_path();
path.push(rel_path); path.push(rel_path);

View File

@ -8,7 +8,9 @@ use anyhow::{bail, format_err, Error};
use proxmox_sys::{task_log, WorkerTaskContext}; use proxmox_sys::{task_log, WorkerTaskContext};
use pbs_api_types::{Authid, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID}; use pbs_api_types::{
Authid, BackupNamespace, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID,
};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo}; use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile; use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo}; use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
@ -324,7 +326,7 @@ pub fn verify_backup_dir(
filter: Option<&dyn Fn(&BackupManifest) -> bool>, filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<bool, Error> { ) -> Result<bool, Error> {
let snap_lock = lock_dir_noblock_shared( let snap_lock = lock_dir_noblock_shared(
&verify_worker.datastore.snapshot_path(backup_dir.as_ref()), &backup_dir.full_path(),
"snapshot", "snapshot",
"locked by another operation", "locked by another operation",
); );
@ -510,7 +512,13 @@ pub fn verify_all_backups(
} }
let filter_by_owner = |group: &BackupGroup| { let filter_by_owner = |group: &BackupGroup| {
match (verify_worker.datastore.get_owner(group.as_ref()), &owner) { match (
// FIXME: with recursion the namespace needs to come from the iterator...
verify_worker
.datastore
.get_owner(&BackupNamespace::root(), group.as_ref()),
&owner,
) {
(Ok(ref group_owner), Some(owner)) => { (Ok(ref group_owner), Some(owner)) => {
group_owner == owner group_owner == owner
|| (group_owner.is_token() || (group_owner.is_token()

View File

@ -45,11 +45,12 @@ pub fn prune_datastore(
let has_privs = privs & PRIV_DATASTORE_MODIFY != 0; let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;
// FIXME: Namespace recursion! // FIXME: Namespace recursion!
for group in datastore.iter_backup_groups(ns)? { for group in datastore.iter_backup_groups(ns.clone())? {
let ns_recursed = &ns; // remove_backup_dir might need the inner one
let group = group?; let group = group?;
let list = group.list_backups()?; let list = group.list_backups()?;
if !has_privs && !datastore.owns_backup(group.as_ref(), &auth_id)? { if !has_privs && !datastore.owns_backup(&ns_recursed, group.as_ref(), &auth_id)? {
continue; continue;
} }
@ -75,7 +76,9 @@ pub fn prune_datastore(
info.backup_dir.backup_time_string() info.backup_dir.backup_time_string()
); );
if !keep && !dry_run { if !keep && !dry_run {
if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) { if let Err(err) =
datastore.remove_backup_dir(ns_recursed, info.backup_dir.as_ref(), false)
{
task_warn!( task_warn!(
worker, worker,
"failed to remove dir {:?}: {}", "failed to remove dir {:?}: {}",

View File

@ -15,7 +15,8 @@ use proxmox_router::HttpError;
use proxmox_sys::task_log; use proxmox_sys::task_log;
use pbs_api_types::{ use pbs_api_types::{
Authid, GroupFilter, GroupListItem, Operation, RateLimitConfig, Remote, SnapshotListItem, Authid, BackupNamespace, GroupFilter, GroupListItem, Operation, RateLimitConfig, Remote,
SnapshotListItem,
}; };
use pbs_client::{ use pbs_client::{
@ -504,7 +505,9 @@ async fn pull_snapshot_from(
snapshot: &pbs_api_types::BackupDir, snapshot: &pbs_api_types::BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>, downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(snapshot)?; // FIXME: Namespace support requires source AND target namespace
let ns = BackupNamespace::root();
let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&ns, snapshot)?;
let snapshot_path = snapshot.to_string(); let snapshot_path = snapshot.to_string();
if is_new { if is_new {
@ -519,7 +522,7 @@ async fn pull_snapshot_from(
) )
.await .await
{ {
if let Err(cleanup_err) = tgt_store.remove_backup_dir(snapshot, true) { if let Err(cleanup_err) = tgt_store.remove_backup_dir(&ns, snapshot, true) {
task_log!(worker, "cleanup error - {}", cleanup_err); task_log!(worker, "cleanup error - {}", cleanup_err);
} }
return Err(err); return Err(err);
@ -604,6 +607,9 @@ async fn pull_group(
group: &pbs_api_types::BackupGroup, group: &pbs_api_types::BackupGroup,
progress: &mut StoreProgress, progress: &mut StoreProgress,
) -> Result<(), Error> { ) -> Result<(), Error> {
// FIXME: Namespace support
let ns = BackupNamespace::root();
let path = format!( let path = format!(
"api2/json/admin/datastore/{}/snapshots", "api2/json/admin/datastore/{}/snapshots",
params.source.store() params.source.store()
@ -623,7 +629,7 @@ async fn pull_group(
let fingerprint = client.fingerprint(); let fingerprint = client.fingerprint();
let last_sync = params.store.last_successful_backup(group)?; let last_sync = params.store.last_successful_backup(&ns, group)?;
let mut remote_snapshots = std::collections::HashSet::new(); let mut remote_snapshots = std::collections::HashSet::new();
@ -674,8 +680,15 @@ async fn pull_group(
options, options,
)?; )?;
let reader = let reader = BackupReader::start(
BackupReader::start(new_client, None, params.source.store(), &snapshot, true).await?; new_client,
None,
params.source.store(),
&ns,
&snapshot,
true,
)
.await?;
let result = pull_snapshot_from( let result = pull_snapshot_from(
worker, worker,
@ -693,7 +706,7 @@ async fn pull_group(
} }
if params.remove_vanished { if params.remove_vanished {
let group = params.store.backup_group(group.clone()); let group = params.store.backup_group(ns.clone(), group.clone());
let local_list = group.list_backups()?; let local_list = group.list_backups()?;
for info in local_list { for info in local_list {
let backup_time = info.backup_dir.backup_time(); let backup_time = info.backup_dir.backup_time();
@ -715,7 +728,7 @@ async fn pull_group(
); );
params params
.store .store
.remove_backup_dir(info.backup_dir.as_ref(), false)?; .remove_backup_dir(&ns, info.backup_dir.as_ref(), false)?;
} }
} }
@ -744,6 +757,10 @@ pub async fn pull_store(
client: &HttpClient, client: &HttpClient,
params: &PullParameters, params: &PullParameters,
) -> Result<(), Error> { ) -> Result<(), Error> {
// FIXME: Namespace support requires source AND target namespace
let ns = BackupNamespace::root();
let local_ns = BackupNamespace::root();
// explicit create shared lock to prevent GC on newly created chunks // explicit create shared lock to prevent GC on newly created chunks
let _shared_store_lock = params.store.try_shared_chunk_store_lock()?; let _shared_store_lock = params.store.try_shared_chunk_store_lock()?;
@ -806,9 +823,10 @@ pub async fn pull_store(
progress.done_snapshots = 0; progress.done_snapshots = 0;
progress.group_snapshots = 0; progress.group_snapshots = 0;
let (owner, _lock_guard) = match params let (owner, _lock_guard) =
match params
.store .store
.create_locked_backup_group(&group, &params.owner) .create_locked_backup_group(&ns, &group, &params.owner)
{ {
Ok(result) => result, Ok(result) => result,
Err(err) => { Err(err) => {
@ -848,7 +866,7 @@ pub async fn pull_store(
if new_groups.contains(local_group.as_ref()) { if new_groups.contains(local_group.as_ref()) {
continue; continue;
} }
let owner = params.store.get_owner(&local_group.group())?; let owner = params.store.get_owner(&local_ns, &local_group.group())?;
if check_backup_owner(&owner, &params.owner).is_err() { if check_backup_owner(&owner, &params.owner).is_err() {
continue; continue;
} }
@ -863,7 +881,7 @@ pub async fn pull_store(
local_group.backup_type(), local_group.backup_type(),
local_group.backup_id() local_group.backup_id()
); );
match params.store.remove_backup_group(local_group.as_ref()) { match params.store.remove_backup_group(&ns, local_group.as_ref()) {
Ok(true) => {} Ok(true) => {}
Ok(false) => { Ok(false) => {
task_log!( task_log!(