split the namespace out of BackupGroup/Dir api types
We decided to go this route because it'll most likely be safer in the API, as we need to explicitly add namespace support to the various API endpoints this way.

For example, 'pull' should have two namespaces: local and remote, and the GroupFilter (which would otherwise contain exactly *one* namespace parameter) needs to be applied on both sides (to decide what to pull from the remote, and what to *remove* locally as cleanup).

The *datastore* types still contain the namespace and have a `.backup_ns()` getter.

Note that the datastore's `Display` implementations are no longer safe to use as a deserializable string.

Additionally, some datastore-based methods are now exposed via the BackupGroup/BackupDir types to avoid a "round trip" in code.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent 1baf9030ad · commit 133d718fe4
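For orientation, a minimal sketch of the calling convention this commit introduces (illustrative values only; the tuple `From` impls, `root()`/`is_root()`, and the changed `start` signatures all appear in the diff below):

    use pbs_api_types::{BackupDir, BackupNamespace, BackupType};

    fn main() {
        // The API types no longer carry a namespace...
        let dir: BackupDir = (BackupType::Host, "elsa".to_string(), 1_657_000_000).into();
        // ...so it travels as an explicit, separate parameter, e.g.:
        //   BackupReader::start(client, crypt_config, "store2", &ns, &dir, true)
        let ns = BackupNamespace::root();
        assert!(ns.is_root());
        println!("{dir}");
    }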
@@ -37,13 +37,8 @@ async fn run() -> Result<(), Error> {
         client,
         None,
         "store2",
-        &(
-            BackupNamespace::root(),
-            BackupType::Host,
-            "elsa".to_string(),
-            backup_time,
-        )
-            .into(),
+        &BackupNamespace::root(),
+        &(BackupType::Host, "elsa".to_string(), backup_time).into(),
         true,
     )
     .await?;

@@ -21,13 +21,8 @@ async fn upload_speed() -> Result<f64, Error> {
         client,
         None,
         datastore,
-        &(
-            BackupNamespace::root(),
-            BackupType::Host,
-            "speedtest".to_string(),
-            backup_time,
-        )
-            .into(),
+        &BackupNamespace::root(),
+        &(BackupType::Host, "speedtest".to_string(), backup_time).into(),
         false,
         true,
     )
@@ -25,8 +25,7 @@ const_regex! {
     pub BACKUP_DATE_REGEX = concat!(r"^", BACKUP_TIME_RE!() ,r"$");

     pub GROUP_PATH_REGEX = concat!(
-        r"^(", BACKUP_NS_PATH_RE!(), r"/)?",
-        r"(", BACKUP_TYPE_RE!(), ")/",
+        r"^(", BACKUP_TYPE_RE!(), ")/",
         r"(", BACKUP_ID_RE!(), r")$",
     );

@@ -848,7 +847,6 @@ impl std::cmp::PartialOrd for BackupType {

 #[api(
     properties: {
-        "backup-ns": { type: BackupNamespace, optional: true },
         "backup-type": { type: BackupType },
         "backup-id": { schema: BACKUP_ID_SCHEMA },
     },
@@ -857,14 +855,6 @@ impl std::cmp::PartialOrd for BackupType {
 #[serde(rename_all = "kebab-case")]
 /// A backup group (without a data store).
 pub struct BackupGroup {
-    /// An optional namespace this backup belongs to.
-    #[serde(
-        rename = "backup-ns",
-        skip_serializing_if = "BackupNamespace::is_root",
-        default
-    )]
-    pub ns: BackupNamespace,
-
     /// Backup type.
     #[serde(rename = "backup-type")]
     pub ty: BackupType,
@@ -875,12 +865,8 @@ pub struct BackupGroup {
 }

 impl BackupGroup {
-    pub fn new<T: Into<String>>(ns: BackupNamespace, ty: BackupType, id: T) -> Self {
-        Self {
-            ns,
-            ty,
-            id: id.into(),
-        }
+    pub fn new<T: Into<String>>(ty: BackupType, id: T) -> Self {
+        Self { ty, id: id.into() }
     }

     pub fn matches(&self, filter: &crate::GroupFilter) -> bool {
@@ -906,24 +892,18 @@ impl AsRef<BackupGroup> for BackupGroup {
     }
 }

-impl From<(BackupNamespace, BackupType, String)> for BackupGroup {
+impl From<(BackupType, String)> for BackupGroup {
     #[inline]
-    fn from(data: (BackupNamespace, BackupType, String)) -> Self {
+    fn from(data: (BackupType, String)) -> Self {
         Self {
-            ns: data.0,
-            ty: data.1,
-            id: data.2,
+            ty: data.0,
+            id: data.1,
         }
     }
 }

 impl std::cmp::Ord for BackupGroup {
     fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        let ns_order = self.ns.cmp(&other.ns);
-        if ns_order != std::cmp::Ordering::Equal {
-            return ns_order;
-        }
-
         let type_order = self.ty.cmp(&other.ty);
         if type_order != std::cmp::Ordering::Equal {
             return type_order;
@@ -949,11 +929,7 @@ impl std::cmp::PartialOrd for BackupGroup {

 impl fmt::Display for BackupGroup {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        if self.ns.is_root() {
-            write!(f, "{}/{}", self.ty, self.id)
-        } else {
-            write!(f, "{}/{}/{}", self.ns.display_as_path(), self.ty, self.id)
-        }
+        write!(f, "{}/{}", self.ty, self.id)
     }
 }

@@ -969,9 +945,8 @@ impl std::str::FromStr for BackupGroup {
             .ok_or_else(|| format_err!("unable to parse backup group path '{}'", path))?;

         Ok(Self {
-            ns: BackupNamespace::from_path(cap.get(1).unwrap().as_str())?,
-            ty: cap.get(2).unwrap().as_str().parse()?,
-            id: cap.get(3).unwrap().as_str().to_owned(),
+            ty: cap.get(1).unwrap().as_str().parse()?,
+            id: cap.get(2).unwrap().as_str().to_owned(),
         })
     }
 }
@@ -1020,27 +995,22 @@ impl From<(BackupGroup, i64)> for BackupDir {
     }
 }

-impl From<(BackupNamespace, BackupType, String, i64)> for BackupDir {
-    fn from(data: (BackupNamespace, BackupType, String, i64)) -> Self {
+impl From<(BackupType, String, i64)> for BackupDir {
+    fn from(data: (BackupType, String, i64)) -> Self {
         Self {
-            group: (data.0, data.1, data.2).into(),
-            time: data.3,
+            group: (data.0, data.1).into(),
+            time: data.2,
         }
     }
 }

 impl BackupDir {
-    pub fn with_rfc3339<T>(
-        ns: BackupNamespace,
-        ty: BackupType,
-        id: T,
-        backup_time_string: &str,
-    ) -> Result<Self, Error>
+    pub fn with_rfc3339<T>(ty: BackupType, id: T, backup_time_string: &str) -> Result<Self, Error>
     where
         T: Into<String>,
     {
         let time = proxmox_time::parse_rfc3339(&backup_time_string)?;
-        let group = BackupGroup::new(ns, ty, id.into());
+        let group = BackupGroup::new(ty, id.into());
         Ok(Self { group, time })
     }

@@ -1053,11 +1023,6 @@ impl BackupDir {
     pub fn id(&self) -> &str {
         &self.group.id
     }
-
-    #[inline]
-    pub fn ns(&self) -> &BackupNamespace {
-        &self.group.ns
-    }
 }

 impl std::str::FromStr for BackupDir {
@@ -1071,15 +1036,10 @@ impl std::str::FromStr for BackupDir {
             .captures(path)
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

-        let ns = match cap.get(1) {
-            Some(cap) => BackupNamespace::from_path(cap.as_str())?,
-            None => BackupNamespace::root(),
-        };
         BackupDir::with_rfc3339(
-            ns,
-            cap.get(2).unwrap().as_str().parse()?,
+            cap.get(1).unwrap().as_str().parse()?,
+            cap.get(2).unwrap().as_str(),
             cap.get(3).unwrap().as_str(),
-            cap.get(4).unwrap().as_str(),
         )
     }
 }
@@ -1107,16 +1067,12 @@ impl std::str::FromStr for BackupPart {
             .captures(path)
             .ok_or_else(|| format_err!("unable to parse backup snapshot path '{}'", path))?;

-        let ns = match cap.get(1) {
-            Some(cap) => BackupNamespace::from_path(cap.as_str())?,
-            None => BackupNamespace::root(),
-        };
-        let ty = cap.get(2).unwrap().as_str().parse()?;
-        let id = cap.get(3).unwrap().as_str().to_string();
+        let ty = cap.get(1).unwrap().as_str().parse()?;
+        let id = cap.get(2).unwrap().as_str().to_string();

-        Ok(match cap.get(4) {
-            Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ns, ty, id, time.as_str())?),
-            None => BackupPart::Group((ns, ty, id).into()),
+        Ok(match cap.get(3) {
+            Some(time) => BackupPart::Dir(BackupDir::with_rfc3339(ty, id, time.as_str())?),
+            None => BackupPart::Group((ty, id).into()),
         })
     }
 }
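One consequence of the reduced `FromStr`/`Display` impls above, sketched (assuming this tree's `pbs_api_types`; values illustrative): the plain `type/id/time` form still round-trips, while a namespace-qualified rendering no longer parses here.

    use pbs_api_types::BackupDir;

    fn main() {
        // Root-namespace form round-trips through FromStr:
        assert!("host/elsa/2022-05-12T09:00:00Z".parse::<BackupDir>().is_ok());
        // The bracketed form is only produced by the *datastore* Display
        // impls further below, for humans - it does not parse back:
        assert!("[dev]:host/elsa/2022-05-12T09:00:00Z".parse::<BackupDir>().is_err());
    }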
@@ -34,20 +34,11 @@ macro_rules! BACKUP_NS_RE {
     );
 }

-#[rustfmt::skip]
-#[macro_export]
-macro_rules! BACKUP_NS_PATH_RE {
-    () => (
-        concat!(r"(?:ns/", PROXMOX_SAFE_ID_REGEX_STR!(), r"/){0,7}ns/", PROXMOX_SAFE_ID_REGEX_STR!())
-    );
-}
-
 #[rustfmt::skip]
 #[macro_export]
 macro_rules! SNAPSHOT_PATH_REGEX_STR {
     () => (
         concat!(
-            r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?",
             r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")/(", BACKUP_TIME_RE!(), r")",
         )
     );
@@ -58,7 +49,6 @@ macro_rules! SNAPSHOT_PATH_REGEX_STR {
 macro_rules! GROUP_OR_SNAPSHOT_PATH_REGEX_STR {
     () => {
         concat!(
-            r"(?:(", BACKUP_NS_PATH_RE!(), ")/)?",
             r"(", BACKUP_TYPE_RE!(), ")/(", BACKUP_ID_RE!(), ")(?:/(", BACKUP_TIME_RE!(), r"))?",
         )
     };
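To make the simplified patterns concrete, roughly these are the path shapes the remaining regexes are meant to accept (illustrative strings, not taken from the source):

    const GROUP_PATH: &str = "host/elsa";                         // GROUP_PATH_REGEX
    const SNAPSHOT_PATH: &str = "host/elsa/2022-05-12T09:00:00Z"; // SNAPSHOT_PATH_REGEX_STR!()
    // GROUP_OR_SNAPSHOT_PATH_REGEX_STR!() accepts both: the time part is optional.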
@@ -7,7 +7,7 @@ use std::sync::Arc;
 use futures::future::AbortHandle;
 use serde_json::{json, Value};

-use pbs_api_types::BackupDir;
+use pbs_api_types::{BackupDir, BackupNamespace};
 use pbs_datastore::data_blob::DataBlob;
 use pbs_datastore::data_blob_reader::DataBlobReader;
 use pbs_datastore::dynamic_index::DynamicIndexReader;
@@ -47,6 +47,7 @@ impl BackupReader {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
+        ns: &BackupNamespace,
         backup: &BackupDir,
         debug: bool,
     ) -> Result<Arc<BackupReader>, Error> {
@@ -58,7 +59,6 @@ impl BackupReader {
             "debug": debug,
         });

-        let ns = backup.ns();
         if !ns.is_root() {
             param["backup-ns"] = serde_json::to_value(ns)?;
         }

@@ -12,7 +12,7 @@ use tokio::io::AsyncReadExt;
 use tokio::sync::{mpsc, oneshot};
 use tokio_stream::wrappers::ReceiverStream;

-use pbs_api_types::{BackupDir, HumanByte};
+use pbs_api_types::{BackupDir, BackupNamespace, HumanByte};
 use pbs_datastore::data_blob::{ChunkInfo, DataBlob, DataChunkBuilder};
 use pbs_datastore::dynamic_index::DynamicIndexReader;
 use pbs_datastore::fixed_index::FixedIndexReader;
@@ -86,6 +86,7 @@ impl BackupWriter {
         client: HttpClient,
         crypt_config: Option<Arc<CryptConfig>>,
         datastore: &str,
+        ns: &BackupNamespace,
         backup: &BackupDir,
         debug: bool,
         benchmark: bool,
@@ -99,7 +100,6 @@ impl BackupWriter {
             "benchmark": benchmark
         });

-        let ns = backup.ns();
         if !ns.is_root() {
             param["backup-ns"] = serde_json::to_value(ns)?;
         }
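Both `start` methods now share the same pattern for the namespace parameter; a self-contained sketch (the helper name `add_ns_param` is made up here):

    use anyhow::Error;
    use pbs_api_types::BackupNamespace;
    use serde_json::Value;

    fn add_ns_param(param: &mut Value, ns: &BackupNamespace) -> Result<(), Error> {
        // Only serialize "backup-ns" when it is not the root namespace,
        // mirroring the checks in BackupReader::start / BackupWriter::start.
        if !ns.is_root() {
            param["backup-ns"] = serde_json::to_value(ns)?;
        }
        Ok(())
    }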
@@ -292,8 +292,16 @@ pub async fn complete_server_file_name_do(param: &HashMap<String, String>) -> Ve
         _ => return result,
     };

+    let ns: pbs_api_types::BackupNamespace = match param.get("ns") {
+        Some(ns) => match ns.parse() {
+            Ok(v) => v,
+            _ => return result,
+        },
+        _ => return result,
+    };
+
     let query = json_object_to_query(json!({
-        "backup-ns": snapshot.group.ns,
+        "backup-ns": ns,
         "backup-type": snapshot.group.ty,
         "backup-id": snapshot.group.id,
         "backup-time": snapshot.time,
@@ -1,3 +1,4 @@
+use std::convert::TryFrom;
 use std::fmt;
 use std::os::unix::io::RawFd;
 use std::path::PathBuf;
@@ -8,11 +9,11 @@ use anyhow::{bail, format_err, Error};
 use proxmox_sys::fs::lock_dir_noblock;

 use pbs_api_types::{
-    BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX,
+    Authid, BackupNamespace, BackupType, GroupFilter, BACKUP_DATE_REGEX, BACKUP_FILE_REGEX,
 };
 use pbs_config::{open_backup_lockfile, BackupLockGuard};

-use crate::manifest::{MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME};
+use crate::manifest::{BackupManifest, MANIFEST_BLOB_NAME, MANIFEST_LOCK_NAME};
 use crate::{DataBlob, DataStore};

 /// BackupGroup is a directory containing a list of BackupDir
@@ -20,6 +21,7 @@ use crate::{DataBlob, DataStore};
 pub struct BackupGroup {
     store: Arc<DataStore>,

+    ns: BackupNamespace,
     group: pbs_api_types::BackupGroup,
 }

@@ -33,8 +35,12 @@ impl fmt::Debug for BackupGroup {
 }

 impl BackupGroup {
-    pub(crate) fn new(store: Arc<DataStore>, group: pbs_api_types::BackupGroup) -> Self {
-        Self { store, group }
+    pub(crate) fn new(
+        store: Arc<DataStore>,
+        ns: BackupNamespace,
+        group: pbs_api_types::BackupGroup,
+    ) -> Self {
+        Self { store, ns, group }
     }

     /// Access the underlying [`BackupGroup`](pbs_api_types::BackupGroup).
@@ -45,7 +51,7 @@ impl BackupGroup {

     #[inline]
     pub fn backup_ns(&self) -> &BackupNamespace {
-        &self.group.ns
+        &self.ns
     }

     #[inline]
@@ -59,11 +65,14 @@ impl BackupGroup {
     }

     pub fn full_group_path(&self) -> PathBuf {
-        self.store.base_path().join(self.group.to_string())
+        self.store.group_path(&self.ns, &self.group)
     }

     pub fn relative_group_path(&self) -> PathBuf {
-        self.group.to_string().into()
+        let mut path = self.store.namespace_path(&self.ns);
+        path.push(self.group.ty.as_str());
+        path.push(&self.group.id);
+        path
     }

     pub fn list_backups(&self) -> Result<Vec<BackupInfo>, Error> {
@@ -205,6 +214,26 @@ impl BackupGroup {

         Ok(removed_all_snaps)
     }
+
+    /// Returns the backup owner.
+    ///
+    /// The backup owner is the entity who first created the backup group.
+    pub fn get_owner(&self) -> Result<Authid, Error> {
+        self.store.get_owner(&self.ns, self.as_ref())
+    }
+
+    /// Set the backup owner.
+    pub fn set_owner(&self, auth_id: &Authid, force: bool) -> Result<(), Error> {
+        self.store
+            .set_owner(&self.ns, &self.as_ref(), auth_id, force)
+    }
 }

+impl AsRef<pbs_api_types::BackupNamespace> for BackupGroup {
+    #[inline]
+    fn as_ref(&self) -> &pbs_api_types::BackupNamespace {
+        &self.ns
+    }
+}
+
 impl AsRef<pbs_api_types::BackupGroup> for BackupGroup {
@@ -229,7 +258,11 @@ impl From<BackupGroup> for pbs_api_types::BackupGroup {
 impl fmt::Display for BackupGroup {
     #[inline]
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        fmt::Display::fmt(&self.group, f)
+        if self.ns.is_root() {
+            fmt::Display::fmt(&self.group, f)
+        } else {
+            write!(f, "[{}]:{}", self.ns, self.group)
+        }
     }
 }

@@ -237,6 +270,7 @@ impl From<BackupDir> for BackupGroup {
     fn from(dir: BackupDir) -> BackupGroup {
         BackupGroup {
             store: dir.store,
+            ns: dir.ns,
             group: dir.dir.group,
         }
     }
@@ -246,6 +280,7 @@ impl From<&BackupDir> for BackupGroup {
     fn from(dir: &BackupDir) -> BackupGroup {
         BackupGroup {
             store: Arc::clone(&dir.store),
+            ns: dir.ns.clone(),
             group: dir.dir.group.clone(),
         }
     }
@@ -257,6 +292,7 @@ impl From<&BackupDir> for BackupGroup {
 #[derive(Clone)]
 pub struct BackupDir {
     store: Arc<DataStore>,
+    ns: BackupNamespace,
     dir: pbs_api_types::BackupDir,
     // backup_time as rfc3339
     backup_time_string: String,
@@ -279,6 +315,7 @@ impl BackupDir {
         Self {
             store: unsafe { DataStore::new_test() },
             backup_time_string: Self::backup_time_to_string(dir.time).unwrap(),
+            ns: BackupNamespace::root(),
             dir,
         }
     }
@@ -287,6 +324,7 @@ impl BackupDir {
         let backup_time_string = Self::backup_time_to_string(backup_time)?;
         Ok(Self {
             store: group.store,
+            ns: group.ns,
             dir: (group.group, backup_time).into(),
             backup_time_string,
         })
@@ -299,6 +337,7 @@ impl BackupDir {
         let backup_time = proxmox_time::parse_rfc3339(&backup_time_string)?;
         Ok(Self {
             store: group.store,
+            ns: group.ns,
             dir: (group.group, backup_time).into(),
             backup_time_string,
         })
@@ -306,7 +345,7 @@ impl BackupDir {

     #[inline]
     pub fn backup_ns(&self) -> &BackupNamespace {
-        &self.dir.group.ns
+        &self.ns
     }

     #[inline]
@@ -329,20 +368,16 @@ impl BackupDir {
     }

     pub fn relative_path(&self) -> PathBuf {
-        format!("{}/{}", self.dir.group, self.backup_time_string).into()
+        let mut path = self.store.namespace_path(&self.ns);
+        path.push(self.dir.group.ty.as_str());
+        path.push(&self.dir.group.id);
+        path.push(&self.backup_time_string);
+        path
     }

     /// Returns the absolute path for backup_dir, using the cached formatted time string.
     pub fn full_path(&self) -> PathBuf {
-        let mut base_path = self.store.base_path();
-        for ns in self.dir.group.ns.components() {
-            base_path.push("ns");
-            base_path.push(ns);
-        }
-        base_path.push(self.dir.group.ty.as_str());
-        base_path.push(&self.dir.group.id);
-        base_path.push(&self.backup_time_string);
-        base_path
+        self.store.snapshot_path(&self.ns, &self.dir)
     }

     pub fn protected_file(&self) -> PathBuf {
@@ -425,6 +460,46 @@ impl BackupDir {

         Ok(())
     }
+
+    /// Get the datastore.
+    pub fn datastore(&self) -> &Arc<DataStore> {
+        &self.store
+    }
+
+    /// Returns the backup owner.
+    ///
+    /// The backup owner is the entity who first created the backup group.
+    pub fn get_owner(&self) -> Result<Authid, Error> {
+        self.store.get_owner(&self.ns, self.as_ref())
+    }
+
+    /// Lock the snapshot and open a reader.
+    pub fn locked_reader(&self) -> Result<crate::SnapshotReader, Error> {
+        crate::SnapshotReader::new_do(self.clone())
+    }
+
+    /// Load the manifest without a lock. Must not be written back.
+    pub fn load_manifest(&self) -> Result<(BackupManifest, u64), Error> {
+        let blob = self.load_blob(MANIFEST_BLOB_NAME)?;
+        let raw_size = blob.raw_size();
+        let manifest = BackupManifest::try_from(blob)?;
+        Ok((manifest, raw_size))
+    }
+
+    /// Update the manifest of the specified snapshot. Never write a manifest directly,
+    /// only use this method - anything else may break locking guarantees.
+    pub fn update_manifest(
+        &self,
+        update_fn: impl FnOnce(&mut BackupManifest),
+    ) -> Result<(), Error> {
+        self.store.update_manifest(self, update_fn)
+    }
 }

+impl AsRef<pbs_api_types::BackupNamespace> for BackupDir {
+    fn as_ref(&self) -> &pbs_api_types::BackupNamespace {
+        &self.ns
+    }
+}
+
 impl AsRef<pbs_api_types::BackupDir> for BackupDir {
@@ -465,7 +540,15 @@ impl From<BackupDir> for pbs_api_types::BackupDir {

 impl fmt::Display for BackupDir {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(f, "{}/{}", self.dir.group, self.backup_time_string)
+        if self.ns.is_root() {
+            write!(f, "{}/{}", self.dir.group, self.backup_time_string)
+        } else {
+            write!(
+                f,
+                "[{}]:{}/{}",
+                self.ns, self.dir.group, self.backup_time_string
+            )
+        }
     }
 }
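The path helpers above all agree on one on-disk layout; a standalone sketch of it (hypothetical function, std only, mirroring `relative_path()` and the `DataStore::snapshot_path()` used by `full_path()`):

    use std::path::PathBuf;

    // Each namespace component lives under an "ns" directory, followed by
    // the backup type, id, and the rfc3339 time string.
    fn snapshot_path(base: PathBuf, ns: &[&str], ty: &str, id: &str, time: &str) -> PathBuf {
        let mut p = base;
        for part in ns {
            p.push("ns");
            p.push(part);
        }
        p.push(ty);
        p.push(id);
        p.push(time);
        p
    }

    fn main() {
        let p = snapshot_path(PathBuf::from("/store"), &["dev"], "host", "elsa", "2022-05-12T09:00:00Z");
        assert_eq!(p, PathBuf::from("/store/ns/dev/host/elsa/2022-05-12T09:00:00Z"));
    }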
@@ -1,5 +1,4 @@
 use std::collections::{HashMap, HashSet};
-use std::convert::TryFrom;
 use std::io::{self, Write};
 use std::os::unix::io::AsRawFd;
 use std::path::{Path, PathBuf};
@@ -350,6 +349,7 @@ impl DataStore {
         self.inner.chunk_store.base_path()
     }

+    /// Returns the absolute path for a backup namespace on this datastore
     pub fn namespace_path(&self, ns: &BackupNamespace) -> PathBuf {
         let mut path = self.base_path();
         path.reserve(ns.path_len());
@@ -409,23 +409,24 @@ impl DataStore {
         Ok(())
     }

-    /// Returns the absolute path for a backup namespace on this datastore
-    pub fn ns_path(&self, ns: &BackupNamespace) -> PathBuf {
-        let mut full_path = self.base_path();
-        full_path.push(ns.path());
-        full_path
-    }
-
     /// Returns the absolute path for a backup_group
-    pub fn group_path(&self, backup_group: &pbs_api_types::BackupGroup) -> PathBuf {
-        let mut full_path = self.base_path();
+    pub fn group_path(
+        &self,
+        ns: &BackupNamespace,
+        backup_group: &pbs_api_types::BackupGroup,
+    ) -> PathBuf {
+        let mut full_path = self.namespace_path(ns);
         full_path.push(backup_group.to_string());
         full_path
     }

     /// Returns the absolute path for backup_dir
-    pub fn snapshot_path(&self, backup_dir: &pbs_api_types::BackupDir) -> PathBuf {
-        let mut full_path = self.base_path();
+    pub fn snapshot_path(
+        &self,
+        ns: &BackupNamespace,
+        backup_dir: &pbs_api_types::BackupDir,
+    ) -> PathBuf {
+        let mut full_path = self.namespace_path(ns);
         full_path.push(backup_dir.to_string());
         full_path
     }
@@ -537,9 +538,10 @@ impl DataStore {
     /// Returns true if all snapshots were removed, and false if some were protected
     pub fn remove_backup_group(
         self: &Arc<Self>,
+        ns: &BackupNamespace,
         backup_group: &pbs_api_types::BackupGroup,
     ) -> Result<bool, Error> {
-        let backup_group = self.backup_group(backup_group.clone());
+        let backup_group = self.backup_group(ns.clone(), backup_group.clone());

         backup_group.destroy()
     }
@@ -547,10 +549,11 @@ impl DataStore {
     /// Remove a backup directory including all content
     pub fn remove_backup_dir(
         self: &Arc<Self>,
+        ns: &BackupNamespace,
         backup_dir: &pbs_api_types::BackupDir,
         force: bool,
     ) -> Result<(), Error> {
-        let backup_dir = self.backup_dir(backup_dir.clone())?;
+        let backup_dir = self.backup_dir(ns.clone(), backup_dir.clone())?;

         backup_dir.destroy(force)
     }
@@ -560,9 +563,10 @@ impl DataStore {
     /// Or None if there is no backup in the group (or the group dir does not exist).
     pub fn last_successful_backup(
         self: &Arc<Self>,
+        ns: &BackupNamespace,
         backup_group: &pbs_api_types::BackupGroup,
     ) -> Result<Option<i64>, Error> {
-        let backup_group = self.backup_group(backup_group.clone());
+        let backup_group = self.backup_group(ns.clone(), backup_group.clone());

         let group_path = backup_group.full_group_path();

@@ -573,23 +577,31 @@ impl DataStore {
         }
     }

+    /// Return the path of the 'owner' file.
+    fn owner_path(&self, ns: &BackupNamespace, group: &pbs_api_types::BackupGroup) -> PathBuf {
+        self.group_path(ns, group).join("owner")
+    }
+
     /// Returns the backup owner.
     ///
     /// The backup owner is the entity who first created the backup group.
-    pub fn get_owner(&self, backup_group: &pbs_api_types::BackupGroup) -> Result<Authid, Error> {
-        let mut full_path = self.base_path();
-        full_path.push(backup_group.to_string());
-        full_path.push("owner");
+    pub fn get_owner(
+        &self,
+        ns: &BackupNamespace,
+        backup_group: &pbs_api_types::BackupGroup,
+    ) -> Result<Authid, Error> {
+        let full_path = self.owner_path(ns, backup_group);
         let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
         owner.trim_end().parse() // remove trailing newline
     }

     pub fn owns_backup(
         &self,
+        ns: &BackupNamespace,
         backup_group: &pbs_api_types::BackupGroup,
         auth_id: &Authid,
     ) -> Result<bool, Error> {
-        let owner = self.get_owner(backup_group)?;
+        let owner = self.get_owner(ns, backup_group)?;

         Ok(check_backup_owner(&owner, auth_id).is_ok())
     }
@@ -597,13 +609,12 @@ impl DataStore {
     /// Set the backup owner.
     pub fn set_owner(
         &self,
+        ns: &BackupNamespace,
         backup_group: &pbs_api_types::BackupGroup,
         auth_id: &Authid,
         force: bool,
     ) -> Result<(), Error> {
-        let mut path = self.base_path();
-        path.push(backup_group.to_string());
-        path.push("owner");
+        let path = self.owner_path(ns, backup_group);

         let mut open_options = std::fs::OpenOptions::new();
         open_options.write(true);
@@ -633,12 +644,13 @@ impl DataStore {
     /// This also acquires an exclusive lock on the directory and returns the lock guard.
     pub fn create_locked_backup_group(
         &self,
+        ns: &BackupNamespace,
         backup_group: &pbs_api_types::BackupGroup,
         auth_id: &Authid,
     ) -> Result<(Authid, DirLockGuard), Error> {
         // create intermediate path first:
         let mut full_path = self.base_path();
-        for ns in backup_group.ns.components() {
+        for ns in ns.components() {
             full_path.push("ns");
             full_path.push(ns);
         }
@@ -655,8 +667,8 @@ impl DataStore {
                     "backup group",
                     "another backup is already running",
                 )?;
-                self.set_owner(backup_group, auth_id, false)?;
-                let owner = self.get_owner(backup_group)?; // just to be sure
+                self.set_owner(ns, backup_group, auth_id, false)?;
+                let owner = self.get_owner(ns, backup_group)?; // just to be sure
                 Ok((owner, guard))
             }
             Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
@@ -665,7 +677,7 @@ impl DataStore {
                     "backup group",
                     "another backup is already running",
                 )?;
-                let owner = self.get_owner(backup_group)?; // just to be sure
+                let owner = self.get_owner(ns, backup_group)?; // just to be sure
                 Ok((owner, guard))
             }
             Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
@@ -677,11 +689,15 @@ impl DataStore {
     /// The BackupGroup directory needs to exist.
     pub fn create_locked_backup_dir(
         &self,
+        ns: &BackupNamespace,
         backup_dir: &pbs_api_types::BackupDir,
     ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
-        let relative_path = PathBuf::from(backup_dir.to_string());
-        let mut full_path = self.base_path();
-        full_path.push(&relative_path);
+        let full_path = self.snapshot_path(ns, backup_dir);
+        let relative_path = full_path.strip_prefix(self.base_path()).map_err(|err| {
+            format_err!(
+                "failed to produce correct path for backup {backup_dir} in namespace {ns}: {err}"
+            )
+        })?;

         let lock = || {
             lock_dir_noblock(
@@ -692,9 +708,9 @@ impl DataStore {
         };

         match std::fs::create_dir(&full_path) {
-            Ok(_) => Ok((relative_path, true, lock()?)),
+            Ok(_) => Ok((relative_path.to_owned(), true, lock()?)),
             Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
-                Ok((relative_path, false, lock()?))
+                Ok((relative_path.to_owned(), false, lock()?))
             }
             Err(e) => Err(e.into()),
         }
@@ -1135,10 +1151,7 @@ impl DataStore {

     /// Load the manifest without a lock. Must not be written back.
     pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
-        let blob = backup_dir.load_blob(MANIFEST_BLOB_NAME)?;
-        let raw_size = blob.raw_size();
-        let manifest = BackupManifest::try_from(blob)?;
-        Ok((manifest, raw_size))
+        backup_dir.load_manifest()
     }

     /// Update the manifest of the specified snapshot. Never write a manifest directly,
@@ -1240,8 +1253,12 @@ impl DataStore {
     }

     /// Open a backup group from this datastore.
-    pub fn backup_group(self: &Arc<Self>, group: pbs_api_types::BackupGroup) -> BackupGroup {
-        BackupGroup::new(Arc::clone(&self), group)
+    pub fn backup_group(
+        self: &Arc<Self>,
+        ns: BackupNamespace,
+        group: pbs_api_types::BackupGroup,
+    ) -> BackupGroup {
+        BackupGroup::new(Arc::clone(&self), ns, group)
     }

     /// Open a backup group from this datastore.
@@ -1254,19 +1271,25 @@ impl DataStore {
     where
         T: Into<String>,
     {
-        self.backup_group((ns, ty, id.into()).into())
+        self.backup_group(ns, (ty, id.into()).into())
     }

+    /*
     /// Open a backup group from this datastore by backup group path such as `vm/100`.
     ///
     /// Convenience method for `store.backup_group(path.parse()?)`
     pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> {
-        Ok(self.backup_group(path.parse()?))
+        todo!("split out the namespace");
     }
+    */

     /// Open a snapshot (backup directory) from this datastore.
-    pub fn backup_dir(self: &Arc<Self>, dir: pbs_api_types::BackupDir) -> Result<BackupDir, Error> {
-        BackupDir::with_group(self.backup_group(dir.group), dir.time)
+    pub fn backup_dir(
+        self: &Arc<Self>,
+        ns: BackupNamespace,
+        dir: pbs_api_types::BackupDir,
+    ) -> Result<BackupDir, Error> {
+        BackupDir::with_group(self.backup_group(ns, dir.group), dir.time)
     }

     /// Open a snapshot (backup directory) from this datastore.
@@ -1280,7 +1303,7 @@ impl DataStore {
     where
         T: Into<String>,
     {
-        self.backup_dir((ns, ty, id.into(), time).into())
+        self.backup_dir(ns, (ty, id.into(), time).into())
     }

     /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
@@ -1292,10 +1315,12 @@ impl DataStore {
         BackupDir::with_rfc3339(group, time_string.into())
     }

+    /*
     /// Open a snapshot (backup directory) from this datastore by a snapshot path.
     pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> {
-        self.backup_dir(path.parse()?)
+        todo!("split out the namespace");
     }
+    */
 }

 /// A iterator for all BackupDir's (Snapshots) in a BackupGroup
@@ -1391,7 +1416,8 @@ impl Iterator for ListGroups {
                     if BACKUP_ID_REGEX.is_match(name) {
                         return Some(Ok(BackupGroup::new(
                             Arc::clone(&self.store),
-                            (self.ns.clone(), group_type, name.to_owned()).into(),
+                            self.ns.clone(),
+                            (group_type, name.to_owned()).into(),
                         )));
                     }
                 }
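The `create_locked_backup_dir` change above derives the relative path from the absolute one instead of building both; a standalone sketch of that approach (hypothetical function, std only):

    use std::path::{Path, PathBuf};

    // Recover the datastore-relative snapshot path via strip_prefix, as
    // create_locked_backup_dir now does.
    fn relative_to_base(full: &Path, base: &Path) -> Option<PathBuf> {
        full.strip_prefix(base).ok().map(Path::to_path_buf)
    }

    fn main() {
        let base = Path::new("/datastore/store1");
        let full = Path::new("/datastore/store1/ns/dev/host/elsa/2022-05-12T09:00:00Z");
        assert_eq!(
            relative_to_base(full, base),
            Some(PathBuf::from("ns/dev/host/elsa/2022-05-12T09:00:00Z"))
        );
    }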
@@ -8,13 +8,14 @@ use nix::dir::Dir;

 use proxmox_sys::fs::lock_dir_noblock_shared;

+use pbs_api_types::{BackupNamespace, Operation};
+
 use crate::backup_info::BackupDir;
 use crate::dynamic_index::DynamicIndexReader;
 use crate::fixed_index::FixedIndexReader;
 use crate::index::IndexFile;
 use crate::manifest::{archive_type, ArchiveType, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME};
 use crate::DataStore;
-use pbs_api_types::Operation;

 /// Helper to access the contents of a datastore backup snapshot
 ///
@@ -30,10 +31,14 @@ impl SnapshotReader {
     /// Lock snapshot, reads the manifest and returns a new instance
     pub fn new(
         datastore: Arc<DataStore>,
+        ns: BackupNamespace,
         snapshot: pbs_api_types::BackupDir,
     ) -> Result<Self, Error> {
-        let snapshot = datastore.backup_dir(snapshot)?;
-
+        Self::new_do(datastore.backup_dir(ns, snapshot)?)
+    }
+
+    pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
+        let datastore = snapshot.datastore();
         let snapshot_path = snapshot.full_path();

         let locked_dir =
@@ -242,13 +242,8 @@ async fn test_upload_speed(
         client,
         crypt_config.clone(),
         repo.store(),
-        &(
-            BackupNamespace::root(),
-            BackupType::Host,
-            "benchmark".to_string(),
-            backup_time,
-        )
-            .into(),
+        &BackupNamespace::root(),
+        &(BackupType::Host, "benchmark".to_string(), backup_time).into(),
         false,
         true,
     )
@@ -8,6 +8,7 @@ use serde_json::Value;
 use proxmox_router::cli::*;
 use proxmox_schema::api;

+use pbs_api_types::BackupNamespace;
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_tools::crypt_config::CryptConfig;
@@ -16,9 +17,9 @@ use pbs_tools::json::required_string_param;
 use crate::{
     complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name,
     complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group,
-    extract_repository_from_value, format_key_source, record_repository, BackupDir,
-    BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
-    Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
+    extract_repository_from_value, format_key_source, optional_ns_param, record_repository,
+    BackupDir, BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader,
+    IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
 };

 #[api(
@@ -28,6 +29,10 @@ use crate::{
             schema: REPO_URL_SCHEMA,
             optional: true,
         },
+        ns: {
+            type: BackupNamespace,
+            optional: true,
+        },
         snapshot: {
             type: String,
             description: "Snapshot path.",
@@ -48,6 +53,7 @@ use crate::{
 async fn dump_catalog(param: Value) -> Result<Value, Error> {
     let repo = extract_repository_from_value(&param)?;

+    let backup_ns = optional_ns_param(&param)?;
     let path = required_string_param(&param, "snapshot")?;
     let snapshot: BackupDir = path.parse()?;

@@ -68,8 +74,15 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {

     let client = connect(&repo)?;

-    let client =
-        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
+    let client = BackupReader::start(
+        client,
+        crypt_config.clone(),
+        repo.store(),
+        &backup_ns,
+        &snapshot,
+        true,
+    )
+    .await?;

     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -114,6 +127,10 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
 #[api(
     input: {
         properties: {
+            ns: {
+                type: BackupNamespace,
+                optional: true,
+            },
             "snapshot": {
                 type: String,
                 description: "Group/Snapshot path.",
@@ -142,10 +159,11 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
 async fn catalog_shell(param: Value) -> Result<(), Error> {
     let repo = extract_repository_from_value(&param)?;
     let client = connect(&repo)?;
+    let backup_ns = optional_ns_param(&param)?;
     let path = required_string_param(&param, "snapshot")?;
     let archive_name = required_string_param(&param, "archive-name")?;

-    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;

     let crypto = crypto_parameters(&param)?;

@@ -172,6 +190,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         client,
         crypt_config.clone(),
         repo.store(),
+        &backup_ns,
         &backup_dir,
         true,
     )
@@ -127,24 +127,21 @@ fn record_repository(repo: &BackupRepository) {
     );
 }

-enum List {
-    Any,
-    Group(BackupGroup),
-    Namespace(BackupNamespace),
-}
-
 async fn api_datastore_list_snapshots(
     client: &HttpClient,
     store: &str,
-    list: List,
+    ns: &BackupNamespace,
+    group: Option<&BackupGroup>,
 ) -> Result<Value, Error> {
     let path = format!("api2/json/admin/datastore/{}/snapshots", store);

-    let args = match list {
-        List::Group(group) => serde_json::to_value(group)?,
-        List::Namespace(ns) => json!({ "backup-ns": ns }),
-        List::Any => json!({}),
+    let mut args = match group {
+        Some(group) => serde_json::to_value(group)?,
+        None => json!({}),
     };
+    if !ns.is_root() {
+        args["backup-ns"] = serde_json::to_value(ns)?;
+    }

     let mut result = client.get(&path, Some(args)).await?;

@@ -154,9 +151,10 @@ async fn api_datastore_list_snapshots(
 pub async fn api_datastore_latest_snapshot(
     client: &HttpClient,
     store: &str,
+    ns: &BackupNamespace,
     group: BackupGroup,
 ) -> Result<BackupDir, Error> {
-    let list = api_datastore_list_snapshots(client, store, List::Group(group.clone())).await?;
+    let list = api_datastore_list_snapshots(client, store, ns, Some(&group)).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;

     if list.is_empty() {
@@ -171,12 +169,13 @@ pub async fn api_datastore_latest_snapshot(
 pub async fn dir_or_last_from_group(
     client: &HttpClient,
     repo: &BackupRepository,
+    ns: &BackupNamespace,
     path: &str,
 ) -> Result<BackupDir, Error> {
     match path.parse::<BackupPart>()? {
         BackupPart::Dir(dir) => Ok(dir),
         BackupPart::Group(group) => {
-            api_datastore_latest_snapshot(&client, repo.store(), group).await
+            api_datastore_latest_snapshot(&client, repo.store(), ns, group).await
         }
     }
 }
@@ -242,6 +241,14 @@ async fn backup_image<P: AsRef<Path>>(
     Ok(stats)
 }

+pub fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
+    Ok(match param.get("ns") {
+        Some(Value::String(ns)) => ns.parse()?,
+        Some(_) => bail!("invalid namespace parameter"),
+        None => BackupNamespace::root(),
+    })
+}
+
 #[api(
     input: {
         properties: {
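Usage of the new helper, sketched end to end (serde_json only; the "dev/team" namespace string form is an assumption about `BackupNamespace`'s `FromStr`):

    use anyhow::{bail, Error};
    use pbs_api_types::BackupNamespace;
    use serde_json::{json, Value};

    fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
        // Same logic as the helper added above: an absent "ns" means root.
        Ok(match param.get("ns") {
            Some(Value::String(ns)) => ns.parse()?,
            Some(_) => bail!("invalid namespace parameter"),
            None => BackupNamespace::root(),
        })
    }

    fn main() -> Result<(), Error> {
        assert!(optional_ns_param(&json!({}))?.is_root());
        let ns = optional_ns_param(&json!({ "ns": "dev/team" }))?; // assumed format
        assert!(!ns.is_root());
        Ok(())
    }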
@@ -270,10 +277,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {

     let path = format!("api2/json/admin/datastore/{}/groups", repo.store());

-    let backup_ns: BackupNamespace = match &param["ns"] {
-        Value::String(s) => s.parse()?,
-        _ => BackupNamespace::root(),
-    };
+    let backup_ns = optional_ns_param(&param)?;
     let mut result = client
         .get(&path, Some(json!({ "backup-ns": backup_ns })))
         .await?;
@@ -692,7 +696,7 @@ async fn create_backup(
         .as_str()
         .unwrap_or(proxmox_sys::nodename());

-    let backup_namespace: BackupNamespace = match param.get("backup-ns") {
+    let backup_ns: BackupNamespace = match param.get("backup-ns") {
         Some(ns) => ns
             .as_str()
             .ok_or_else(|| format_err!("bad namespace {:?}", ns))?
@@ -822,13 +826,12 @@ async fn create_backup(
     let client = connect_rate_limited(&repo, rate_limit)?;
     record_repository(&repo);

-    let snapshot = BackupDir::from((
-        backup_namespace,
-        backup_type,
-        backup_id.to_owned(),
-        backup_time,
-    ));
-    println!("Starting backup: {snapshot}");
+    let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
+    if backup_ns.is_root() {
+        println!("Starting backup: {snapshot}");
+    } else {
+        println!("Starting backup: [{backup_ns}]:{snapshot}");
+    }

     println!("Client name: {}", proxmox_sys::nodename());

@@ -875,6 +878,7 @@ async fn create_backup(
         client,
         crypt_config.clone(),
         repo.store(),
+        &backup_ns,
         &snapshot,
         verbose,
         false,
@@ -1151,55 +1155,59 @@ fn parse_archive_type(name: &str) -> (String, ArchiveType) {
 }

 #[api(
-        input: {
-            properties: {
-                repository: {
-                    schema: REPO_URL_SCHEMA,
-                    optional: true,
-                },
-                snapshot: {
-                    type: String,
-                    description: "Group/Snapshot path.",
-                },
-                "archive-name": {
-                    description: "Backup archive name.",
-                    type: String,
-                },
-                target: {
-                    type: String,
-                    description: r###"Target directory path. Use '-' to write to standard output.
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            ns: {
+                type: BackupNamespace,
+                optional: true,
+            },
+            snapshot: {
+                type: String,
+                description: "Group/Snapshot path.",
+            },
+            "archive-name": {
+                description: "Backup archive name.",
+                type: String,
+            },
+            target: {
+                type: String,
+                description: r###"Target directory path. Use '-' to write to standard output.

 We do not extract '.pxar' archives when writing to standard output.

 "###
-                },
-                rate: {
-                    schema: TRAFFIC_CONTROL_RATE_SCHEMA,
-                    optional: true,
-                },
-                burst: {
-                    schema: TRAFFIC_CONTROL_BURST_SCHEMA,
-                    optional: true,
-                },
-                "allow-existing-dirs": {
-                    type: Boolean,
-                    description: "Do not fail if directories already exists.",
-                    optional: true,
-                },
-                keyfile: {
-                    schema: KEYFILE_SCHEMA,
-                    optional: true,
-                },
-                "keyfd": {
-                    schema: KEYFD_SCHEMA,
-                    optional: true,
-                },
-                "crypt-mode": {
-                    type: CryptMode,
-                    optional: true,
-                },
-            }
-        }
+            },
+            rate: {
+                schema: TRAFFIC_CONTROL_RATE_SCHEMA,
+                optional: true,
+            },
+            burst: {
+                schema: TRAFFIC_CONTROL_BURST_SCHEMA,
+                optional: true,
+            },
+            "allow-existing-dirs": {
+                type: Boolean,
+                description: "Do not fail if directories already exists.",
+                optional: true,
+            },
+            keyfile: {
+                schema: KEYFILE_SCHEMA,
+                optional: true,
+            },
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
+            "crypt-mode": {
+                type: CryptMode,
+                optional: true,
+            },
+        }
+    }
 )]
 /// Restore backup repository.
 async fn restore(param: Value) -> Result<Value, Error> {
@@ -1225,9 +1233,14 @@ async fn restore(param: Value) -> Result<Value, Error> {
     let client = connect_rate_limited(&repo, rate_limit)?;
     record_repository(&repo);

+    let ns = match param.get("ns") {
+        Some(Value::String(ns)) => ns.parse()?,
+        Some(_) => bail!("invalid namespace parameter"),
+        None => BackupNamespace::root(),
+    };
     let path = json::required_string_param(&param, "snapshot")?;

-    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &ns, &path).await?;

     let target = json::required_string_param(&param, "target")?;
     let target = if target == "-" { None } else { Some(target) };
@@ -1250,6 +1263,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
+        &ns,
         &backup_dir,
         true,
     )
@@ -18,6 +18,7 @@ use proxmox_schema::*;
 use proxmox_sys::fd::Fd;
 use proxmox_sys::sortable;

+use pbs_api_types::BackupNamespace;
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
 use pbs_config::key_config::load_and_decrypt_key;
@@ -30,7 +31,7 @@ use pbs_tools::json::required_string_param;
 use crate::{
     complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name,
     complete_repository, connect, dir_or_last_from_group, extract_repository_from_value,
-    record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
+    optional_ns_param, record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
 };

 #[sortable]
@@ -39,6 +40,7 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
     &ObjectSchema::new(
         "Mount pxar archive.",
         &sorted!([
+            ("ns", true, &BackupNamespace::API_SCHEMA,),
             (
                 "snapshot",
                 false,
@@ -197,8 +199,9 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {

     record_repository(&repo);

+    let backup_ns = optional_ns_param(&param)?;
     let path = required_string_param(&param, "snapshot")?;
-    let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
+    let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;

     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
     let crypt_config = match keyfile {
@@ -229,6 +232,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         client,
         crypt_config.clone(),
         repo.store(),
+        &backup_ns,
         &backup_dir,
         true,
     )
@@ -1,6 +1,6 @@
 use std::sync::Arc;

-use anyhow::{bail, Error};
+use anyhow::Error;
 use serde_json::{json, Value};

 use proxmox_router::cli::*;
@@ -17,7 +17,7 @@ use pbs_tools::json::required_string_param;
 use crate::{
     api_datastore_list_snapshots, complete_backup_group, complete_backup_snapshot,
     complete_repository, connect, crypto_parameters, extract_repository_from_value,
-    record_repository, BackupDir, List, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
+    optional_ns_param, record_repository, BackupDir, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
 };

 #[api(
@@ -56,17 +56,10 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
         .map(|group| group.parse())
         .transpose()?;

-    let backup_ns: Option<BackupNamespace> =
-        param["ns"].as_str().map(|ns| ns.parse()).transpose()?;
+    let backup_ns = optional_ns_param(&param)?;

-    let list = match (group, backup_ns) {
-        (Some(group), None) => List::Group(group),
-        (None, Some(ns)) => List::Namespace(ns),
-        (None, None) => List::Any,
-        (Some(_), Some(_)) => bail!("'ns' and 'group' parameters are mutually exclusive"),
-    };
-
-    let mut data = api_datastore_list_snapshots(&client, repo.store(), list).await?;
+    let mut data =
+        api_datastore_list_snapshots(&client, repo.store(), &backup_ns, group.as_ref()).await?;

     record_repository(&repo);
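The dropped `List` enum is replaced by freely combinable filters: group and namespace are no longer mutually exclusive. A sketch of the resulting query construction (hypothetical function name, mirroring `api_datastore_list_snapshots` above):

    use anyhow::Error;
    use pbs_api_types::{BackupGroup, BackupNamespace};
    use serde_json::{json, Value};

    fn snapshot_list_args(ns: &BackupNamespace, group: Option<&BackupGroup>) -> Result<Value, Error> {
        // Start from the (optional) group filter...
        let mut args = match group {
            Some(group) => serde_json::to_value(group)?,
            None => json!({}),
        };
        // ...and layer the namespace on top when it is not the root.
        if !ns.is_root() {
            args["backup-ns"] = serde_json::to_value(ns)?;
        }
        Ok(args)
    }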
@@ -17,7 +17,7 @@ use proxmox_sys::fs::{create_path, CreateOptions};
 use pxar::accessor::aio::Accessor;
 use pxar::decoder::aio::Decoder;

-use pbs_api_types::{BackupDir, CryptMode};
+use pbs_api_types::{BackupDir, BackupNamespace, CryptMode};
 use pbs_client::pxar::{create_zip, extract_sub_dir, extract_sub_dir_seq};
 use pbs_client::tools::{
     complete_group_or_snapshot, complete_repository, connect, extract_repository_from_value,
@@ -95,6 +95,7 @@ fn keyfile_path(param: &Value) -> Option<String> {

 async fn list_files(
     repo: BackupRepository,
+    ns: BackupNamespace,
     snapshot: BackupDir,
     path: ExtractPath,
     crypt_config: Option<Arc<CryptConfig>>,
@@ -102,8 +103,15 @@ async fn list_files(
     driver: Option<BlockDriverType>,
 ) -> Result<Vec<ArchiveEntry>, Error> {
     let client = connect(&repo)?;
-    let client =
-        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
+    let client = BackupReader::start(
+        client,
+        crypt_config.clone(),
+        repo.store(),
+        &ns,
+        &snapshot,
+        true,
+    )
+    .await?;

     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -164,70 +172,75 @@ async fn list_files(
 }

 #[api(
-        input: {
-            properties: {
-                repository: {
-                    schema: REPO_URL_SCHEMA,
-                    optional: true,
-                },
-                snapshot: {
-                    type: String,
-                    description: "Group/Snapshot path.",
-                },
-                "path": {
-                    description: "Path to restore. Directories will be restored as .zip files.",
-                    type: String,
-                },
-                "base64": {
-                    type: Boolean,
-                    description: "If set, 'path' will be interpreted as base64 encoded.",
-                    optional: true,
-                    default: false,
-                },
-                keyfile: {
-                    schema: KEYFILE_SCHEMA,
-                    optional: true,
-                },
-                "keyfd": {
-                    schema: KEYFD_SCHEMA,
-                    optional: true,
-                },
-                "crypt-mode": {
-                    type: CryptMode,
-                    optional: true,
-                },
-                "driver": {
-                    type: BlockDriverType,
-                    optional: true,
-                },
-                "output-format": {
-                    schema: OUTPUT_FORMAT,
-                    optional: true,
-                },
-                "json-error": {
-                    type: Boolean,
-                    description: "If set, errors are returned as json instead of writing to stderr",
-                    optional: true,
-                    default: false,
-                },
-                "timeout": {
-                    type: Integer,
-                    description: "Defines the maximum time the call can should take.",
-                    minimum: 1,
-                    optional: true,
-                },
-            }
-        },
-        returns: {
-            description: "A list of elements under the given path",
-            type: Array,
-            items: {
-                type: ArchiveEntry,
-            }
-        }
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            ns: {
+                type: BackupNamespace,
+                optional: true,
+            },
+            snapshot: {
+                type: String,
+                description: "Group/Snapshot path.",
+            },
+            "path": {
+                description: "Path to restore. Directories will be restored as .zip files.",
+                type: String,
+            },
+            "base64": {
+                type: Boolean,
+                description: "If set, 'path' will be interpreted as base64 encoded.",
+                optional: true,
+                default: false,
+            },
+            keyfile: {
+                schema: KEYFILE_SCHEMA,
+                optional: true,
+            },
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
+            "crypt-mode": {
+                type: CryptMode,
+                optional: true,
+            },
+            "driver": {
+                type: BlockDriverType,
+                optional: true,
+            },
+            "output-format": {
+                schema: OUTPUT_FORMAT,
+                optional: true,
+            },
+            "json-error": {
+                type: Boolean,
+                description: "If set, errors are returned as json instead of writing to stderr",
+                optional: true,
+                default: false,
+            },
+            "timeout": {
+                type: Integer,
+                description: "Defines the maximum time the call can should take.",
+                minimum: 1,
+                optional: true,
+            },
+        }
+    },
+    returns: {
+        description: "A list of elements under the given path",
+        type: Array,
+        items: {
+            type: ArchiveEntry,
+        }
+    }
 )]
 /// List a directory from a backup snapshot.
 async fn list(
+    ns: Option<BackupNamespace>,
     snapshot: String,
     path: String,
     base64: bool,
@@ -236,6 +249,7 @@ async fn list(
     param: Value,
 ) -> Result<(), Error> {
     let repo = extract_repository_from_value(&param)?;
+    let ns = ns.unwrap_or_default();
     let snapshot: BackupDir = snapshot.parse()?;
     let path = parse_path(path, base64)?;

@@ -261,7 +275,7 @@ async fn list(
     let result = if let Some(timeout) = timeout {
         match tokio::time::timeout(
             std::time::Duration::from_secs(timeout),
-            list_files(repo, snapshot, path, crypt_config, keyfile, driver),
+            list_files(repo, ns, snapshot, path, crypt_config, keyfile, driver),
         )
         .await
         {
@@ -269,7 +283,7 @@ async fn list(
             Err(_) => Err(http_err!(SERVICE_UNAVAILABLE, "list not finished in time")),
         }
     } else {
-        list_files(repo, snapshot, path, crypt_config, keyfile, driver).await
+        list_files(repo, ns, snapshot, path, crypt_config, keyfile, driver).await
     };

     let output_format = get_output_format(&param);
@@ -316,58 +330,63 @@ async fn list(
 }

 #[api(
-        input: {
-            properties: {
-                repository: {
-                    schema: REPO_URL_SCHEMA,
-                    optional: true,
-                },
-                snapshot: {
-                    type: String,
-                    description: "Group/Snapshot path.",
-                },
-                "path": {
-                    description: "Path to restore. Directories will be restored as .zip files if extracted to stdout.",
-                    type: String,
-                },
-                "base64": {
-                    type: Boolean,
-                    description: "If set, 'path' will be interpreted as base64 encoded.",
-                    optional: true,
-                    default: false,
-                },
-                target: {
-                    type: String,
-                    optional: true,
-                    description: "Target directory path. Use '-' to write to standard output.",
-                },
-                keyfile: {
-                    schema: KEYFILE_SCHEMA,
-                    optional: true,
-                },
-                "keyfd": {
-                    schema: KEYFD_SCHEMA,
-                    optional: true,
-                },
-                "crypt-mode": {
-                    type: CryptMode,
-                    optional: true,
-                },
-                verbose: {
-                    type: Boolean,
-                    description: "Print verbose information",
-                    optional: true,
-                    default: false,
-                },
-                "driver": {
-                    type: BlockDriverType,
-                    optional: true,
-                },
-            }
-        }
+    input: {
+        properties: {
+            repository: {
+                schema: REPO_URL_SCHEMA,
+                optional: true,
+            },
+            ns: {
+                type: BackupNamespace,
+                optional: true,
+            },
+            snapshot: {
+                type: String,
+                description: "Group/Snapshot path.",
+            },
+            "path": {
+                description: "Path to restore. Directories will be restored as .zip files if extracted to stdout.",
+                type: String,
+            },
+            "base64": {
+                type: Boolean,
+                description: "If set, 'path' will be interpreted as base64 encoded.",
+                optional: true,
+                default: false,
+            },
+            target: {
+                type: String,
+                optional: true,
+                description: "Target directory path. Use '-' to write to standard output.",
+            },
+            keyfile: {
+                schema: KEYFILE_SCHEMA,
+                optional: true,
+            },
+            "keyfd": {
+                schema: KEYFD_SCHEMA,
+                optional: true,
+            },
+            "crypt-mode": {
+                type: CryptMode,
+                optional: true,
+            },
+            verbose: {
+                type: Boolean,
+                description: "Print verbose information",
+                optional: true,
+                default: false,
+            },
+            "driver": {
+                type: BlockDriverType,
+                optional: true,
+            },
+        }
+    }
 )]
 /// Restore files from a backup snapshot.
 async fn extract(
+    ns: Option<BackupNamespace>,
     snapshot: String,
     path: String,
     base64: bool,
@@ -376,6 +395,7 @@ async fn extract(
     param: Value,
 ) -> Result<(), Error> {
     let repo = extract_repository_from_value(&param)?;
+    let ns = ns.unwrap_or_default();
     let snapshot: BackupDir = snapshot.parse()?;
     let orig_path = path;
     let path = parse_path(orig_path.clone(), base64)?;
@@ -401,8 +421,15 @@ async fn extract(
     };

     let client = connect(&repo)?;
-    let client =
-        BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
+    let client = BackupReader::start(
+        client,
+        crypt_config.clone(),
+        repo.store(),
+        &ns,
+        &snapshot,
+        true,
+    )
+    .await?;
     let (manifest, _) = client.download_manifest().await?;

     match path {
@ -59,6 +59,7 @@ use pbs_datastore::{
|
||||
use pbs_tools::json::required_string_param;
|
||||
use proxmox_rest_server::{formatter, WorkerTask};
|
||||
|
||||
use crate::api2::backup::optional_ns_param;
|
||||
use crate::api2::node::rrd::create_value_from_rrd;
|
||||
use crate::backup::{verify_all_backups, verify_backup_dir, verify_backup_group, verify_filter};
|
||||
|
||||
@ -66,29 +67,35 @@ use crate::server::jobstate::Job;
|
||||
|
||||
const GROUP_NOTES_FILE_NAME: &str = "notes";
|
||||
|
||||
fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf {
|
||||
let mut note_path = store.base_path();
|
||||
note_path.push(group.to_string());
|
||||
fn get_group_note_path(
|
||||
store: &DataStore,
|
||||
ns: &BackupNamespace,
|
||||
group: &pbs_api_types::BackupGroup,
|
||||
) -> PathBuf {
|
||||
let mut note_path = store.group_path(ns, group);
|
||||
note_path.push(GROUP_NOTES_FILE_NAME);
|
||||
note_path
|
||||
}
|
||||
|
||||
fn check_priv_or_backup_owner(
|
||||
// FIXME: We could probably switch to pbs-datastore::BackupGroup here to replace all of store,
|
||||
// ns and group.
|
||||
store: &DataStore,
|
||||
ns: &BackupNamespace,
|
||||
group: &pbs_api_types::BackupGroup,
|
||||
auth_id: &Authid,
|
||||
required_privs: u64,
|
||||
) -> Result<(), Error> {
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
let privs = if group.ns.is_root() {
|
||||
let privs = if ns.is_root() {
|
||||
user_info.lookup_privs(auth_id, &["datastore", store.name()])
|
||||
} else {
|
||||
user_info.lookup_privs(auth_id, &["datastore", store.name(), &group.ns.to_string()])
|
||||
user_info.lookup_privs(auth_id, &["datastore", store.name(), &ns.to_string()])
|
||||
};
|
||||
|
||||
if privs & required_privs == 0 {
|
||||
let owner = store.get_owner(group)?;
|
||||
let owner = store.get_owner(ns, group)?;
|
||||
check_backup_owner(&owner, auth_id)?;
|
||||
}
|
||||
Ok(())
|
||||
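Every handler below now passes the namespace explicitly next to the group. A minimal sketch of the new call shape, with the handler-local setup assumed (store, group, and auth_id as in the surrounding handlers):

    let backup_ns = backup_ns.unwrap_or_default();
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    // namespace and group are now separate arguments:
    check_priv_or_backup_owner(&datastore, &backup_ns, &group, &auth_id, PRIV_DATASTORE_AUDIT)?;
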
@ -212,10 +219,10 @@ pub fn list_groups(
    let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

    datastore
        .iter_backup_groups(backup_ns)? // FIXME: Namespaces and recursion parameters!
        .iter_backup_groups(backup_ns.clone())? // FIXME: Namespaces and recursion parameters!
        .try_fold(Vec::new(), |mut group_info, group| {
            let group = group?;
            let owner = match datastore.get_owner(group.as_ref()) {
            let owner = match datastore.get_owner(&backup_ns, group.as_ref()) {
                Ok(auth_id) => auth_id,
                Err(err) => {
                    let id = &store;
@ -248,7 +255,7 @@ pub fn list_groups(
                })
                .to_owned();

            let note_path = get_group_note_path(&datastore, group.as_ref());
            let note_path = get_group_note_path(&datastore, &backup_ns, group.as_ref());
            let comment = file_read_firstline(&note_path).ok();

            group_info.push(GroupListItem {
@ -268,6 +275,10 @@ pub fn list_groups(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
@ -283,24 +294,33 @@ pub fn list_groups(
/// Delete backup group including all snapshots.
pub fn delete_group(
    store: String,
    backup_ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    get_ns_privs_checked(
        &store,
        &group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
    check_priv_or_backup_owner(
        &datastore,
        &backup_ns,
        &group,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    if !datastore.remove_backup_group(&group)? {
    if !datastore.remove_backup_group(&backup_ns, &group)? {
        bail!("group only partially deleted due to protected snapshots");
    }

@ -311,6 +331,10 @Q pub fn delete_group(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
@ -327,25 +351,29 @@ pub fn delete_group(
/// List snapshot files.
pub fn list_snapshot_files(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    get_ns_privs_checked(
        &store,
        &backup_dir.group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let snapshot = datastore.backup_dir(backup_dir)?;
    let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        snapshot.backup_ns(),
        snapshot.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
@ -362,6 +390,10 @@ pub fn list_snapshot_files(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
@ -377,30 +409,34 @@ pub fn list_snapshot_files(
/// Delete backup snapshot.
pub fn delete_snapshot(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    _info: &ApiMethod,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    get_ns_privs_checked(
        &store,
        &backup_dir.group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
    let snapshot = datastore.backup_dir(backup_dir)?;
    let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        snapshot.backup_ns(),
        snapshot.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore.remove_backup_dir(snapshot.as_ref(), false)?;
    snapshot.destroy(false)?;

    Ok(Value::Null)
}
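Note how `delete_snapshot` now works on the snapshot handle directly: the datastore-bound `BackupDir` returned by `datastore.backup_dir(ns, dir)` exposes the operations that previously required a round trip through the datastore. A sketch of the pattern that recurs in the handlers below (all calls taken from this diff):

    let snapshot = datastore.backup_dir(backup_ns, backup_dir)?;
    let owner = snapshot.get_owner()?;   // was: datastore.get_owner(...)
    let path = snapshot.full_path();     // was: datastore.snapshot_path(...)
    snapshot.destroy(false)?;            // was: datastore.remove_backup_dir(...)
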
@ -549,7 +585,7 @@ pub fn list_snapshots(
    };

    groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
        let owner = match datastore.get_owner(group.as_ref()) {
        let owner = match group.get_owner() {
            Ok(auth_id) => auth_id,
            Err(err) => {
                eprintln!(
@ -583,7 +619,8 @@ fn get_snapshots_count(
    store
        .iter_backup_groups_ok(Default::default())? // FIXME: Recurse!
        .filter(|group| {
            let owner = match store.get_owner(group.as_ref()) {
            // FIXME: namespace:
            let owner = match store.get_owner(&BackupNamespace::root(), group.as_ref()) {
                Ok(owner) => owner,
                Err(err) => {
                    let id = store.name();
@ -763,7 +800,13 @@ pub fn verify(
            let dir =
                datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;

            check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;
            check_priv_or_backup_owner(
                &datastore,
                dir.backup_ns(),
                dir.as_ref(),
                &auth_id,
                PRIV_DATASTORE_VERIFY,
            )?;

            backup_dir = Some(dir);
            worker_type = "verify_snapshot";
@ -776,11 +819,17 @@ pub fn verify(
                backup_type,
                backup_id
            );
            let group = pbs_api_types::BackupGroup::from((backup_ns, backup_type, backup_id));
            let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));

            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
            check_priv_or_backup_owner(
                &datastore,
                &backup_ns,
                &group,
                &auth_id,
                PRIV_DATASTORE_VERIFY,
            )?;

            backup_group = Some(datastore.backup_group(group));
            backup_group = Some(datastore.backup_group(backup_ns, group));
            worker_type = "verify_group";
        }
        (None, None, None) => {
@ -851,6 +900,10 @@ pub fn verify(
#[api(
    input: {
        properties: {
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
@ -879,6 +932,7 @@ pub fn verify(
)]
/// Prune a group on the datastore
pub fn prune(
    backup_ns: Option<BackupNamespace>,
    group: pbs_api_types::BackupGroup,
    dry_run: bool,
    prune_options: PruneOptions,
@ -887,18 +941,27 @@ pub fn prune(
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let backup_ns = backup_ns.unwrap_or_default();

    get_ns_privs_checked(
        &store,
        &group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_PRUNE,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let group = datastore.backup_group(group);
    let group = datastore.backup_group(backup_ns, group);

    check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;
    check_priv_or_backup_owner(
        &datastore,
        group.backup_ns(),
        group.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    let worker_id = format!("{}:{}", store, group);

@ -962,20 +1025,16 @@ pub fn prune(

        task_log!(worker, "{}", msg);

        let mut result = json!({
        prune_result.push(json!({
            "backup-type": group.ty,
            "backup-id": group.id,
            "backup-time": backup_time,
            "keep": keep,
            "protected": mark.protected(),
        });
        if !group.ns.is_root() {
            result["backup-ns"] = serde_json::to_value(&group.ns)?;
        }
        prune_result.push(result);
        }));

        if !(dry_run || keep) {
            if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
            if let Err(err) = info.backup_dir.destroy(false) {
                task_warn!(
                    worker,
                    "failed to remove dir {:?}: {}",
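Since the API-level group no longer carries a namespace, each prune result entry now has a fixed shape and the conditional `backup-ns` field is gone. One resulting entry would be built roughly like this (variables as in the loop above):

    json!({
        "backup-type": group.ty,
        "backup-id": group.id,
        "backup-time": backup_time,
        "keep": keep,
        "protected": mark.protected(),
    })
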
@ -1231,20 +1290,22 @@ pub fn download_file(
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        get_ns_privs_checked(
            &store,
            &backup_dir.group.ns,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
        )?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
        let backup_dir = datastore.backup_dir(backup_dir)?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.backup_ns(),
            backup_dir.as_ref(),
            &auth_id,
            PRIV_DATASTORE_READ,
@ -1315,20 +1376,22 @@ pub fn download_file_decoded(
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        get_ns_privs_checked(
            &store,
            &backup_dir.group.ns,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
        )?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Read))?;
        let backup_dir = datastore.backup_dir(backup_dir)?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = required_string_param(&param, "file-name")?.to_owned();

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.backup_ns(),
            backup_dir.as_ref(),
            &auth_id,
            PRIV_DATASTORE_READ,
@ -1445,23 +1508,18 @@ pub fn upload_backup_log(
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        get_ns_privs_checked(
            &store,
            &backup_dir.group.ns,
            &auth_id,
            PRIV_DATASTORE_BACKUP,
        )?;
        get_ns_privs_checked(&store, &backup_ns, &auth_id, PRIV_DATASTORE_BACKUP)?;
        let datastore = DataStore::lookup_datastore(store, Some(Operation::Write))?;
        let backup_dir = datastore.backup_dir(backup_dir)?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let file_name = CLIENT_LOG_BLOB_NAME;

        let owner = datastore.get_owner(backup_dir.as_ref())?;
        let owner = backup_dir.get_owner()?;
        check_backup_owner(&owner, &auth_id)?;

        let mut path = datastore.base_path();
        path.push(backup_dir.relative_path());
        let mut path = backup_dir.full_path();
        path.push(&file_name);

        if path.exists() {
@ -1493,6 +1551,10 @@ pub fn upload_backup_log(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
@ -1512,23 +1574,26 @@ pub fn upload_backup_log(
/// Get the entries of the given path of the catalog
pub fn catalog(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    filepath: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    get_ns_privs_checked(
        &store,
        &backup_dir.group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
    )?;

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
    let backup_dir = datastore.backup_dir(backup_dir)?;
    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.backup_ns(),
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_READ,
@ -1600,15 +1665,16 @@ pub fn pxar_file_download(
    async move {
        let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
        let store = required_string_param(&param, "store")?;
        let backup_ns = optional_ns_param(&param)?;
        let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
        get_ns_privs_checked(
            &store,
            &backup_dir.group.ns,
            &backup_ns,
            &auth_id,
            PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP,
        )?;
        let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
        let backup_dir = datastore.backup_dir(backup_dir)?;
        let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

        let filepath = required_string_param(&param, "filepath")?.to_owned();

@ -1616,6 +1682,7 @@ pub fn pxar_file_download(

        check_priv_or_backup_owner(
            &datastore,
            backup_dir.backup_ns(),
            backup_dir.as_ref(),
            &auth_id,
            PRIV_DATASTORE_READ,
@ -1786,6 +1853,10 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
@ -1801,21 +1872,29 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
/// Get "notes" for a backup group
pub fn get_group_notes(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    get_ns_privs_checked(
        &store,
        &backup_group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
    )?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
    check_priv_or_backup_owner(
        &datastore,
        &backup_ns,
        &backup_group,
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
    Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}

@ -1823,6 +1902,10 @@ pub fn get_group_notes(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
@ -1841,22 +1924,30 @@ pub fn get_group_notes(
/// Set "notes" for a backup group
pub fn set_group_notes(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    get_ns_privs_checked(
        &store,
        &backup_group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
    )?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
    check_priv_or_backup_owner(
        &datastore,
        &backup_ns,
        &backup_group,
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    let note_path = get_group_note_path(&datastore, &backup_group);
    let note_path = get_group_note_path(&datastore, &backup_ns, &backup_group);
    replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;

    Ok(())
@ -1866,6 +1957,10 @@ pub fn set_group_notes(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
@ -1881,28 +1976,31 @@ pub fn set_group_notes(
/// Get "notes" for a specific backup
pub fn get_notes(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    get_ns_privs_checked(
        &store,
        &backup_dir.group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
    )?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let backup_dir = datastore.backup_dir(backup_dir)?;
    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.backup_ns(),
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
    )?;

    let (manifest, _) = datastore.load_manifest(&backup_dir)?;
    let (manifest, _) = backup_dir.load_manifest()?;

    let notes = manifest.unprotected["notes"].as_str().unwrap_or("");

@ -1913,6 +2011,10 @@ pub fn get_notes(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
@ -1931,30 +2033,33 @@ pub fn get_notes(
/// Set "notes" for a specific backup
pub fn set_notes(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    notes: String,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    get_ns_privs_checked(
        &store,
        &backup_dir.group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
    )?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_dir = datastore.backup_dir(backup_dir)?;
    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.backup_ns(),
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
    )?;

    datastore
        .update_manifest(&backup_dir, |manifest| {
    backup_dir
        .update_manifest(|manifest| {
            manifest.unprotected["notes"] = notes.into();
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
@ -1966,6 +2071,10 @@ pub fn set_notes(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
@ -1981,22 +2090,25 @@ pub fn set_notes(
/// Query protection for a specific backup
pub fn get_protection(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    get_ns_privs_checked(
        &store,
        &backup_dir.group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_BACKUP,
    )?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let backup_dir = datastore.backup_dir(backup_dir)?;
    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.backup_ns(),
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_AUDIT,
@ -2009,6 +2121,10 @@ pub fn get_protection(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_dir: {
                type: pbs_api_types::BackupDir,
                flatten: true,
@ -2027,23 +2143,26 @@ pub fn get_protection(
/// En- or disable protection for a specific backup
pub fn set_protection(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_dir: pbs_api_types::BackupDir,
    protected: bool,
    rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let backup_ns = backup_ns.unwrap_or_default();
    get_ns_privs_checked(
        &store,
        &backup_dir.group.ns,
        &backup_ns,
        &auth_id,
        PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP,
    )?;
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let backup_dir = datastore.backup_dir(backup_dir)?;
    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;

    check_priv_or_backup_owner(
        &datastore,
        backup_dir.backup_ns(),
        backup_dir.as_ref(),
        &auth_id,
        PRIV_DATASTORE_MODIFY,
@ -2056,6 +2175,10 @@ pub fn set_protection(
    input: {
        properties: {
            store: { schema: DATASTORE_SCHEMA },
            "backup-ns": {
                type: BackupNamespace,
                optional: true,
            },
            backup_group: {
                type: pbs_api_types::BackupGroup,
                flatten: true,
@ -2074,6 +2197,7 @@ pub fn set_protection(
/// Change owner of a backup group
pub fn set_backup_owner(
    store: String,
    backup_ns: Option<BackupNamespace>,
    backup_group: pbs_api_types::BackupGroup,
    new_owner: Authid,
    rpcenv: &mut dyn RpcEnvironment,
@ -2081,13 +2205,14 @@ pub fn set_backup_owner(
    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
    let privs = get_ns_privs(&store, &backup_group.ns, &auth_id)?;
    let backup_group = datastore.backup_group(backup_group);
    let backup_ns = backup_ns.unwrap_or_default();
    let privs = get_ns_privs(&store, &backup_ns, &auth_id)?;
    let backup_group = datastore.backup_group(backup_ns, backup_group);

    let allowed = if (privs & PRIV_DATASTORE_MODIFY) != 0 {
        true // High-privilege user/token
    } else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
        let owner = datastore.get_owner(backup_group.as_ref())?;
        let owner = backup_group.get_owner()?;

        match (owner.is_token(), new_owner.is_token()) {
            (true, true) => {
@ -2137,7 +2262,7 @@ pub fn set_backup_owner(
        );
    }

    datastore.set_owner(backup_group.as_ref(), &new_owner, true)?;
    backup_group.set_owner(&new_owner, true)?;

    Ok(())
}

@ -614,7 +614,7 @@ impl BackupEnvironment {
            .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

        if let Some(base) = &self.last_backup {
            let path = self.datastore.snapshot_path(base.backup_dir.as_ref());
            let path = base.backup_dir.full_path();
            if !path.exists() {
                bail!(
                    "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
@ -710,8 +710,11 @@ impl BackupEnvironment {
        let mut state = self.state.lock().unwrap();
        state.finished = true;

        self.datastore
            .remove_backup_dir(self.backup_dir.as_ref(), true)?;
        self.datastore.remove_backup_dir(
            self.backup_dir.backup_ns(),
            self.backup_dir.as_ref(),
            true,
        )?;

        Ok(())
    }

@ -17,9 +17,9 @@ use proxmox_schema::*;
use proxmox_sys::sortable;

use pbs_api_types::{
    Authid, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
    CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
    Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState,
    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
@ -58,6 +58,14 @@ pub const API_METHOD_UPGRADE_BACKUP: ApiMethod = ApiMethod::new(
    &Permission::Anybody
);

pub(crate) fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
    match param.get("backup-ns") {
        Some(Value::String(ns)) => ns.parse(),
        None => Ok(BackupNamespace::root()),
        _ => bail!("invalid backup-ns parameter"),
    }
}
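A short usage sketch for the new helper, assuming a `serde_json::Value` parameter map as passed to the HTTP handlers (the example values are made up):

    let param = serde_json::json!({ "store": "store2", "backup-ns": "dev/team-a" });
    let ns = optional_ns_param(&param)?;            // parses the string into a BackupNamespace
    let root = optional_ns_param(&serde_json::json!({}))?;
    assert!(root.is_root());                        // absent parameter falls back to the root namespace
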
fn upgrade_to_backup_protocol(
    parts: Parts,
    req_body: Body,
@ -72,9 +80,9 @@ fn upgrade_to_backup_protocol(
    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

    let store = required_string_param(&param, "store")?.to_owned();
    let backup_ns = optional_ns_param(&param)?;
    let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;

    let backup_ns = &backup_dir_arg.group.ns;
    let user_info = CachedUserInfo::new()?;

    let privs = if backup_ns.is_root() {
@ -105,7 +113,7 @@ fn upgrade_to_backup_protocol(
        );
    }

    if !datastore.ns_path(&backup_ns).exists() {
    if !datastore.namespace_path(&backup_ns).exists() {
        proxmox_router::http_bail!(NOT_FOUND, "namespace not found");
    }

@ -113,7 +121,7 @@ fn upgrade_to_backup_protocol(

    let env_type = rpcenv.env_type();

    let backup_group = datastore.backup_group(backup_dir_arg.group.clone());
    let backup_group = datastore.backup_group(backup_ns, backup_dir_arg.group.clone());

    let worker_type = if backup_group.backup_type() == BackupType::Host
        && backup_group.backup_id() == "benchmark"
@ -130,8 +138,11 @@ fn upgrade_to_backup_protocol(
    };

    // lock backup group to only allow one backup per group at a time
    let (owner, _group_guard) =
        datastore.create_locked_backup_group(backup_group.as_ref(), &auth_id)?;
    let (owner, _group_guard) = datastore.create_locked_backup_group(
        backup_group.backup_ns(),
        backup_group.as_ref(),
        &auth_id,
    )?;

    // permission check
    let correct_owner =
@ -169,7 +180,7 @@ fn upgrade_to_backup_protocol(
    }

    // lock last snapshot to prevent forgetting/pruning it during backup
    let full_path = datastore.snapshot_path(last.backup_dir.as_ref());
    let full_path = last.backup_dir.full_path();
    Some(lock_dir_noblock_shared(
        &full_path,
        "snapshot",
@ -179,7 +190,8 @@ fn upgrade_to_backup_protocol(
        None
    };

    let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(backup_dir.as_ref())?;
    let (path, is_new, snap_guard) =
        datastore.create_locked_backup_dir(backup_dir.backup_ns(), backup_dir.as_ref())?;
    if !is_new {
        bail!("backup directory already exists.");
    }
@ -818,7 +830,7 @@ fn download_previous(
        None => bail!("no valid previous backup"),
    };

    let mut path = env.datastore.snapshot_path(last_backup.backup_dir.as_ref());
    let mut path = last_backup.backup_dir.full_path();
    path.push(&archive_name);

    {

@ -29,6 +29,7 @@ use pbs_tools::json::required_string_param;
use proxmox_rest_server::{H2Service, WorkerTask};
use proxmox_sys::fs::lock_dir_noblock_shared;

use crate::api2::backup::optional_ns_param;
use crate::api2::helpers;

mod environment;
@ -91,6 +92,7 @@ fn upgrade_to_backup_reader_protocol(

    let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

    let backup_ns = optional_ns_param(&param)?;
    let backup_dir = pbs_api_types::BackupDir::deserialize(&param)?;

    let protocols = parts
@ -112,9 +114,9 @@ fn upgrade_to_backup_reader_protocol(

    let env_type = rpcenv.env_type();

    let backup_dir = datastore.backup_dir(backup_dir)?;
    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
    if !priv_read {
        let owner = datastore.get_owner(backup_dir.as_ref())?;
        let owner = backup_dir.get_owner()?;
        let correct_owner = owner == auth_id
            || (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
        if !correct_owner {

@ -17,7 +17,7 @@ use pbs_api_types::{

use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::{DataStore, SnapshotReader, StoreProgress};
use pbs_datastore::{DataStore, StoreProgress};
use proxmox_rest_server::WorkerTask;

use crate::{
@ -577,7 +577,7 @@ pub fn backup_snapshot(
) -> Result<bool, Error> {
    task_log!(worker, "backup snapshot {}", snapshot);

    let snapshot_reader = match SnapshotReader::new(datastore.clone(), (&snapshot).into()) {
    let snapshot_reader = match snapshot.locked_reader() {
        Ok(reader) => reader,
        Err(err) => {
            // ignore missing snapshots and continue

@ -17,9 +17,9 @@ use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use proxmox_uuid::Uuid;

use pbs_api_types::{
    Authid, CryptMode, Operation, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
    DRIVE_NAME_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ,
    TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
    Authid, BackupNamespace, CryptMode, Operation, Userid, DATASTORE_MAP_ARRAY_SCHEMA,
    DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
    PRIV_TAPE_READ, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader;
@ -401,6 +401,10 @@ fn restore_list_worker(
    restore_owner: &Authid,
    email: Option<String>,
) -> Result<(), Error> {
    // FIXME: Namespace needs to come from somewhere, `snapshots` is just a snapshot string list
    // here.
    let ns = BackupNamespace::root();

    let base_path: PathBuf = format!("{}/{}", RESTORE_TMP_DIR, media_set_uuid).into();
    std::fs::create_dir_all(&base_path)?;

@ -430,7 +434,7 @@ fn restore_list_worker(
            })?;

            let (owner, _group_lock) =
                datastore.create_locked_backup_group(backup_dir.as_ref(), restore_owner)?;
                datastore.create_locked_backup_group(&ns, backup_dir.as_ref(), restore_owner)?;
            if restore_owner != &owner {
                // only the owner is allowed to create additional snapshots
                task_warn!(
@ -458,7 +462,8 @@ fn restore_list_worker(
                continue;
            };

            let (_rel_path, is_new, snap_lock) = datastore.create_locked_backup_dir(&backup_dir)?;
            let (_rel_path, is_new, snap_lock) =
                datastore.create_locked_backup_dir(&ns, &backup_dir)?;

            if !is_new {
                task_log!(
@ -586,7 +591,7 @@ fn restore_list_worker(
            tmp_path.push(&source_datastore);
            tmp_path.push(snapshot);

            let path = datastore.snapshot_path(&backup_dir);
            let path = datastore.snapshot_path(&ns, &backup_dir);

            for entry in std::fs::read_dir(tmp_path)? {
                let entry = entry?;
@ -1036,12 +1041,17 @@ fn restore_archive<'a>(
                snapshot
            );

            // FIXME: Namespace
            let backup_ns = BackupNamespace::root();
            let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;

            if let Some((store_map, authid)) = target.as_ref() {
                if let Some(datastore) = store_map.get_datastore(&datastore_name) {
                    let (owner, _group_lock) =
                        datastore.create_locked_backup_group(backup_dir.as_ref(), authid)?;
                    let (owner, _group_lock) = datastore.create_locked_backup_group(
                        &backup_ns,
                        backup_dir.as_ref(),
                        authid,
                    )?;
                    if *authid != &owner {
                        // only the owner is allowed to create additional snapshots
                        bail!(
@ -1053,7 +1063,7 @@ fn restore_archive<'a>(
                    }

                    let (rel_path, is_new, _snap_lock) =
                        datastore.create_locked_backup_dir(backup_dir.as_ref())?;
                        datastore.create_locked_backup_dir(&backup_ns, backup_dir.as_ref())?;
                    let mut path = datastore.base_path();
                    path.push(rel_path);

@ -8,7 +8,9 @@ use anyhow::{bail, format_err, Error};

use proxmox_sys::{task_log, WorkerTaskContext};

use pbs_api_types::{Authid, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID};
use pbs_api_types::{
    Authid, BackupNamespace, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID,
};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
@ -324,7 +326,7 @@ pub fn verify_backup_dir(
    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<bool, Error> {
    let snap_lock = lock_dir_noblock_shared(
        &verify_worker.datastore.snapshot_path(backup_dir.as_ref()),
        &backup_dir.full_path(),
        "snapshot",
        "locked by another operation",
    );
@ -510,7 +512,13 @@ pub fn verify_all_backups(
    }

    let filter_by_owner = |group: &BackupGroup| {
        match (verify_worker.datastore.get_owner(group.as_ref()), &owner) {
        match (
            // FIXME: with recursion the namespace needs to come from the iterator...
            verify_worker
                .datastore
                .get_owner(&BackupNamespace::root(), group.as_ref()),
            &owner,
        ) {
            (Ok(ref group_owner), Some(owner)) => {
                group_owner == owner
                    || (group_owner.is_token()

@ -45,11 +45,12 @@ pub fn prune_datastore(
    let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;

    // FIXME: Namespace recursion!
    for group in datastore.iter_backup_groups(ns)? {
    for group in datastore.iter_backup_groups(ns.clone())? {
        let ns_recursed = &ns; // remove_backup_dir might need the inner one
        let group = group?;
        let list = group.list_backups()?;

        if !has_privs && !datastore.owns_backup(group.as_ref(), &auth_id)? {
        if !has_privs && !datastore.owns_backup(&ns_recursed, group.as_ref(), &auth_id)? {
            continue;
        }

@ -75,7 +76,9 @@ pub fn prune_datastore(
                info.backup_dir.backup_time_string()
            );
            if !keep && !dry_run {
                if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
                if let Err(err) =
                    datastore.remove_backup_dir(ns_recursed, info.backup_dir.as_ref(), false)
                {
                    task_warn!(
                        worker,
                        "failed to remove dir {:?}: {}",

@ -15,7 +15,8 @@ use proxmox_router::HttpError;
use proxmox_sys::task_log;

use pbs_api_types::{
    Authid, GroupFilter, GroupListItem, Operation, RateLimitConfig, Remote, SnapshotListItem,
    Authid, BackupNamespace, GroupFilter, GroupListItem, Operation, RateLimitConfig, Remote,
    SnapshotListItem,
};

use pbs_client::{
@ -504,7 +505,9 @@ async fn pull_snapshot_from(
    snapshot: &pbs_api_types::BackupDir,
    downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<(), Error> {
    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(snapshot)?;
    // FIXME: Namespace support requires source AND target namespace
    let ns = BackupNamespace::root();
    let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(&ns, snapshot)?;

    let snapshot_path = snapshot.to_string();
    if is_new {
@ -519,7 +522,7 @@ async fn pull_snapshot_from(
        )
        .await
        {
            if let Err(cleanup_err) = tgt_store.remove_backup_dir(snapshot, true) {
            if let Err(cleanup_err) = tgt_store.remove_backup_dir(&ns, snapshot, true) {
                task_log!(worker, "cleanup error - {}", cleanup_err);
            }
            return Err(err);
@ -604,6 +607,9 @@ async fn pull_group(
    group: &pbs_api_types::BackupGroup,
    progress: &mut StoreProgress,
) -> Result<(), Error> {
    // FIXME: Namespace support
    let ns = BackupNamespace::root();

    let path = format!(
        "api2/json/admin/datastore/{}/snapshots",
        params.source.store()
@ -623,7 +629,7 @@ async fn pull_group(

    let fingerprint = client.fingerprint();

    let last_sync = params.store.last_successful_backup(group)?;
    let last_sync = params.store.last_successful_backup(&ns, group)?;

    let mut remote_snapshots = std::collections::HashSet::new();

@ -674,8 +680,15 @@ async fn pull_group(
            options,
        )?;

        let reader =
            BackupReader::start(new_client, None, params.source.store(), &snapshot, true).await?;
        let reader = BackupReader::start(
            new_client,
            None,
            params.source.store(),
            &ns,
            &snapshot,
            true,
        )
        .await?;

        let result = pull_snapshot_from(
            worker,
@ -693,7 +706,7 @@ async fn pull_group(
    }

    if params.remove_vanished {
        let group = params.store.backup_group(group.clone());
        let group = params.store.backup_group(ns.clone(), group.clone());
        let local_list = group.list_backups()?;
        for info in local_list {
            let backup_time = info.backup_dir.backup_time();
@ -715,7 +728,7 @@ async fn pull_group(
            );
            params
                .store
                .remove_backup_dir(info.backup_dir.as_ref(), false)?;
                .remove_backup_dir(&ns, info.backup_dir.as_ref(), false)?;
        }
    }

@ -744,6 +757,10 @@ pub async fn pull_store(
    client: &HttpClient,
    params: &PullParameters,
) -> Result<(), Error> {
    // FIXME: Namespace support requires source AND target namespace
    let ns = BackupNamespace::root();
    let local_ns = BackupNamespace::root();

    // explicit create shared lock to prevent GC on newly created chunks
    let _shared_store_lock = params.store.try_shared_chunk_store_lock()?;

@ -806,22 +823,23 @@ pub async fn pull_store(
        progress.done_snapshots = 0;
        progress.group_snapshots = 0;

        let (owner, _lock_guard) = match params
            .store
            .create_locked_backup_group(&group, &params.owner)
        {
            Ok(result) => result,
            Err(err) => {
                task_log!(
                    worker,
                    "sync group {} failed - group lock failed: {}",
                    &group,
                    err
                );
                errors = true; // do not stop here, instead continue
                continue;
            }
        };
        let (owner, _lock_guard) =
            match params
                .store
                .create_locked_backup_group(&ns, &group, &params.owner)
            {
                Ok(result) => result,
                Err(err) => {
                    task_log!(
                        worker,
                        "sync group {} failed - group lock failed: {}",
                        &group,
                        err
                    );
                    errors = true; // do not stop here, instead continue
                    continue;
                }
            };

        // permission check
        if params.owner != owner {
@ -848,7 +866,7 @@ pub async fn pull_store(
            if new_groups.contains(local_group.as_ref()) {
                continue;
            }
            let owner = params.store.get_owner(&local_group.group())?;
            let owner = params.store.get_owner(&local_ns, &local_group.group())?;
            if check_backup_owner(&owner, &params.owner).is_err() {
                continue;
            }
@ -863,7 +881,7 @@ pub async fn pull_store(
                local_group.backup_type(),
                local_group.backup_id()
            );
            match params.store.remove_backup_group(local_group.as_ref()) {
            match params.store.remove_backup_group(&ns, local_group.as_ref()) {
                Ok(true) => {}
                Ok(false) => {
                    task_log!(