tree-wide: remove DatastoreWithNamespace

Instead, move the acl_path helper to BackupNamespace and introduce a new
helper for printing a store + namespace combination when logging or
generating error messages.

Suggested-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fabian Grünbichler 2022-05-25 15:14:56 +02:00 committed by Thomas Lamprecht
parent 974a3e521a
commit abd8248520
15 changed files with 243 additions and 377 deletions


@ -694,6 +694,17 @@ impl BackupNamespace {
}
Ok(())
}
pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
let mut path: Vec<&str> = vec!["datastore", store];
if self.is_root() {
path
} else {
path.extend(self.inner.iter().map(|comp| comp.as_str()));
path
}
}
}
impl fmt::Display for BackupNamespace {
@ -1026,35 +1037,6 @@ impl fmt::Display for BackupDir {
}
}
/// Helper struct for places where sensible formatting of store+NS combo is required
pub struct DatastoreWithNamespace {
pub store: String,
pub ns: BackupNamespace,
}
impl fmt::Display for DatastoreWithNamespace {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.ns.is_root() {
write!(f, "datastore {}, root namespace", self.store)
} else {
write!(f, "datastore '{}', namespace '{}'", self.store, self.ns)
}
}
}
impl DatastoreWithNamespace {
pub fn acl_path(&self) -> Vec<&str> {
let mut path: Vec<&str> = vec!["datastore", &self.store];
if self.ns.is_root() {
path
} else {
path.extend(self.ns.inner.iter().map(|comp| comp.as_str()));
path
}
}
}
/// Used when both a backup group or a directory can be valid.
pub enum BackupPart {
Group(BackupGroup),
@ -1479,3 +1461,12 @@ pub fn print_ns_and_snapshot(ns: &BackupNamespace, dir: &BackupDir) -> String {
format!("{}/{}", ns.display_as_path(), dir)
}
}
/// Prints a Datastore name and [`BackupNamespace`] for logs/errors.
pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
if ns.is_root() {
format!("datastore '{}', root namespace", store)
} else {
format!("datastore '{}', namespace '{}'", store, ns)
}
}

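For reference, the two helpers introduced above can be exercised standalone. The sketch below is not the pbs-api-types code: BackupNamespace is reduced to a bare list of path components and all validation is dropped, so only the acl_path and print_store_and_ns logic mirrors the hunks above.

use std::fmt;

/// Stripped-down stand-in for pbs_api_types::BackupNamespace.
#[derive(Default, Clone)]
pub struct BackupNamespace {
    inner: Vec<String>,
}

impl BackupNamespace {
    pub fn is_root(&self) -> bool {
        self.inner.is_empty()
    }

    /// ACL object path on the given datastore, e.g. ["datastore", "store1", "a", "b"].
    pub fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        let mut path: Vec<&str> = vec!["datastore", store];
        // the root namespace contributes no further components
        path.extend(self.inner.iter().map(|comp| comp.as_str()));
        path
    }
}

impl fmt::Display for BackupNamespace {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.inner.join("/"))
    }
}

/// Free function replacing DatastoreWithNamespace's Display implementation.
pub fn print_store_and_ns(store: &str, ns: &BackupNamespace) -> String {
    if ns.is_root() {
        format!("datastore '{}', root namespace", store)
    } else {
        format!("datastore '{}', namespace '{}'", store, ns)
    }
}

fn main() {
    let ns = BackupNamespace {
        inner: vec!["prod".to_string(), "vms".to_string()],
    };
    assert_eq!(ns.acl_path("store1"), ["datastore", "store1", "prod", "vms"]);
    println!("{}", print_store_and_ns("store1", &ns)); // datastore 'store1', namespace 'prod/vms'
    println!("{}", print_store_and_ns("store1", &BackupNamespace::default())); // root namespace
}

Note that the removed Display impl left the store name unquoted in the root-namespace branch ("datastore {}, root namespace"), while print_store_and_ns quotes it in both branches; that is the only user-visible wording change.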

@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize};
use proxmox_schema::*;
use crate::{
Authid, BackupNamespace, BackupType, DatastoreWithNamespace, RateLimitConfig, Userid,
BACKUP_GROUP_SCHEMA, BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA,
MEDIA_POOL_NAME_SCHEMA, NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
NS_MAX_DEPTH_REDUCED_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
SINGLE_LINE_COMMENT_SCHEMA,
};
@ -224,10 +224,10 @@ pub struct VerificationJobConfig {
}
impl VerificationJobConfig {
pub fn store_with_ns(&self) -> DatastoreWithNamespace {
DatastoreWithNamespace {
store: self.store.clone(),
ns: self.ns.clone().unwrap_or_default(),
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
}
@ -508,10 +508,10 @@ pub struct SyncJobConfig {
}
impl SyncJobConfig {
pub fn store_with_ns(&self) -> DatastoreWithNamespace {
DatastoreWithNamespace {
store: self.store.clone(),
ns: self.ns.clone().unwrap_or_default(),
pub fn acl_path(&self) -> Vec<&str> {
match self.ns.as_ref() {
Some(ns) => ns.acl_path(&self.store),
None => vec!["datastore", &self.store],
}
}
}

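Both job config types now expose the same acl_path helper; the Option handling boils down to the following self-contained toy (field names taken from the hunks above, everything else omitted or stubbed):

/// Toy stand-in for the namespace type; only what acl_path needs.
struct BackupNamespace {
    inner: Vec<String>,
}

impl BackupNamespace {
    fn acl_path<'a>(&'a self, store: &'a str) -> Vec<&'a str> {
        let mut path = vec!["datastore", store];
        path.extend(self.inner.iter().map(|c| c.as_str()));
        path
    }
}

struct SyncJobConfig {
    store: String,
    ns: Option<BackupNamespace>,
}

impl SyncJobConfig {
    fn acl_path(&self) -> Vec<&str> {
        match self.ns.as_ref() {
            // job targets a namespace below the datastore root
            Some(ns) => ns.acl_path(&self.store),
            // no namespace configured: check on the datastore itself
            None => vec!["datastore", &self.store],
        }
    }
}

fn main() {
    let job = SyncJobConfig {
        store: "store1".to_string(),
        ns: Some(BackupNamespace {
            inner: vec!["prod".to_string()],
        }),
    };
    assert_eq!(job.acl_path(), ["datastore", "store1", "prod"]);

    let root_job = SyncJobConfig {
        store: "store1".to_string(),
        ns: None,
    };
    assert_eq!(root_job.acl_path(), ["datastore", "store1"]);
}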

@ -8,7 +8,7 @@ use nix::dir::Dir;
use proxmox_sys::fs::lock_dir_noblock_shared;
use pbs_api_types::{BackupNamespace, DatastoreWithNamespace, Operation};
use pbs_api_types::{print_store_and_ns, BackupNamespace, Operation};
use crate::backup_info::BackupDir;
use crate::dynamic_index::DynamicIndexReader;
@ -39,10 +39,6 @@ impl SnapshotReader {
pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
let datastore = snapshot.datastore();
let store_with_ns = DatastoreWithNamespace {
store: datastore.name().to_owned(),
ns: snapshot.backup_ns().clone(),
};
let snapshot_path = snapshot.full_path();
let locked_dir =
@ -54,7 +50,7 @@ impl SnapshotReader {
Err(err) => {
bail!(
"manifest load error on {}, snapshot '{}' - {}",
store_with_ns,
print_store_and_ns(datastore.name(), snapshot.backup_ns()),
snapshot.dir(),
err
);


@ -32,14 +32,14 @@ use pxar::accessor::aio::Accessor;
use pxar::EntryKind;
use pbs_api_types::{
print_ns_and_snapshot, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode,
DataStoreListItem, DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus,
GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
print_ns_and_snapshot, print_store_and_ns, Authid, BackupContent, BackupNamespace, BackupType,
Counts, CryptMode, DataStoreListItem, DataStoreStatus, GarbageCollectionStatus, GroupListItem,
Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA,
VERIFICATION_OUTDATED_AFTER_SCHEMA,
};
use pbs_client::pxar::{create_tar, create_zip};
use pbs_config::CachedUserInfo;
@ -86,24 +86,20 @@ fn get_group_note_path(
// 2. load datastore
// 3. if needed (only limited access), check owner of group
fn check_privs_and_load_store(
store_with_ns: &DatastoreWithNamespace,
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
full_access_privs: u64,
partial_access_privs: u64,
operation: Option<Operation>,
backup_group: &pbs_api_types::BackupGroup,
) -> Result<Arc<DataStore>, Error> {
let limited = check_ns_privs_full(
store_with_ns,
auth_id,
full_access_privs,
partial_access_privs,
)?;
let limited = check_ns_privs_full(store, ns, auth_id, full_access_privs, partial_access_privs)?;
let datastore = DataStore::lookup_datastore(&store_with_ns.store, operation)?;
let datastore = DataStore::lookup_datastore(store, operation)?;
if limited {
let owner = datastore.get_owner(&store_with_ns.ns, backup_group)?;
let owner = datastore.get_owner(ns, backup_group)?;
check_backup_owner(&owner, &auth_id)?;
}
@ -186,33 +182,30 @@ pub fn list_groups(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<GroupListItem>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store: store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let list_all = !check_ns_privs_full(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
)?;
let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
datastore
.iter_backup_groups(store_with_ns.ns.clone())? // FIXME: Namespaces and recursion parameters!
.iter_backup_groups(ns.clone())? // FIXME: Namespaces and recursion parameters!
.try_fold(Vec::new(), |mut group_info, group| {
let group = group?;
let owner = match datastore.get_owner(&store_with_ns.ns, group.as_ref()) {
let owner = match datastore.get_owner(&ns, group.as_ref()) {
Ok(auth_id) => auth_id,
Err(err) => {
eprintln!(
"Failed to get owner of group '{}' in {} - {}",
group.group(),
store_with_ns,
print_store_and_ns(&store, &ns),
err
);
return Ok(group_info);
@ -243,7 +236,7 @@ pub fn list_groups(
})
.to_owned();
let note_path = get_group_note_path(&datastore, &store_with_ns.ns, group.as_ref());
let note_path = get_group_note_path(&datastore, &ns, group.as_ref());
let comment = file_read_firstline(&note_path).ok();
group_info.push(GroupListItem {
@ -288,14 +281,11 @@ pub fn delete_group(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE,
@ -303,7 +293,7 @@ pub fn delete_group(
&group,
)?;
if !datastore.remove_backup_group(&store_with_ns.ns, &group)? {
if !datastore.remove_backup_group(&ns, &group)? {
bail!("group only partially deleted due to protected snapshots");
}
@ -340,14 +330,11 @@ pub fn list_snapshot_files(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<BackupContent>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store: store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
@ -355,7 +342,7 @@ pub fn list_snapshot_files(
&backup_dir.group,
)?;
let snapshot = datastore.backup_dir(store_with_ns.ns, backup_dir)?;
let snapshot = datastore.backup_dir(ns, backup_dir)?;
let info = BackupInfo::new(snapshot)?;
@ -393,14 +380,11 @@ pub fn delete_snapshot(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store: store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE,
@ -408,7 +392,7 @@ pub fn delete_snapshot(
&backup_dir.group,
)?;
let snapshot = datastore.backup_dir(store_with_ns.ns, backup_dir)?;
let snapshot = datastore.backup_dir(ns, backup_dir)?;
snapshot.destroy(false)?;
@ -454,38 +438,35 @@ pub fn list_snapshots(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.clone(),
};
let list_all = !check_ns_privs_full(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
)?;
let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
// FIXME: filter also owner before collecting, for doing that nicely the owner should move into
// backup group and provide an error free (Err -> None) accessor
let groups = match (backup_type, backup_id) {
(Some(backup_type), Some(backup_id)) => {
vec![datastore.backup_group_from_parts(ns, backup_type, backup_id)]
vec![datastore.backup_group_from_parts(ns.clone(), backup_type, backup_id)]
}
// FIXME: Recursion
(Some(backup_type), None) => datastore
.iter_backup_groups_ok(ns)?
.iter_backup_groups_ok(ns.clone())?
.filter(|group| group.backup_type() == backup_type)
.collect(),
// FIXME: Recursion
(None, Some(backup_id)) => datastore
.iter_backup_groups_ok(ns)?
.iter_backup_groups_ok(ns.clone())?
.filter(|group| group.backup_id() == backup_id)
.collect(),
// FIXME: Recursion
(None, None) => datastore.list_backup_groups(ns)?,
(None, None) => datastore.list_backup_groups(ns.clone())?,
};
let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
@ -567,7 +548,7 @@ pub fn list_snapshots(
eprintln!(
"Failed to get owner of group '{}' in {} - {}",
group.group(),
&store_with_ns,
print_store_and_ns(&store, &ns),
err
);
return Ok(snapshots);
@ -745,19 +726,16 @@ pub fn verify(
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let ns = ns.unwrap_or_default();
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.clone(),
};
let owner_check_required = check_ns_privs_full(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_VERIFY,
PRIV_DATASTORE_BACKUP,
)?;
let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Read))?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let ignore_verified = ignore_verified.unwrap_or(true);
let worker_id;
@ -770,7 +748,7 @@ pub fn verify(
(Some(backup_type), Some(backup_id), Some(backup_time)) => {
worker_id = format!(
"{}:{}/{}/{}/{:08X}",
store_with_ns.store,
store,
ns.display_as_path(),
backup_type,
backup_id,
@ -790,7 +768,7 @@ pub fn verify(
(Some(backup_type), Some(backup_id), None) => {
worker_id = format!(
"{}:{}/{}/{}",
store_with_ns.store,
store,
ns.display_as_path(),
backup_type,
backup_id
@ -807,9 +785,9 @@ pub fn verify(
}
(None, None, None) => {
worker_id = if ns.is_root() {
format!("{}", store_with_ns.store)
store
} else {
format!("{}:{}", store_with_ns.store, ns.display_as_path())
format!("{}:{}", store, ns.display_as_path())
};
}
_ => bail!("parameters do not specify a backup group or snapshot"),
@ -921,13 +899,11 @@ pub fn prune(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_PRUNE,
@ -935,8 +911,8 @@ pub fn prune(
&group,
)?;
let worker_id = format!("{}:{}:{}", store_with_ns.store, store_with_ns.ns, group);
let group = datastore.backup_group(store_with_ns.ns.clone(), group);
let worker_id = format!("{}:{}:{}", store, ns, group);
let group = datastore.backup_group(ns.clone(), group);
let mut prune_result = Vec::new();
@ -982,7 +958,7 @@ pub fn prune(
task_log!(
worker,
"Starting prune on {} group \"{}\"",
store_with_ns,
print_store_and_ns(&store, &ns),
group.group(),
);
}
@ -1177,13 +1153,8 @@ fn can_access_any_ns(store: Arc<DataStore>, auth_id: &Authid, user_info: &Cached
};
let wanted =
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP;
let name = store.name();
iter.any(|ns| -> bool {
let store_with_ns = DatastoreWithNamespace {
store: name.to_string(),
ns: ns,
};
let user_privs = user_info.lookup_privs(&auth_id, &store_with_ns.acl_path());
let user_privs = user_info.lookup_privs(&auth_id, &ns.acl_path(store.name()));
user_privs & wanted != 0
})
}
@ -1275,13 +1246,10 @@ pub fn download_file(
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: backup_ns.clone(),
};
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&backup_ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
@ -1293,7 +1261,10 @@ pub fn download_file(
println!(
"Download {} from {} ({}/{})",
file_name, store_with_ns, backup_dir, file_name
file_name,
print_store_and_ns(&store, &backup_ns),
backup_dir,
file_name
);
let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
@ -1359,13 +1330,11 @@ pub fn download_file_decoded(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: backup_ns.clone(),
};
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&backup_ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
@ -1374,7 +1343,7 @@ pub fn download_file_decoded(
)?;
let file_name = required_string_param(&param, "file-name")?.to_owned();
let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
let (manifest, files) = read_backup_index(&backup_dir)?;
for file in files {
@ -1385,7 +1354,10 @@ pub fn download_file_decoded(
println!(
"Download {} from {} ({}/{})",
file_name, store_with_ns, backup_dir_api, file_name
file_name,
print_store_and_ns(&store, &backup_ns),
backup_dir_api,
file_name
);
let mut path = datastore.base_path();
@ -1488,21 +1460,19 @@ pub fn upload_backup_log(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: backup_ns.clone(),
};
let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&backup_ns,
&auth_id,
0,
PRIV_DATASTORE_BACKUP,
Some(Operation::Write),
&backup_dir_api.group,
)?;
let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
let backup_dir = datastore.backup_dir(backup_ns.clone(), backup_dir_api.clone())?;
let file_name = CLIENT_LOG_BLOB_NAME;
@ -1513,7 +1483,10 @@ pub fn upload_backup_log(
bail!("backup already contains a log.");
}
println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");
println!(
"Upload backup log to {} {backup_dir_api}/{file_name}",
print_store_and_ns(&store, &backup_ns),
);
let data = req_body
.map_err(Error::from)
@ -1567,13 +1540,11 @@ pub fn catalog(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Vec<ArchiveEntry>, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
@ -1581,7 +1552,7 @@ pub fn catalog(
&backup_dir.group,
)?;
let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
let file_name = CATALOG_NAME;
@ -1650,13 +1621,11 @@ pub fn pxar_file_download(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store = required_string_param(&param, "store")?;
let ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.to_owned(),
ns: ns.clone(),
};
let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_READ,
PRIV_DATASTORE_BACKUP,
@ -1859,13 +1828,11 @@ pub fn get_group_notes(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
@ -1873,7 +1840,7 @@ pub fn get_group_notes(
&backup_group,
)?;
let note_path = get_group_note_path(&datastore, &store_with_ns.ns, &backup_group);
let note_path = get_group_note_path(&datastore, &ns, &backup_group);
Ok(file_read_optional_string(note_path)?.unwrap_or_else(|| "".to_owned()))
}
@ -1909,12 +1876,11 @@ pub fn set_group_notes(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
@ -1922,7 +1888,7 @@ pub fn set_group_notes(
&backup_group,
)?;
let note_path = get_group_note_path(&datastore, &store_with_ns.ns, &backup_group);
let note_path = get_group_note_path(&datastore, &ns, &backup_group);
replace_file(note_path, notes.as_bytes(), CreateOptions::new(), false)?;
Ok(())
@ -1956,13 +1922,11 @@ pub fn get_notes(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<String, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store: store.clone(),
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
@ -1970,7 +1934,7 @@ pub fn get_notes(
&backup_dir.group,
)?;
let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
let (manifest, _) = backup_dir.load_manifest()?;
@ -2011,13 +1975,11 @@ pub fn set_notes(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
@ -2025,7 +1987,7 @@ pub fn set_notes(
&backup_dir.group,
)?;
let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
backup_dir
.update_manifest(|manifest| {
@ -2064,12 +2026,10 @@ pub fn get_protection(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<bool, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP,
@ -2077,7 +2037,7 @@ pub fn get_protection(
&backup_dir.group,
)?;
let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
Ok(backup_dir.is_protected())
}
@ -2114,12 +2074,10 @@ pub fn set_protection(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let datastore = check_privs_and_load_store(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
@ -2127,7 +2085,7 @@ pub fn set_protection(
&backup_dir.group,
)?;
let backup_dir = datastore.backup_dir(store_with_ns.ns.clone(), backup_dir)?;
let backup_dir = datastore.backup_dir(ns.clone(), backup_dir)?;
datastore.update_protection(&backup_dir, protected)
}
@ -2164,20 +2122,18 @@ pub fn set_backup_owner(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store,
ns: ns.unwrap_or_default(),
};
let ns = ns.unwrap_or_default();
let owner_check_required = check_ns_privs_full(
&store_with_ns,
&store,
&ns,
&auth_id,
PRIV_DATASTORE_MODIFY,
PRIV_DATASTORE_BACKUP,
)?;
let datastore = DataStore::lookup_datastore(&store_with_ns.store, Some(Operation::Write))?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let backup_group = datastore.backup_group(store_with_ns.ns, backup_group);
let backup_group = datastore.backup_group(ns, backup_group);
if owner_check_required {
let owner = backup_group.get_owner()?;


@ -6,9 +6,8 @@ use proxmox_router::{ApiMethod, Permission, Router, RpcEnvironment};
use proxmox_schema::*;
use pbs_api_types::{
Authid, BackupNamespace, DatastoreWithNamespace, NamespaceListItem, Operation,
DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PROXMOX_SAFE_ID_FORMAT,
Authid, BackupNamespace, NamespaceListItem, Operation, DATASTORE_SCHEMA, NS_MAX_DEPTH_SCHEMA,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PROXMOX_SAFE_ID_FORMAT,
};
use pbs_datastore::DataStore;
@ -54,12 +53,7 @@ pub fn create_namespace(
let mut ns = parent.clone();
ns.push(name.clone())?;
let store_with_ns = DatastoreWithNamespace {
store: store.clone(),
ns,
};
check_ns_modification_privs(&store_with_ns, &auth_id)?;
check_ns_modification_privs(&store, &ns, &auth_id)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
@ -102,12 +96,8 @@ pub fn list_namespaces(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
const PRIVS_OK: u64 = PRIV_DATASTORE_MODIFY | PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT;
// first do a base check to avoid leaking if a NS exists or not
let store_with_parent = DatastoreWithNamespace {
store: store.clone(),
ns: parent.clone(),
};
check_ns_privs(&store_with_parent, &auth_id, PRIVS_OK)?;
check_ns_privs(&store, &parent, &auth_id, PRIVS_OK)?;
let user_info = CachedUserInfo::new()?;
@ -122,7 +112,7 @@ pub fn list_namespaces(
if ns.is_root() {
return true; // already covered by access permission above
}
let privs = user_info.lookup_privs(&auth_id, &["datastore", &store, &ns.to_string()]);
let privs = user_info.lookup_privs(&auth_id, &ns.acl_path(&store));
privs & PRIVS_OK != 0
})
.map(ns_to_item)
@ -158,11 +148,8 @@ pub fn delete_namespace(
rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let store_with_ns = DatastoreWithNamespace {
store: store.clone(),
ns: ns.clone(),
};
check_ns_modification_privs(&store_with_ns, &auth_id)?;
check_ns_modification_privs(&store, &ns, &auth_id)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;


@ -58,7 +58,7 @@ pub fn list_verification_jobs(
.convert_to_typed_array("verification")?
.into_iter()
.filter(|job: &VerificationJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &job.store_with_ns().acl_path());
let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
if privs & required_privs == 0 {
return false;
}
@ -114,11 +114,9 @@ pub fn run_verification_job(
let (config, _digest) = verify::config()?;
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let store_with_ns = verification_job.store_with_ns();
user_info.check_privs(
&auth_id,
&store_with_ns.acl_path(),
&verification_job.acl_path(),
PRIV_DATASTORE_VERIFY,
true,
)?;


@ -17,10 +17,9 @@ use proxmox_schema::*;
use proxmox_sys::sortable;
use pbs_api_types::{
Authid, BackupNamespace, BackupType, DatastoreWithNamespace, Operation, SnapshotVerifyState,
VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
PRIV_DATASTORE_BACKUP,
Authid, BackupNamespace, BackupType, Operation, SnapshotVerifyState, VerifyState,
BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
@ -82,10 +81,6 @@ fn upgrade_to_backup_protocol(
let store = required_string_param(&param, "store")?.to_owned();
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.clone(),
ns: backup_ns.clone(),
};
let backup_dir_arg = pbs_api_types::BackupDir::deserialize(&param)?;
let user_info = CachedUserInfo::new()?;
@ -93,7 +88,7 @@ fn upgrade_to_backup_protocol(
user_info
.check_privs(
&auth_id,
&store_with_ns.acl_path(),
&backup_ns.acl_path(&store),
PRIV_DATASTORE_BACKUP,
false,
)


@ -20,7 +20,7 @@ pub fn check_sync_job_read_access(
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.store_with_ns().acl_path());
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_AUDIT == 0 {
return false;
}
@ -38,7 +38,7 @@ pub fn check_sync_job_modify_access(
auth_id: &Authid,
job: &SyncJobConfig,
) -> bool {
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.store_with_ns().acl_path());
let ns_anchor_privs = user_info.lookup_privs(auth_id, &job.acl_path());
if ns_anchor_privs & PRIV_DATASTORE_BACKUP == 0 {
return false;
}


@ -45,7 +45,7 @@ pub fn list_verification_jobs(
let list = list
.into_iter()
.filter(|job: &VerificationJobConfig| {
let privs = user_info.lookup_privs(&auth_id, &job.store_with_ns().acl_path());
let privs = user_info.lookup_privs(&auth_id, &job.acl_path());
privs & required_privs != 00
})
@ -79,12 +79,7 @@ pub fn create_verification_job(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let user_info = CachedUserInfo::new()?;
user_info.check_privs(
&auth_id,
&config.store_with_ns().acl_path(),
PRIV_DATASTORE_VERIFY,
false,
)?;
user_info.check_privs(&auth_id, &config.acl_path(), PRIV_DATASTORE_VERIFY, false)?;
let _lock = verify::lock_config()?;
@ -130,12 +125,7 @@ pub fn read_verification_job(
let verification_job: VerificationJobConfig = config.lookup("verification", &id)?;
let required_privs = PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY;
user_info.check_privs(
&auth_id,
&verification_job.store_with_ns().acl_path(),
required_privs,
true,
)?;
user_info.check_privs(&auth_id, &verification_job.acl_path(), required_privs, true)?;
rpcenv["digest"] = hex::encode(&digest).into();
@ -216,12 +206,7 @@ pub fn update_verification_job(
let mut data: VerificationJobConfig = config.lookup("verification", &id)?;
// check existing store and NS
user_info.check_privs(
&auth_id,
&data.store_with_ns().acl_path(),
PRIV_DATASTORE_VERIFY,
true,
)?;
user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
if let Some(delete) = delete {
for delete_prop in delete {
@ -283,12 +268,7 @@ pub fn update_verification_job(
}
// check new store and NS
user_info.check_privs(
&auth_id,
&data.store_with_ns().acl_path(),
PRIV_DATASTORE_VERIFY,
true,
)?;
user_info.check_privs(&auth_id, &data.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
config.set_data(&id, "verification", &data)?;
@ -333,12 +313,7 @@ pub fn delete_verification_job(
let (mut config, expected_digest) = verify::config()?;
let job: VerificationJobConfig = config.lookup("verification", &id)?;
user_info.check_privs(
&auth_id,
&job.store_with_ns().acl_path(),
PRIV_DATASTORE_VERIFY,
true,
)?;
user_info.check_privs(&auth_id, &job.acl_path(), PRIV_DATASTORE_VERIFY, true)?;
if let Some(ref digest) = digest {
let digest = <[u8; 32]>::from_hex(digest)?;


@ -17,9 +17,9 @@ use proxmox_schema::{BooleanSchema, ObjectSchema};
use proxmox_sys::sortable;
use pbs_api_types::{
Authid, DatastoreWithNamespace, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::index::IndexFile;
@ -80,20 +80,16 @@ fn upgrade_to_backup_reader_protocol(
let store = required_string_param(&param, "store")?.to_owned();
let backup_ns = optional_ns_param(&param)?;
let store_with_ns = DatastoreWithNamespace {
store: store.clone(),
ns: backup_ns.clone(),
};
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(&auth_id, &store_with_ns.acl_path());
let acl_path = backup_ns.acl_path(&store);
let privs = user_info.lookup_privs(&auth_id, &acl_path);
let priv_read = privs & PRIV_DATASTORE_READ != 0;
let priv_backup = privs & PRIV_DATASTORE_BACKUP != 0;
// priv_backup needs owner check further down below!
if !priv_read && !priv_backup {
bail!("no permissions on /{}", store_with_ns.acl_path().join("/"));
bail!("no permissions on /{}", acl_path.join("/"));
}
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;


@ -10,7 +10,7 @@ use proxmox_schema::api;
use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
use pbs_api_types::{
print_ns_and_snapshot, Authid, DatastoreWithNamespace, GroupFilter, MediaPoolConfig, Operation,
print_ns_and_snapshot, print_store_and_ns, Authid, GroupFilter, MediaPoolConfig, Operation,
TapeBackupJobConfig, TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA,
PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
};
@ -453,11 +453,6 @@ fn backup_worker(
let mut need_catalog = false; // avoid writing catalog for empty jobs
for (group_number, group) in group_list.into_iter().enumerate() {
let store_with_ns = DatastoreWithNamespace {
store: datastore_name.to_owned(),
ns: group.backup_ns().clone(),
};
progress.done_groups = group_number as u64;
progress.done_snapshots = 0;
progress.group_snapshots = 0;
@ -474,7 +469,7 @@ fn backup_worker(
task_log!(
worker,
"{}, group {} was empty",
store_with_ns,
print_store_and_ns(datastore_name, group.backup_ns()),
group.group()
);
continue;


@ -18,10 +18,9 @@ use proxmox_uuid::Uuid;
use pbs_api_types::{
parse_ns_and_snapshot, print_ns_and_snapshot, Authid, BackupDir, BackupNamespace, CryptMode,
DatastoreWithNamespace, Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA,
DATASTORE_MAP_LIST_SCHEMA, DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA,
TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
Operation, TapeRestoreNamespace, Userid, DATASTORE_MAP_ARRAY_SCHEMA, DATASTORE_MAP_LIST_SCHEMA,
DRIVE_NAME_SCHEMA, MAX_NAMESPACE_DEPTH, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
PRIV_TAPE_READ, TAPE_RESTORE_NAMESPACE_SCHEMA, TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::dynamic_index::DynamicIndexReader;
@ -200,13 +199,15 @@ impl DataStoreMap {
fn check_datastore_privs(
user_info: &CachedUserInfo,
store_with_ns: &DatastoreWithNamespace,
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
owner: Option<&Authid>,
) -> Result<(), Error> {
let privs = user_info.lookup_privs(auth_id, &store_with_ns.acl_path());
let acl_path = ns.acl_path(store);
let privs = user_info.lookup_privs(auth_id, &acl_path);
if (privs & PRIV_DATASTORE_BACKUP) == 0 {
bail!("no permissions on /{}", store_with_ns.acl_path().join("/"));
bail!("no permissions on /{}", acl_path.join("/"));
}
if let Some(ref owner) = owner {
@ -230,24 +231,20 @@ fn check_and_create_namespaces(
owner: Option<&Authid>,
) -> Result<(), Error> {
// check normal restore privs first
let mut store_with_ns = DatastoreWithNamespace {
store: store.name().to_string(),
ns: ns.clone(),
};
check_datastore_privs(user_info, &store_with_ns, auth_id, owner)?;
check_datastore_privs(user_info, store.name(), ns, auth_id, owner)?;
// try create recursively if it does not exist
if !store.namespace_exists(ns) {
store_with_ns.ns = Default::default();
let mut tmp_ns = BackupNamespace::root();
for comp in ns.components() {
store_with_ns.ns.push(comp.to_string())?;
if !store.namespace_exists(&store_with_ns.ns) {
check_ns_modification_privs(&store_with_ns, auth_id).map_err(|_err| {
format_err!("no permission to create namespace '{}'", store_with_ns.ns)
tmp_ns.push(comp.to_string())?;
if !store.namespace_exists(&tmp_ns) {
check_ns_modification_privs(store.name(), &tmp_ns, auth_id).map_err(|_err| {
format_err!("no permission to create namespace '{}'", tmp_ns)
})?;
store.create_namespace(&store_with_ns.ns.parent(), comp.to_string())?;
store.create_namespace(&tmp_ns.parent(), comp.to_string())?;
}
}
}
@ -338,10 +335,8 @@ pub fn restore(
for (target, namespaces) in used_datastores.values() {
check_datastore_privs(
&user_info,
&DatastoreWithNamespace {
store: target.name().to_string(),
ns: Default::default(),
},
target.name(),
&BackupNamespace::root(),
&auth_id,
owner.as_ref(),
)?;
@ -579,13 +574,13 @@ fn check_snapshot_restorable(
let mut can_restore_some = false;
for ns in namespaces {
// only simple check, ns creation comes later
let store_with_ns = DatastoreWithNamespace {
store: datastore.name().to_string(),
ns: ns.clone(),
};
if let Err(err) =
check_datastore_privs(user_info, &store_with_ns, auth_id, Some(restore_owner))
{
if let Err(err) = check_datastore_privs(
user_info,
datastore.name(),
&ns,
auth_id,
Some(restore_owner),
) {
task_warn!(worker, "cannot restore {store}:{snapshot} to {ns}: '{err}'");
continue;
}

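The restructured loop in check_and_create_namespaces above walks the requested namespace one component at a time and creates only the levels that are missing. A reduced model of that walk (datastore and privilege check replaced by trivial stand-ins; the real code calls check_ns_modification_privs per level and passes the parent to create_namespace):

use std::collections::HashSet;

/// Trivial stand-in for a datastore's namespace bookkeeping.
#[derive(Default)]
struct Store {
    existing: HashSet<Vec<String>>,
}

impl Store {
    fn namespace_exists(&self, ns: &[String]) -> bool {
        ns.is_empty() || self.existing.contains(ns)
    }

    fn create_namespace(&mut self, ns: &[String]) {
        self.existing.insert(ns.to_vec());
    }
}

/// Create all missing levels of `target`, parents first.
fn check_and_create_namespaces(store: &mut Store, target: &[String]) {
    let mut tmp_ns: Vec<String> = Vec::new();
    for comp in target {
        tmp_ns.push(comp.clone());
        if !store.namespace_exists(&tmp_ns) {
            // real code: check_ns_modification_privs(store.name(), &tmp_ns, auth_id)?;
            store.create_namespace(&tmp_ns);
        }
    }
}

fn main() {
    let mut store = Store::default();
    check_and_create_namespaces(&mut store, &["prod".to_string(), "vms".to_string()]);
    assert!(store.namespace_exists(&["prod".to_string()]));
    assert!(store.namespace_exists(&["prod".to_string(), "vms".to_string()]));
}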

@ -3,38 +3,37 @@ use std::sync::Arc;
use anyhow::{bail, Error};
use pbs_api_types::{
privs_to_priv_names, Authid, BackupNamespace, DatastoreWithNamespace, PRIV_DATASTORE_AUDIT,
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ,
privs_to_priv_names, Authid, BackupNamespace, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::{backup_info::BackupGroup, DataStore, ListGroups, ListNamespacesRecursive};
/// Asserts that `privs` are fulfilled on datastore + (optional) namespace.
pub fn check_ns_privs(
store_with_ns: &DatastoreWithNamespace,
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
privs: u64,
) -> Result<(), Error> {
check_ns_privs_full(store_with_ns, auth_id, privs, 0).map(|_| ())
check_ns_privs_full(store, ns, auth_id, privs, 0).map(|_| ())
}
/// Asserts that `privs` for creating/destroying namespace in datastore are fulfilled.
pub fn check_ns_modification_privs(
store_with_ns: &DatastoreWithNamespace,
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
) -> Result<(), Error> {
// we could allow it as an easy way to purge a whole datastore, but let's be more restrictive for now
if store_with_ns.ns.is_root() {
if ns.is_root() {
// TODO
bail!("Cannot create/delete root namespace!");
}
let parent = DatastoreWithNamespace {
store: store_with_ns.store.clone(),
ns: store_with_ns.ns.parent(),
};
let parent = ns.parent();
check_ns_privs(&parent, auth_id, PRIV_DATASTORE_MODIFY)
check_ns_privs(store, &parent, auth_id, PRIV_DATASTORE_MODIFY)
}
/// Asserts that either `full_access_privs` or `partial_access_privs` are fulfilled on
@ -43,13 +42,15 @@ pub fn check_ns_modification_privs(
/// Return value indicates whether further checks like group ownerships are required because
/// `full_access_privs` are missing.
pub fn check_ns_privs_full(
store_with_ns: &DatastoreWithNamespace,
store: &str,
ns: &BackupNamespace,
auth_id: &Authid,
full_access_privs: u64,
partial_access_privs: u64,
) -> Result<bool, Error> {
let user_info = CachedUserInfo::new()?;
let privs = user_info.lookup_privs(auth_id, &store_with_ns.acl_path());
let acl_path = ns.acl_path(store);
let privs = user_info.lookup_privs(auth_id, &acl_path);
if full_access_privs != 0 && (privs & full_access_privs) != 0 {
return Ok(false);
@ -59,7 +60,7 @@ pub fn check_ns_privs_full(
}
let priv_names = privs_to_priv_names(full_access_privs | partial_access_privs).join("|");
let path = format!("/{}", store_with_ns.acl_path().join("/"));
let path = format!("/{}", acl_path.join("/"));
proxmox_router::http_bail!(
FORBIDDEN,
@ -158,11 +159,9 @@ impl<'a> Iterator for ListAccessibleBackupGroups<'a> {
let mut override_owner = false;
if let Some(auth_id) = &self.auth_id {
let info = &self.user_info;
let store_with_ns = DatastoreWithNamespace {
store: self.store.name().to_string(),
ns: ns.clone(),
};
let privs = info.lookup_privs(&auth_id, &store_with_ns.acl_path());
let privs =
info.lookup_privs(&auth_id, &ns.acl_path(self.store.name()));
if privs & NS_PRIVS_OK == 0 {
continue;

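check_ns_privs_full keeps its full/partial access semantics; only the leading parameter changed from a DatastoreWithNamespace to the store + ns pair. Its decision logic, reduced to a self-contained form (the CachedUserInfo lookup and the FORBIDDEN http_bail are replaced here by a plain u64 argument and a String error):

const PRIV_DATASTORE_AUDIT: u64 = 1 << 0;
const PRIV_DATASTORE_BACKUP: u64 = 1 << 1;

/// Ok(false) means full access, Ok(true) means limited access (the caller
/// must still verify backup-group ownership), Err means no access at all.
fn check_ns_privs_full(
    acl_path: &[&str],
    user_privs: u64, // what user_info.lookup_privs(auth_id, acl_path) would return
    full_access_privs: u64,
    partial_access_privs: u64,
) -> Result<bool, String> {
    if full_access_privs != 0 && (user_privs & full_access_privs) != 0 {
        return Ok(false);
    }
    if partial_access_privs != 0 && (user_privs & partial_access_privs) != 0 {
        return Ok(true);
    }
    Err(format!("permission check failed on /{}", acl_path.join("/")))
}

fn main() {
    // backup-only privileges on /datastore/store1/prod -> limited access
    let limited = check_ns_privs_full(
        &["datastore", "store1", "prod"],
        PRIV_DATASTORE_BACKUP,
        PRIV_DATASTORE_AUDIT,
        PRIV_DATASTORE_BACKUP,
    )
    .unwrap();
    assert!(limited);
}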

@ -9,7 +9,7 @@ use anyhow::{bail, format_err, Error};
use proxmox_sys::{task_log, WorkerTaskContext};
use pbs_api_types::{
print_ns_and_snapshot, Authid, BackupNamespace, BackupType, CryptMode, DatastoreWithNamespace,
print_ns_and_snapshot, print_store_and_ns, Authid, BackupNamespace, BackupType, CryptMode,
SnapshotVerifyState, VerifyState, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_VERIFY, UPID,
};
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
@ -453,14 +453,10 @@ pub fn verify_backup_group(
let mut list = match group.list_backups() {
Ok(list) => list,
Err(err) => {
let store_with_ns = DatastoreWithNamespace {
store: verify_worker.datastore.name().to_owned(),
ns: group.backup_ns().clone(),
};
task_log!(
verify_worker.worker,
"verify {}, group {} - unable to list backups: {}",
store_with_ns,
print_store_and_ns(verify_worker.datastore.name(), group.backup_ns()),
group.group(),
err,
);


@ -16,7 +16,7 @@ use proxmox_router::HttpError;
use proxmox_sys::task_log;
use pbs_api_types::{
Authid, BackupNamespace, DatastoreWithNamespace, GroupFilter, GroupListItem, NamespaceListItem,
print_store_and_ns, Authid, BackupNamespace, GroupFilter, GroupListItem, NamespaceListItem,
Operation, RateLimitConfig, Remote, SnapshotListItem, MAX_NAMESPACE_DEPTH,
PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
};
@ -116,14 +116,6 @@ impl PullParameters {
pub async fn client(&self) -> Result<HttpClient, Error> {
crate::api2::config::remote::remote_client(&self.remote, Some(self.limit.clone())).await
}
/// Returns DatastoreWithNamespace with namespace (or local namespace anchor).
pub fn store_with_ns(&self, ns: BackupNamespace) -> DatastoreWithNamespace {
DatastoreWithNamespace {
store: self.store.name().to_string(),
ns,
}
}
}
async fn pull_index_chunks<I: IndexFile>(
@ -792,15 +784,12 @@ async fn query_namespaces(
Ok(list.iter().map(|item| item.ns.clone()).collect())
}
fn check_and_create_ns(
params: &PullParameters,
store_with_ns: &DatastoreWithNamespace,
) -> Result<bool, Error> {
let ns = &store_with_ns.ns;
fn check_and_create_ns(params: &PullParameters, ns: &BackupNamespace) -> Result<bool, Error> {
let mut created = false;
let store_ns_str = print_store_and_ns(params.store.name(), ns);
if !ns.is_root() && !params.store.namespace_path(&ns).exists() {
check_ns_modification_privs(&store_with_ns, &params.owner)
check_ns_modification_privs(params.store.name(), ns, &params.owner)
.map_err(|err| format_err!("Creating {ns} not allowed - {err}"))?;
let name = match ns.components().last() {
@ -811,24 +800,24 @@ fn check_and_create_ns(
};
if let Err(err) = params.store.create_namespace(&ns.parent(), name) {
bail!(
"sync into {} failed - namespace creation failed: {}",
&store_with_ns,
err
);
bail!("sync into {store_ns_str} failed - namespace creation failed: {err}");
}
created = true;
}
check_ns_privs(&store_with_ns, &params.owner, PRIV_DATASTORE_BACKUP)
.map_err(|err| format_err!("sync into {store_with_ns} not allowed - {err}"))?;
check_ns_privs(
params.store.name(),
ns,
&params.owner,
PRIV_DATASTORE_BACKUP,
)
.map_err(|err| format_err!("sync into {store_ns_str} not allowed - {err}"))?;
Ok(created)
}
fn check_and_remove_ns(params: &PullParameters, local_ns: &BackupNamespace) -> Result<bool, Error> {
let store_with_ns = params.store_with_ns(local_ns.clone());
check_ns_modification_privs(&store_with_ns, &params.owner)
check_ns_modification_privs(&params.store.name(), local_ns, &params.owner)
.map_err(|err| format_err!("Removing {local_ns} not allowed - {err}"))?;
params.store.remove_namespace_recursive(local_ns, true)
@ -851,8 +840,8 @@ fn check_and_remove_vanished_ns(
.store
.recursive_iter_backup_ns_ok(params.ns.clone(), Some(max_depth))?
.filter(|ns| {
let store_with_ns = params.store_with_ns(ns.clone());
let user_privs = user_info.lookup_privs(&params.owner, &store_with_ns.acl_path());
let user_privs =
user_info.lookup_privs(&params.owner, &ns.acl_path(params.store.name()));
user_privs & (PRIV_DATASTORE_BACKUP | PRIV_DATASTORE_AUDIT) != 0
})
.collect();
@ -927,32 +916,30 @@ pub async fn pull_store(
let mut synced_ns = HashSet::with_capacity(namespaces.len());
for namespace in namespaces {
let source_store_ns = DatastoreWithNamespace {
store: params.source.store().to_owned(),
ns: namespace.clone(),
};
let source_store_ns_str = print_store_and_ns(params.source.store(), &namespace);
let target_ns = namespace.map_prefix(&params.remote_ns, &params.ns)?;
let target_store_ns = params.store_with_ns(target_ns.clone());
let target_store_ns_str = print_store_and_ns(params.store.name(), &target_ns);
task_log!(worker, "----");
task_log!(
worker,
"Syncing {} into {}",
source_store_ns,
target_store_ns
source_store_ns_str,
target_store_ns_str
);
synced_ns.insert(target_ns.clone());
match check_and_create_ns(&params, &target_store_ns) {
match check_and_create_ns(&params, &target_ns) {
Ok(true) => task_log!(worker, "Created namespace {}", target_ns),
Ok(false) => {}
Err(err) => {
task_log!(
worker,
"Cannot sync {} into {} - {}",
source_store_ns,
target_store_ns,
source_store_ns_str,
target_store_ns_str,
err,
);
errors = true;