sync/pull: make namespace aware
Allow pulling all groups from a certain source namespace, and possibly sub-namespaces up to max-depth, into a target namespace. If any sub-namespaces get pulled, they will be mapped relatively from the source parent namespace to the target parent namespace. Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com> Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
committed by
Thomas Lamprecht
parent
31aa38b684
commit
c06c1b4bd7
@@ -1,6 +1,8 @@
|
||||
use ::serde::{Deserialize, Serialize};
|
||||
use anyhow::{bail, Error};
|
||||
use hex::FromHex;
|
||||
use pbs_api_types::BackupNamespace;
|
||||
use pbs_api_types::MAX_NAMESPACE_DEPTH;
|
||||
use serde_json::Value;
|
||||
|
||||
use proxmox_router::{http_bail, Permission, Router, RpcEnvironment};
|
||||
@@ -25,11 +27,21 @@ pub fn check_sync_job_read_access(
|
||||
return false;
|
||||
}
|
||||
|
||||
if let Some(ref ns) = job.ns {
|
||||
let ns_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store, &ns.to_string()]);
|
||||
if ns_privs & PRIV_DATASTORE_AUDIT == 0 {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
let remote_privs = user_info.lookup_privs(auth_id, &["remote", &job.remote]);
|
||||
remote_privs & PRIV_REMOTE_AUDIT != 0
|
||||
}
|
||||
|
||||
// user can run the corresponding pull job
|
||||
/// checks whether user can run the corresponding pull job
|
||||
///
|
||||
/// namespace creation/deletion ACL and backup group ownership checks happen in the pull code directly.
|
||||
/// remote side checks/filters remote datastore/namespace/group access.
|
||||
pub fn check_sync_job_modify_access(
|
||||
user_info: &CachedUserInfo,
|
||||
auth_id: &Authid,
|
||||
@@ -40,6 +52,13 @@ pub fn check_sync_job_modify_access(
|
||||
return false;
|
||||
}
|
||||
|
||||
if let Some(ref ns) = job.ns {
|
||||
let ns_privs = user_info.lookup_privs(auth_id, &["datastore", &job.store, &ns.to_string()]);
|
||||
if ns_privs & PRIV_DATASTORE_BACKUP == 0 {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(true) = job.remove_vanished {
|
||||
if datastore_privs & PRIV_DATASTORE_PRUNE == 0 {
|
||||
return false;
|
||||
@@ -198,6 +217,10 @@ pub enum DeletableProperty {
|
||||
rate_out,
|
||||
/// Delete the burst_out property.
|
||||
burst_out,
|
||||
/// Delete the ns property,
|
||||
ns,
|
||||
/// Delete the remote_ns property,
|
||||
remote_ns,
|
||||
}
|
||||
|
||||
#[api(
|
||||
@@ -283,10 +306,28 @@ pub fn update_sync_job(
|
||||
DeletableProperty::burst_out => {
|
||||
data.limit.burst_out = None;
|
||||
}
|
||||
DeletableProperty::ns => {
|
||||
data.ns = None;
|
||||
}
|
||||
DeletableProperty::remote_ns => {
|
||||
data.remote_ns = None;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let check_max_depth = |ns: &BackupNamespace, depth| -> Result<(), Error> {
|
||||
if ns.depth() + depth >= MAX_NAMESPACE_DEPTH {
|
||||
bail!(
|
||||
"namespace and recursion depth exceed limit: {} + {} >= {}",
|
||||
ns.depth(),
|
||||
depth,
|
||||
MAX_NAMESPACE_DEPTH
|
||||
);
|
||||
}
|
||||
Ok(())
|
||||
};
|
||||
|
||||
if let Some(comment) = update.comment {
|
||||
let comment = comment.trim().to_string();
|
||||
if comment.is_empty() {
|
||||
@@ -299,12 +340,23 @@ pub fn update_sync_job(
|
||||
if let Some(store) = update.store {
|
||||
data.store = store;
|
||||
}
|
||||
if let Some(ns) = update.ns {
|
||||
check_max_depth(&ns, update.max_depth.unwrap_or(data.max_depth))?;
|
||||
data.ns = Some(ns);
|
||||
}
|
||||
if let Some(remote) = update.remote {
|
||||
data.remote = remote;
|
||||
}
|
||||
if let Some(remote_store) = update.remote_store {
|
||||
data.remote_store = remote_store;
|
||||
}
|
||||
if let Some(remote_ns) = update.remote_ns {
|
||||
check_max_depth(
|
||||
&remote_ns,
|
||||
update.max_depth.unwrap_or(data.max_depth),
|
||||
)?;
|
||||
data.remote_ns = Some(remote_ns);
|
||||
}
|
||||
if let Some(owner) = update.owner {
|
||||
data.owner = Some(owner);
|
||||
}
|
||||
@@ -335,6 +387,15 @@ pub fn update_sync_job(
|
||||
if update.remove_vanished.is_some() {
|
||||
data.remove_vanished = update.remove_vanished;
|
||||
}
|
||||
if let Some(max_depth) = update.max_depth {
|
||||
if let Some(ref ns) = data.ns {
|
||||
check_max_depth(ns, max_depth)?;
|
||||
}
|
||||
if let Some(ref ns) = data.remote_ns {
|
||||
check_max_depth(ns, max_depth)?;
|
||||
}
|
||||
data.max_depth = max_depth;
|
||||
}
|
||||
|
||||
if !check_sync_job_modify_access(&user_info, &auth_id, &data) {
|
||||
bail!("permission check failed");
|
||||
@@ -453,10 +514,13 @@ acl:1:/remote/remote1/remotestore1:write@pbs:RemoteSyncOperator
|
||||
id: "regular".to_string(),
|
||||
remote: "remote0".to_string(),
|
||||
remote_store: "remotestore1".to_string(),
|
||||
remote_ns: None,
|
||||
store: "localstore0".to_string(),
|
||||
ns: None,
|
||||
owner: Some(write_auth_id.clone()),
|
||||
comment: None,
|
||||
remove_vanished: None,
|
||||
max_depth: 0,
|
||||
group_filter: None,
|
||||
schedule: None,
|
||||
limit: pbs_api_types::RateLimitConfig::default(), // no limit
|
||||
|
@@ -39,6 +39,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
|
||||
let remote = captures.get(1);
|
||||
let remote_store = captures.get(2);
|
||||
let local_store = captures.get(3);
|
||||
let local_ns = captures.get(4).map(|m| m.as_str());
|
||||
|
||||
if let (Some(remote), Some(remote_store), Some(local_store)) =
|
||||
(remote, remote_store, local_store)
|
||||
@@ -46,6 +47,7 @@ fn check_job_privs(auth_id: &Authid, user_info: &CachedUserInfo, upid: &UPID) ->
|
||||
return check_pull_privs(
|
||||
auth_id,
|
||||
local_store.as_str(),
|
||||
local_ns,
|
||||
remote.as_str(),
|
||||
remote_store.as_str(),
|
||||
false,
|
||||
|
@@ -9,9 +9,9 @@ use proxmox_schema::api;
|
||||
use proxmox_sys::task_log;
|
||||
|
||||
use pbs_api_types::{
|
||||
Authid, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
|
||||
GROUP_FILTER_LIST_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE, PRIV_REMOTE_READ,
|
||||
REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||
Authid, BackupNamespace, GroupFilter, RateLimitConfig, SyncJobConfig, DATASTORE_SCHEMA,
|
||||
GROUP_FILTER_LIST_SCHEMA, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_PRUNE,
|
||||
PRIV_REMOTE_READ, REMOTE_ID_SCHEMA, REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||
};
|
||||
use pbs_config::CachedUserInfo;
|
||||
use proxmox_rest_server::WorkerTask;
|
||||
@@ -22,13 +22,24 @@ use crate::server::pull::{pull_store, PullParameters};
|
||||
pub fn check_pull_privs(
|
||||
auth_id: &Authid,
|
||||
store: &str,
|
||||
ns: Option<&str>,
|
||||
remote: &str,
|
||||
remote_store: &str,
|
||||
delete: bool,
|
||||
) -> Result<(), Error> {
|
||||
let user_info = CachedUserInfo::new()?;
|
||||
|
||||
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_BACKUP, false)?;
|
||||
let local_store_ns_acl_path = match ns {
|
||||
Some(ns) => vec!["datastore", store, ns],
|
||||
None => vec!["datastore", store],
|
||||
};
|
||||
|
||||
user_info.check_privs(
|
||||
auth_id,
|
||||
&local_store_ns_acl_path,
|
||||
PRIV_DATASTORE_BACKUP,
|
||||
false,
|
||||
)?;
|
||||
user_info.check_privs(
|
||||
auth_id,
|
||||
&["remote", remote, remote_store],
|
||||
@@ -37,7 +48,12 @@ pub fn check_pull_privs(
|
||||
)?;
|
||||
|
||||
if delete {
|
||||
user_info.check_privs(auth_id, &["datastore", store], PRIV_DATASTORE_PRUNE, false)?;
|
||||
user_info.check_privs(
|
||||
auth_id,
|
||||
&local_store_ns_acl_path,
|
||||
PRIV_DATASTORE_PRUNE,
|
||||
false,
|
||||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -49,14 +65,17 @@ impl TryFrom<&SyncJobConfig> for PullParameters {
|
||||
fn try_from(sync_job: &SyncJobConfig) -> Result<Self, Self::Error> {
|
||||
PullParameters::new(
|
||||
&sync_job.store,
|
||||
sync_job.ns.clone().unwrap_or_default(),
|
||||
&sync_job.remote,
|
||||
&sync_job.remote_store,
|
||||
sync_job.remote_ns.clone().unwrap_or_default(),
|
||||
sync_job
|
||||
.owner
|
||||
.as_ref()
|
||||
.unwrap_or_else(|| Authid::root_auth_id())
|
||||
.clone(),
|
||||
sync_job.remove_vanished,
|
||||
sync_job.max_depth,
|
||||
sync_job.group_filter.clone(),
|
||||
sync_job.limit.clone(),
|
||||
)
|
||||
@@ -71,10 +90,11 @@ pub fn do_sync_job(
|
||||
to_stdout: bool,
|
||||
) -> Result<String, Error> {
|
||||
let job_id = format!(
|
||||
"{}:{}:{}:{}",
|
||||
"{}:{}:{}:{}:{}",
|
||||
sync_job.remote,
|
||||
sync_job.remote_store,
|
||||
sync_job.store,
|
||||
sync_job.ns.clone().unwrap_or_default(),
|
||||
job.jobname()
|
||||
);
|
||||
let worker_type = job.jobtype().to_string();
|
||||
@@ -154,16 +174,28 @@ pub fn do_sync_job(
|
||||
store: {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
ns: {
|
||||
type: BackupNamespace,
|
||||
optional: true,
|
||||
},
|
||||
remote: {
|
||||
schema: REMOTE_ID_SCHEMA,
|
||||
},
|
||||
"remote-store": {
|
||||
schema: DATASTORE_SCHEMA,
|
||||
},
|
||||
"remote-ns": {
|
||||
type: BackupNamespace,
|
||||
optional: true,
|
||||
},
|
||||
"remove-vanished": {
|
||||
schema: REMOVE_VANISHED_BACKUPS_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"max-depth": {
|
||||
schema: NS_MAX_DEPTH_SCHEMA,
|
||||
optional: true,
|
||||
},
|
||||
"group-filter": {
|
||||
schema: GROUP_FILTER_LIST_SCHEMA,
|
||||
optional: true,
|
||||
@@ -186,9 +218,12 @@ The delete flag additionally requires the Datastore.Prune privilege on '/datasto
|
||||
/// Sync store from other repository
|
||||
async fn pull(
|
||||
store: String,
|
||||
ns: Option<BackupNamespace>,
|
||||
remote: String,
|
||||
remote_store: String,
|
||||
remote_ns: Option<BackupNamespace>,
|
||||
remove_vanished: Option<bool>,
|
||||
max_depth: Option<usize>,
|
||||
group_filter: Option<Vec<GroupFilter>>,
|
||||
limit: RateLimitConfig,
|
||||
_info: &ApiMethod,
|
||||
@@ -197,14 +232,32 @@ async fn pull(
|
||||
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
|
||||
let delete = remove_vanished.unwrap_or(false);
|
||||
|
||||
check_pull_privs(&auth_id, &store, &remote, &remote_store, delete)?;
|
||||
let ns = ns.unwrap_or_default();
|
||||
let max_depth = max_depth.unwrap_or(0);
|
||||
let ns_str = if ns.is_root() {
|
||||
None
|
||||
} else {
|
||||
Some(ns.to_string())
|
||||
};
|
||||
|
||||
check_pull_privs(
|
||||
&auth_id,
|
||||
&store,
|
||||
ns_str.as_deref(),
|
||||
&remote,
|
||||
&remote_store,
|
||||
delete,
|
||||
)?;
|
||||
|
||||
let pull_params = PullParameters::new(
|
||||
&store,
|
||||
ns,
|
||||
&remote,
|
||||
&remote_store,
|
||||
remote_ns.unwrap_or_default(),
|
||||
auth_id.clone(),
|
||||
remove_vanished,
|
||||
max_depth,
|
||||
group_filter,
|
||||
limit,
|
||||
)?;
|
||||
@@ -217,7 +270,13 @@ async fn pull(
|
||||
auth_id.to_string(),
|
||||
true,
|
||||
move |worker| async move {
|
||||
task_log!(worker, "sync datastore '{}' start", store);
|
||||
task_log!(
|
||||
worker,
|
||||
"pull datastore '{}' from '{}/{}'",
|
||||
store,
|
||||
remote,
|
||||
remote_store,
|
||||
);
|
||||
|
||||
let pull_future = pull_store(&worker, &client, &pull_params);
|
||||
let future = select! {
|
||||
@@ -227,7 +286,7 @@ async fn pull(
|
||||
|
||||
let _ = future?;
|
||||
|
||||
task_log!(worker, "sync datastore '{}' end", store);
|
||||
task_log!(worker, "pull datastore '{}' end", store);
|
||||
|
||||
Ok(())
|
||||
},
|
||||
|
Reference in New Issue
Block a user