prune datastore: support max-depth and improve priv checks
use the relatively new variant of ListAccessibleBackupGroups to also allow pruning groups that one doesn't own but for which one has the respective privileges at their namespace level. This was previously handled by the API endpoint itself, which was fine as long as only a single level was looked at. Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
parent
65aba79a9b
commit
e3c26aea31
|
@ -1063,6 +1063,10 @@ pub fn prune(
|
||||||
type: BackupNamespace,
|
type: BackupNamespace,
|
||||||
optional: true,
|
optional: true,
|
||||||
},
|
},
|
||||||
|
"max-depth": {
|
||||||
|
schema: NS_MAX_DEPTH_SCHEMA,
|
||||||
|
optional: true,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
returns: {
|
returns: {
|
||||||
|
@ -1079,6 +1083,7 @@ pub fn prune_datastore(
|
||||||
prune_options: PruneOptions,
|
prune_options: PruneOptions,
|
||||||
store: String,
|
store: String,
|
||||||
ns: Option<BackupNamespace>,
|
ns: Option<BackupNamespace>,
|
||||||
|
max_depth: Option<usize>,
|
||||||
_param: Value,
|
_param: Value,
|
||||||
rpcenv: &mut dyn RpcEnvironment,
|
rpcenv: &mut dyn RpcEnvironment,
|
||||||
) -> Result<String, Error> {
|
) -> Result<String, Error> {
|
||||||
|
@ -1090,15 +1095,21 @@ pub fn prune_datastore(
|
||||||
|
|
||||||
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
let to_stdout = rpcenv.env_type() == RpcEnvironmentType::CLI;
|
||||||
|
|
||||||
// FIXME: add max-depth
|
|
||||||
|
|
||||||
let upid_str = WorkerTask::new_thread(
|
let upid_str = WorkerTask::new_thread(
|
||||||
"prune",
|
"prune",
|
||||||
Some(worker_id),
|
Some(worker_id),
|
||||||
auth_id.to_string(),
|
auth_id.to_string(),
|
||||||
to_stdout,
|
to_stdout,
|
||||||
move |worker| {
|
move |worker| {
|
||||||
crate::server::prune_datastore(worker, auth_id, prune_options, datastore, ns, dry_run)
|
crate::server::prune_datastore(
|
||||||
|
worker,
|
||||||
|
auth_id,
|
||||||
|
prune_options,
|
||||||
|
datastore,
|
||||||
|
ns,
|
||||||
|
max_depth.unwrap_or(MAX_NAMESPACE_DEPTH), // cannot rely on schema default
|
||||||
|
dry_run,
|
||||||
|
)
|
||||||
},
|
},
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
|
|
@ -4,12 +4,14 @@ use anyhow::Error;
|
||||||
|
|
||||||
use proxmox_sys::{task_log, task_warn};
|
use proxmox_sys::{task_log, task_warn};
|
||||||
|
|
||||||
use pbs_api_types::{Authid, BackupNamespace, Operation, PruneOptions, PRIV_DATASTORE_MODIFY};
|
use pbs_api_types::{
|
||||||
use pbs_config::CachedUserInfo;
|
Authid, BackupNamespace, Operation, PruneOptions, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
|
||||||
|
};
|
||||||
use pbs_datastore::prune::compute_prune_info;
|
use pbs_datastore::prune::compute_prune_info;
|
||||||
use pbs_datastore::DataStore;
|
use pbs_datastore::DataStore;
|
||||||
use proxmox_rest_server::WorkerTask;
|
use proxmox_rest_server::WorkerTask;
|
||||||
|
|
||||||
|
use crate::backup::ListAccessibleBackupGroups;
|
||||||
use crate::server::jobstate::Job;
|
use crate::server::jobstate::Job;
|
||||||
|
|
||||||
pub fn prune_datastore(
|
pub fn prune_datastore(
|
||||||
|
@ -18,7 +20,7 @@ pub fn prune_datastore(
|
||||||
prune_options: PruneOptions,
|
prune_options: PruneOptions,
|
||||||
datastore: Arc<DataStore>,
|
datastore: Arc<DataStore>,
|
||||||
ns: BackupNamespace,
|
ns: BackupNamespace,
|
||||||
//max_depth: Option<usize>, // FIXME
|
max_depth: usize,
|
||||||
dry_run: bool,
|
dry_run: bool,
|
||||||
) -> Result<(), Error> {
|
) -> Result<(), Error> {
|
||||||
let store = &datastore.name();
|
let store = &datastore.name();
|
||||||
|
@ -47,26 +49,24 @@ pub fn prune_datastore(
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let user_info = CachedUserInfo::new()?;
|
for group in ListAccessibleBackupGroups::new_with_privs(
|
||||||
let privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
|
&datastore,
|
||||||
let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;
|
ns.clone(),
|
||||||
|
max_depth,
|
||||||
// FIXME: Namespace recursion!
|
Some(PRIV_DATASTORE_MODIFY), // overrides the owner check
|
||||||
for group in datastore.iter_backup_groups(ns.clone())? {
|
Some(PRIV_DATASTORE_PRUNE), // additionally required if owner
|
||||||
let ns_recursed = &ns; // remove_backup_dir might need the inner one
|
Some(&auth_id),
|
||||||
|
)? {
|
||||||
let group = group?;
|
let group = group?;
|
||||||
|
let ns = group.backup_ns();
|
||||||
let list = group.list_backups()?;
|
let list = group.list_backups()?;
|
||||||
|
|
||||||
if !has_privs && !datastore.owns_backup(&ns_recursed, group.as_ref(), &auth_id)? {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut prune_info = compute_prune_info(list, &prune_options)?;
|
let mut prune_info = compute_prune_info(list, &prune_options)?;
|
||||||
prune_info.reverse(); // delete older snapshots first
|
prune_info.reverse(); // delete older snapshots first
|
||||||
|
|
||||||
task_log!(
|
task_log!(
|
||||||
worker,
|
worker,
|
||||||
"Pruning group \"{}/{}\"",
|
"Pruning group {ns}:\"{}/{}\"",
|
||||||
group.backup_type(),
|
group.backup_type(),
|
||||||
group.backup_id()
|
group.backup_id()
|
||||||
);
|
);
|
||||||
|
@ -83,9 +83,7 @@ pub fn prune_datastore(
|
||||||
info.backup_dir.backup_time_string()
|
info.backup_dir.backup_time_string()
|
||||||
);
|
);
|
||||||
if !keep && !dry_run {
|
if !keep && !dry_run {
|
||||||
if let Err(err) =
|
if let Err(err) = datastore.remove_backup_dir(ns, info.backup_dir.as_ref(), false) {
|
||||||
datastore.remove_backup_dir(ns_recursed, info.backup_dir.as_ref(), false)
|
|
||||||
{
|
|
||||||
let path = info.backup_dir.relative_path();
|
let path = info.backup_dir.relative_path();
|
||||||
task_warn!(worker, "failed to remove dir {path:?}: {err}");
|
task_warn!(worker, "failed to remove dir {path:?}: {err}");
|
||||||
}
|
}
|
||||||
|
@ -128,6 +126,7 @@ pub fn do_prune_job(
|
||||||
prune_options,
|
prune_options,
|
||||||
datastore,
|
datastore,
|
||||||
BackupNamespace::default(),
|
BackupNamespace::default(),
|
||||||
|
pbs_api_types::MAX_NAMESPACE_DEPTH,
|
||||||
false,
|
false,
|
||||||
);
|
);
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue