verify: introduce & use new Datastore.Verify privilege

for verifying a whole datastore. Datastore.Backup now allows verifying
only backups owned by the triggering user.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fabian Grünbichler, 2020-10-30 12:36:39 +01:00 (committed by Thomas Lamprecht)
parent b728a69e7d
commit 09f6a24078
4 changed files with 53 additions and 7 deletions
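What the change means in practice: a caller holding Datastore.Verify on the datastore ACL path may verify anything in the store, while a caller with only Datastore.Backup is restricted to backup groups it owns. A minimal sketch of that decision, with simplified types and illustrative bit values (not the actual handler code and not the real flag values, which come from the constnamedbitmap! block further down):

// Sketch only: simplified stand-in for the owner-restriction decision.
const PRIV_DATASTORE_VERIFY: u64 = 1 << 2; // illustrative value
const PRIV_DATASTORE_BACKUP: u64 = 1 << 3; // illustrative value

/// Returns Some(restricting owner) when only owned backups may be verified,
/// None when the whole datastore may be verified.
fn verify_owner_restriction(privs: u64, auth_id: &str) -> Option<String> {
    if privs & PRIV_DATASTORE_VERIFY == 0 {
        Some(auth_id.to_string()) // Datastore.Backup only: limit to own backups
    } else {
        None // Datastore.Verify present: verify everything
    }
}

fn main() {
    let alice = "alice@pbs";
    assert_eq!(
        verify_owner_restriction(PRIV_DATASTORE_BACKUP, alice),
        Some(alice.to_string())
    );
    assert_eq!(
        verify_owner_restriction(PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, alice),
        None
    );
}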

@@ -42,6 +42,7 @@ use crate::config::acl::{
     PRIV_DATASTORE_READ,
     PRIV_DATASTORE_PRUNE,
     PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_VERIFY,
 };

 fn check_priv_or_backup_owner(
@@ -537,7 +538,7 @@ pub fn status(
         schema: UPID_SCHEMA,
     },
     access: {
-        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_READ | PRIV_DATASTORE_BACKUP, true), // fixme
+        permission: &Permission::Privilege(&["datastore", "{store}"], PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_BACKUP, true),
     },
 )]
 /// Verify backups.
@@ -553,6 +554,7 @@ pub fn verify(
 ) -> Result<Value, Error> {
     let datastore = DataStore::lookup_datastore(&store)?;
+    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

     let worker_id;
     let mut backup_dir = None;
@@ -563,12 +565,18 @@ pub fn verify(
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
             worker_id = format!("{}:{}/{}/{:08X}", store, backup_type, backup_id, backup_time);
             let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
+
+            check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
+
             backup_dir = Some(dir);
             worker_type = "verify_snapshot";
         }
         (Some(backup_type), Some(backup_id), None) => {
             worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
             let group = BackupGroup::new(backup_type, backup_id);
+
+            check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
+
             backup_group = Some(group);
             worker_type = "verify_group";
         }
@@ -578,13 +586,12 @@ pub fn verify(
         _ => bail!("parameters do not specify a backup group or snapshot"),
     }

-    let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let to_stdout = if rpcenv.env_type() == RpcEnvironmentType::CLI { true } else { false };

     let upid_str = WorkerTask::new_thread(
         worker_type,
         Some(worker_id.clone()),
-        auth_id,
+        auth_id.clone(),
         to_stdout,
         move |worker| {
             let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
@@ -617,7 +624,16 @@ pub fn verify(
                 )?;
                 failed_dirs
             } else {
-                verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
+                let privs = CachedUserInfo::new()?
+                    .lookup_privs(&auth_id, &["datastore", &store]);
+
+                let owner = if privs & PRIV_DATASTORE_VERIFY == 0 {
+                    Some(auth_id)
+                } else {
+                    None
+                };
+
+                verify_all_backups(datastore, worker.clone(), worker.upid(), owner, None)?
             };
             if failed_dirs.len() > 0 {
                 worker.log("Failed to verify following snapshots:");

@@ -482,7 +482,7 @@ pub fn verify_backup_group(
     Ok((count, errors))
 }

-/// Verify all backups inside a datastore
+/// Verify all (owned) backups inside a datastore
 ///
 /// Errors are logged to the worker log.
 ///
@@ -493,14 +493,41 @@ pub fn verify_all_backups(
     datastore: Arc<DataStore>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: &UPID,
+    owner: Option<Authid>,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
     let mut errors = Vec::new();

+    if let Some(owner) = &owner {
+        task_log!(
+            worker,
+            "verify datastore {} - limiting to backups owned by {}",
+            datastore.name(),
+            owner
+        );
+    }
+
+    let filter_by_owner = |group: &BackupGroup| {
+        if let Some(owner) = &owner {
+            match datastore.get_owner(group) {
+                Ok(ref group_owner) => {
+                    group_owner == owner
+                        || (group_owner.is_token()
+                            && !owner.is_token()
+                            && group_owner.user() == owner.user())
+                },
+                Err(_) => false,
+            }
+        } else {
+            true
+        }
+    };
+
     let mut list = match BackupGroup::list_groups(&datastore.base_path()) {
         Ok(list) => list
             .into_iter()
             .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+            .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
             task_log!(
View File

@@ -30,6 +30,7 @@ constnamedbitmap! {
     PRIV_DATASTORE_ALLOCATE("Datastore.Allocate");
     PRIV_DATASTORE_MODIFY("Datastore.Modify");
     PRIV_DATASTORE_READ("Datastore.Read");
+    PRIV_DATASTORE_VERIFY("Datastore.Verify");

     /// Datastore.Backup also requires backup ownership
     PRIV_DATASTORE_BACKUP("Datastore.Backup");
@@ -64,12 +65,14 @@ pub const ROLE_DATASTORE_ADMIN: u64 =
 PRIV_DATASTORE_AUDIT |
 PRIV_DATASTORE_MODIFY |
 PRIV_DATASTORE_READ |
+PRIV_DATASTORE_VERIFY |
 PRIV_DATASTORE_BACKUP |
 PRIV_DATASTORE_PRUNE;

-/// Datastore.Reader can read datastore content an do restore
+/// Datastore.Reader can read/verify datastore content and do restore
 pub const ROLE_DATASTORE_READER: u64 =
 PRIV_DATASTORE_AUDIT |
+PRIV_DATASTORE_VERIFY |
 PRIV_DATASTORE_READ;

 /// Datastore.Backup can do backup and restore, but no prune.
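Since the privileges declared by constnamedbitmap! are single bits and the roles are bitwise ORs of them, adding PRIV_DATASTORE_VERIFY to a role is exactly what makes that role pass the new permission checks. A small illustration with made-up bit positions (the real values are fixed by the declaration above):

// Sketch of how the role bitmaps compose; bit positions are illustrative.
const PRIV_DATASTORE_AUDIT:  u64 = 1 << 0;
const PRIV_DATASTORE_READ:   u64 = 1 << 1;
const PRIV_DATASTORE_VERIFY: u64 = 1 << 2;
const PRIV_DATASTORE_BACKUP: u64 = 1 << 3;

const ROLE_DATASTORE_READER: u64 =
    PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_VERIFY | PRIV_DATASTORE_READ;

fn role_has(role: u64, privilege: u64) -> bool {
    role & privilege != 0
}

fn main() {
    assert!(role_has(ROLE_DATASTORE_READER, PRIV_DATASTORE_VERIFY));
    assert!(!role_has(ROLE_DATASTORE_READER, PRIV_DATASTORE_BACKUP));
}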

@@ -65,7 +65,7 @@ pub fn do_verification_job(
                 task_log!(worker,"task triggered by schedule '{}'", event_str);
             }

-            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), Some(&filter));
+            let result = verify_all_backups(datastore, worker.clone(), worker.upid(), None, Some(&filter));
             let job_result = match result {
                 Ok(ref errors) if errors.is_empty() => Ok(()),
                 Ok(_) => Err(format_err!("verification failed - please check the log for details")),