api: verify: support namespaces

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Author: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date:   2022-05-10 19:04:17 +02:00
parent 8e82cc807c
commit 59229bd7f1

6 changed files with 79 additions and 51 deletions

pbs-api-types/src/jobs.rs

@@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize};
 use proxmox_schema::*;
 
 use crate::{
-    Authid, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, DATASTORE_SCHEMA,
-    DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
-    SINGLE_LINE_COMMENT_SCHEMA,
+    Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
+    BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
+    PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
 };
 
 const_regex! {
@@ -182,6 +182,10 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
             optional: true,
             schema: VERIFICATION_SCHEDULE_SCHEMA,
         },
+        ns: {
+            optional: true,
+            schema: BACKUP_NAMESPACE_SCHEMA,
+        },
     }
 )]
 #[derive(Serialize, Deserialize, Updater)]
@@ -205,6 +209,9 @@ pub struct VerificationJobConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     /// when to schedule this job in calendar event notation
     pub schedule: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    /// on which backup namespace to run the verification recursively
+    pub ns: Option<BackupNamespace>,
 }
 
 #[api(
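
The two serde attributes on the new `ns` field are what keep existing job configs compatible: a missing key deserializes to `None`, and a `None` value is omitted on write. A minimal, self-contained sketch of that round trip (a plain `String` stands in for `BackupNamespace`; assumes serde with the derive feature and serde_json as dependencies):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize)]
    struct JobConfig {
        store: String,
        #[serde(skip_serializing_if = "Option::is_none", default)]
        ns: Option<String>, // stand-in for Option<BackupNamespace>
    }

    fn main() {
        // `ns: None` is skipped on serialization: {"store":"tank"}
        let job = JobConfig { store: "tank".into(), ns: None };
        println!("{}", serde_json::to_string(&job).unwrap());

        // a config written before this change still parses; `ns` defaults to None
        let old: JobConfig = serde_json::from_str(r#"{"store":"tank"}"#).unwrap();
        assert!(old.ns.is_none());
    }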

pbs-datastore/src/lib.rs

@@ -206,7 +206,10 @@ pub use manifest::BackupManifest;
 pub use store_progress::StoreProgress;
 
 mod datastore;
-pub use datastore::{check_backup_owner, DataStore, ListGroups, ListNamespaces, ListSnapshots};
+pub use datastore::{
+    check_backup_owner, DataStore, ListGroups, ListNamespaces, ListNamespacesRecursive,
+    ListSnapshots,
+};
 
 mod snapshot_reader;
 pub use snapshot_reader::SnapshotReader;

src/api2/admin/datastore.rs

@@ -36,9 +36,9 @@ use pbs_api_types::{
     DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
     RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
     BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
-    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
-    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT,
+    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
+    PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -727,6 +727,10 @@ pub fn status(
                 schema: BACKUP_TIME_SCHEMA,
                 optional: true,
             },
+            "max-depth": {
+                schema: NS_MAX_DEPTH_SCHEMA,
+                optional: true,
+            },
         },
     },
     returns: {
@@ -750,6 +754,7 @@ pub fn verify(
     backup_time: Option<i64>,
     ignore_verified: Option<bool>,
     outdated_after: Option<i64>,
+    max_depth: Option<usize>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -771,8 +776,6 @@ pub fn verify(
     let mut backup_group = None;
     let mut worker_type = "verify";
 
-    // FIXME: Recursion
-    // FIXME: Namespaces and worker ID, could this be an issue?
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
             worker_id = format!(
@@ -783,8 +786,12 @@ pub fn verify(
                 backup_id,
                 backup_time
             );
-            let dir =
-                datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;
+            let dir = datastore.backup_dir_from_parts(
+                backup_ns.clone(),
+                backup_type,
+                backup_id,
+                backup_time,
+            )?;
 
             if owner_check_required {
                 let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
@@ -809,11 +816,15 @@ pub fn verify(
                 check_backup_owner(&owner, &auth_id)?;
             }
 
-            backup_group = Some(datastore.backup_group(backup_ns, group));
+            backup_group = Some(datastore.backup_group(backup_ns.clone(), group));
             worker_type = "verify_group";
         }
         (None, None, None) => {
-            worker_id = store.clone();
+            worker_id = if backup_ns.is_root() {
+                store.clone()
+            } else {
+                format!("{store}:{}", backup_ns.display_as_path())
+            };
         }
         _ => bail!("parameters do not specify a backup group or snapshot"),
     }
@@ -854,11 +865,11 @@ pub fn verify(
                 None
             };
 
-            // FIXME namespace missing here..
-
             verify_all_backups(
                 &verify_worker,
                 worker.upid(),
+                backup_ns,
+                max_depth,
                 owner,
                 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
             )?

src/api2/config/verify.rs

@@ -155,6 +155,8 @@ pub enum DeletableProperty {
     Schedule,
     /// Delete outdated after property.
     OutdatedAfter,
+    /// Delete namespace property, defaulting to root namespace then.
+    Ns,
 }
 
 #[api(
@@ -234,6 +236,9 @@ pub fn update_verification_job(
                 DeletableProperty::Schedule => {
                     data.schedule = None;
                 }
+                DeletableProperty::Ns => {
+                    data.ns = None;
+                }
             }
         }
     }
@@ -268,6 +273,11 @@ pub fn update_verification_job(
     if update.schedule.is_some() {
         data.schedule = update.schedule;
     }
+    if let Some(ns) = update.ns {
+        if !ns.is_root() {
+            data.ns = Some(ns);
+        }
+    }
 
     config.set_data(&id, "verification", &data)?;
 

src/backup/verify.rs

@@ -19,6 +19,8 @@ use proxmox_sys::fs::lock_dir_noblock_shared;
 
 use crate::tools::parallel_handler::ParallelHandler;
 
+use crate::backup::hierarchy::ListAccessibleBackupGroups;
+
 /// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
 /// already been verified or detected as corrupt.
 pub struct VerifyWorker {
@@ -495,6 +497,8 @@ pub fn verify_backup_group(
 pub fn verify_all_backups(
     verify_worker: &VerifyWorker,
     upid: &UPID,
+    ns: BackupNamespace,
+    max_depth: Option<usize>,
     owner: Option<Authid>,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
@@ -507,50 +511,36 @@ pub fn verify_all_backups(
         verify_worker.datastore.name()
     );
 
-    if let Some(owner) = &owner {
+    let owner_filtered = if let Some(owner) = &owner {
         task_log!(worker, "limiting to backups owned by {}", owner);
-    }
-
-    let filter_by_owner = |group: &BackupGroup| {
-        match (
-            // FIXME: with recursion the namespace needs to come from the iterator...
-            verify_worker
-                .datastore
-                .get_owner(&BackupNamespace::root(), group.as_ref()),
-            &owner,
-        ) {
-            (Ok(ref group_owner), Some(owner)) => {
-                group_owner == owner
-                    || (group_owner.is_token()
-                        && !owner.is_token()
-                        && group_owner.user() == owner.user())
-            }
-            (Ok(_), None) => true,
-            (Err(err), Some(_)) => {
-                // intentionally not in task log
-                // the task user might not be allowed to see this group!
-                println!("Failed to get owner of group '{}' - {}", group, err);
-                false
-            }
-            (Err(err), None) => {
-                // we don't filter by owner, but we want to log the error
-                task_log!(worker, "Failed to get owner of group '{} - {}", group, err);
-                errors.push(group.to_string());
-                true
-            }
-        }
+        true
+    } else {
+        false
     };
 
     // FIXME: This should probably simply enable recursion (or the call have a recursion parameter)
-    let mut list = match verify_worker
-        .datastore
-        .iter_backup_groups_ok(Default::default())
-    {
+    let store = Arc::clone(&verify_worker.datastore);
+    let max_depth = max_depth.unwrap_or(pbs_api_types::MAX_NAMESPACE_DEPTH);
+
+    let mut list = match ListAccessibleBackupGroups::new(store, ns.clone(), max_depth, owner) {
         Ok(list) => list
+            .filter_map(|group| match group {
+                Ok(group) => Some(group),
+                Err(err) if owner_filtered => {
+                    // intentionally not in task log, the user might not see this group!
+                    println!("error on iterating groups in ns '{ns}' - {err}");
+                    None
+                }
+                Err(err) => {
+                    // we don't filter by owner, but we want to log the error
+                    task_log!(worker, "error on iterating groups in ns '{ns}' - {err}");
+                    errors.push(err.to_string());
+                    None
+                }
+            })
             .filter(|group| {
                 !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
            })
-            .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
             task_log!(worker, "unable to list backups: {}", err,);
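
The hunk above retires the standalone `filter_by_owner` closure: `ListAccessibleBackupGroups` walks the namespace tree and yields `Result` items, and `filter_map` lets accessible groups through while logging failures instead of aborting the whole walk. A generic sketch of that drain-and-log pattern (the function name is made up; this is not a PBS API):

    use std::fmt::Display;

    // passes Ok items through; logs each error and records it, without stopping
    fn collect_ok_logging<T, E: Display>(
        iter: impl Iterator<Item = Result<T, E>>,
        errors: &mut Vec<String>,
    ) -> Vec<T> {
        iter.filter_map(|item| match item {
            Ok(v) => Some(v),
            Err(err) => {
                eprintln!("error while iterating - {err}");
                errors.push(err.to_string());
                None
            }
        })
        .collect()
    }

    fn main() {
        let items: Vec<Result<i32, &str>> = vec![Ok(1), Err("missing group"), Ok(3)];
        let mut errors = Vec::new();
        let ok = collect_ok_logging(items.into_iter(), &mut errors);
        assert_eq!(ok, vec![1, 3]);
        assert_eq!(errors.len(), 1);
    }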

src/server/verify_job.rs

@@ -40,10 +40,17 @@ pub fn do_verification_job(
                 task_log!(worker, "task triggered by schedule '{}'", event_str);
             }
 
+            let ns = match verification_job.ns {
+                Some(ref ns) => ns.clone(),
+                None => Default::default(),
+            };
+
             let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
             let result = verify_all_backups(
                 &verify_worker,
                 worker.upid(),
+                ns,
+                None,
                 None,
                 Some(&move |manifest| {
                     verify_filter(ignore_verified_snapshots, outdated_after, manifest)
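
In the job runner above, an unset `ns` falls back to `Default::default()`; together with the config code, which stores only non-root namespaces, this makes the root namespace the implicit default. Assuming the default of `BackupNamespace` is the root namespace (as the deletable-property doc comment implies), the `match` is a spelled-out `unwrap_or_default`, sketched here with a stand-in type:

    // `Ns` stands in for BackupNamespace; an empty path is assumed to be root
    #[derive(Clone, Default, Debug, PartialEq)]
    struct Ns(Vec<String>);

    fn main() {
        let job_ns: Option<Ns> = None; // job config without a namespace set
        let ns = job_ns.unwrap_or_default();
        assert_eq!(ns, Ns::default()); // resolves to the root namespace
    }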