api: verify: support namespaces
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent 8e82cc807c
commit 59229bd7f1

@@ -7,9 +7,9 @@ use serde::{Deserialize, Serialize};
 use proxmox_schema::*;
 
 use crate::{
-    Authid, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA, DATASTORE_SCHEMA,
-    DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA, PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA,
-    SINGLE_LINE_COMMENT_SCHEMA,
+    Authid, BackupNamespace, BackupType, RateLimitConfig, Userid, BACKUP_GROUP_SCHEMA,
+    BACKUP_NAMESPACE_SCHEMA, DATASTORE_SCHEMA, DRIVE_NAME_SCHEMA, MEDIA_POOL_NAME_SCHEMA,
+    PROXMOX_SAFE_ID_FORMAT, REMOTE_ID_SCHEMA, SINGLE_LINE_COMMENT_SCHEMA,
 };
 
 const_regex! {
@@ -182,6 +182,10 @@ pub const VERIFICATION_OUTDATED_AFTER_SCHEMA: Schema =
             optional: true,
             schema: VERIFICATION_SCHEDULE_SCHEMA,
         },
+        ns: {
+            optional: true,
+            schema: BACKUP_NAMESPACE_SCHEMA,
+        },
     }
 )]
 #[derive(Serialize, Deserialize, Updater)]
@@ -205,6 +209,9 @@ pub struct VerificationJobConfig {
     #[serde(skip_serializing_if = "Option::is_none")]
     /// when to schedule this job in calendar event notation
     pub schedule: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none", default)]
+    /// on which backup namespace to run the verification recursively
+    pub ns: Option<BackupNamespace>,
 }
 
 #[api(

@@ -206,7 +206,10 @@ pub use manifest::BackupManifest;
 pub use store_progress::StoreProgress;
 
 mod datastore;
-pub use datastore::{check_backup_owner, DataStore, ListGroups, ListNamespaces, ListSnapshots};
+pub use datastore::{
+    check_backup_owner, DataStore, ListGroups, ListNamespaces, ListNamespacesRecursive,
+    ListSnapshots,
+};
 
 mod snapshot_reader;
 pub use snapshot_reader::SnapshotReader;

@@ -36,9 +36,9 @@ use pbs_api_types::{
     DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
     RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
     BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
-    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
-    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT,
+    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ,
+    PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -727,6 +727,10 @@ pub fn status(
                 schema: BACKUP_TIME_SCHEMA,
                 optional: true,
             },
+            "max-depth": {
+                schema: NS_MAX_DEPTH_SCHEMA,
+                optional: true,
+            },
         },
     },
     returns: {
@@ -750,6 +754,7 @@ pub fn verify(
     backup_time: Option<i64>,
     ignore_verified: Option<bool>,
     outdated_after: Option<i64>,
+    max_depth: Option<usize>,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -771,8 +776,6 @@ pub fn verify(
     let mut backup_group = None;
     let mut worker_type = "verify";
 
-    // FIXME: Recursion
-    // FIXME: Namespaces and worker ID, could this be an issue?
     match (backup_type, backup_id, backup_time) {
         (Some(backup_type), Some(backup_id), Some(backup_time)) => {
             worker_id = format!(
@@ -783,8 +786,12 @@ pub fn verify(
                 backup_id,
                 backup_time
             );
-            let dir =
-                datastore.backup_dir_from_parts(backup_ns, backup_type, backup_id, backup_time)?;
+            let dir = datastore.backup_dir_from_parts(
+                backup_ns.clone(),
+                backup_type,
+                backup_id,
+                backup_time,
+            )?;
 
             if owner_check_required {
                 let owner = datastore.get_owner(dir.backup_ns(), dir.as_ref())?;
@@ -809,11 +816,15 @@ pub fn verify(
                 check_backup_owner(&owner, &auth_id)?;
             }
 
-            backup_group = Some(datastore.backup_group(backup_ns, group));
+            backup_group = Some(datastore.backup_group(backup_ns.clone(), group));
             worker_type = "verify_group";
         }
         (None, None, None) => {
-            worker_id = store.clone();
+            worker_id = if backup_ns.is_root() {
+                store.clone()
+            } else {
+                format!("{store}:{}", backup_ns.display_as_path())
+            };
         }
         _ => bail!("parameters do not specify a backup group or snapshot"),
     }
@@ -854,11 +865,11 @@ pub fn verify(
                 None
             };
 
-            // FIXME namespace missing here..
-
             verify_all_backups(
                 &verify_worker,
                 worker.upid(),
+                backup_ns,
+                max_depth,
                 owner,
                 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
             )?

@@ -155,6 +155,8 @@ pub enum DeletableProperty {
     Schedule,
     /// Delete outdated after property.
    OutdatedAfter,
+    /// Delete namespace property, defaulting to root namespace then.
+    Ns,
 }
 
 #[api(
@@ -234,6 +236,9 @@ pub fn update_verification_job(
                 DeletableProperty::Schedule => {
                     data.schedule = None;
                 }
+                DeletableProperty::Ns => {
+                    data.ns = None;
+                }
             }
         }
     }
@@ -268,6 +273,11 @@ pub fn update_verification_job(
     if update.schedule.is_some() {
         data.schedule = update.schedule;
     }
+    if let Some(ns) = update.ns {
+        if !ns.is_root() {
+            data.ns = Some(ns);
+        }
+    }
 
     config.set_data(&id, "verification", &data)?;
 

@@ -19,6 +19,8 @@ use proxmox_sys::fs::lock_dir_noblock_shared;
 
 use crate::tools::parallel_handler::ParallelHandler;
 
+use crate::backup::hierarchy::ListAccessibleBackupGroups;
+
 /// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
 /// already been verified or detected as corrupt.
 pub struct VerifyWorker {
@@ -495,6 +497,8 @@ pub fn verify_backup_group(
 pub fn verify_all_backups(
     verify_worker: &VerifyWorker,
     upid: &UPID,
+    ns: BackupNamespace,
+    max_depth: Option<usize>,
     owner: Option<Authid>,
     filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
@@ -507,50 +511,36 @@ pub fn verify_all_backups(
         verify_worker.datastore.name()
     );
 
-    if let Some(owner) = &owner {
+    let owner_filtered = if let Some(owner) = &owner {
         task_log!(worker, "limiting to backups owned by {}", owner);
-    }
-
-    let filter_by_owner = |group: &BackupGroup| {
-        match (
-            // FIXME: with recursion the namespace needs to come from the iterator...
-            verify_worker
-                .datastore
-                .get_owner(&BackupNamespace::root(), group.as_ref()),
-            &owner,
-        ) {
-            (Ok(ref group_owner), Some(owner)) => {
-                group_owner == owner
-                    || (group_owner.is_token()
-                        && !owner.is_token()
-                        && group_owner.user() == owner.user())
-            }
-            (Ok(_), None) => true,
-            (Err(err), Some(_)) => {
-                // intentionally not in task log
-                // the task user might not be allowed to see this group!
-                println!("Failed to get owner of group '{}' - {}", group, err);
-                false
-            }
-            (Err(err), None) => {
-                // we don't filter by owner, but we want to log the error
-                task_log!(worker, "Failed to get owner of group '{} - {}", group, err);
-                errors.push(group.to_string());
-                true
-            }
-        }
+        true
+    } else {
+        false
     };
 
     // FIXME: This should probably simply enable recursion (or the call have a recursion parameter)
-    let mut list = match verify_worker
-        .datastore
-        .iter_backup_groups_ok(Default::default())
-    {
+    let store = Arc::clone(&verify_worker.datastore);
+    let max_depth = max_depth.unwrap_or(pbs_api_types::MAX_NAMESPACE_DEPTH);
+
+    let mut list = match ListAccessibleBackupGroups::new(store, ns.clone(), max_depth, owner) {
         Ok(list) => list
+            .filter_map(|group| match group {
+                Ok(group) => Some(group),
+                Err(err) if owner_filtered => {
+                    // intentionally not in task log, the user might not see this group!
+                    println!("error on iterating groups in ns '{ns}' - {err}");
+                    None
+                }
+                Err(err) => {
+                    // we don't filter by owner, but we want to log the error
+                    task_log!(worker, "error on iterating groups in ns '{ns}' - {err}");
+                    errors.push(err.to_string());
+                    None
+                }
+            })
             .filter(|group| {
                 !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
             })
-            .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
             task_log!(worker, "unable to list backups: {}", err,);

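(Note, not part of the commit.) With the extended signature above, callers now pass the namespace to start from and an optional recursion depth. A rough usage sketch under these assumptions: BackupNamespace parses a slash-separated path via FromStr, and verify_worker/upid come from the surrounding task setup; the namespace string and depth are invented for illustration:

use anyhow::Error;
use pbs_api_types::{BackupNamespace, UPID};

// Sketch only: verify one sub-namespace, two levels deep, with no owner
// restriction and no manifest filter. Returns the paths that failed
// verification, as the function already did before this change.
fn verify_subtree(verify_worker: &VerifyWorker, upid: &UPID) -> Result<Vec<String>, Error> {
    let ns: BackupNamespace = "dev/team-a".parse()?; // assumed slash-separated FromStr
    verify_all_backups(
        verify_worker,
        upid,
        ns,      // new: namespace to start from (root means the whole datastore)
        Some(2), // new: maximum recursion depth; None falls back to MAX_NAMESPACE_DEPTH
        None,    // no owner restriction
        None,    // no manifest filter, verify everything
    )
}
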
@@ -40,10 +40,17 @@ pub fn do_verification_job(
         task_log!(worker, "task triggered by schedule '{}'", event_str);
     }
 
+    let ns = match verification_job.ns {
+        Some(ref ns) => ns.clone(),
+        None => Default::default(),
+    };
+
     let verify_worker = crate::backup::VerifyWorker::new(worker.clone(), datastore);
     let result = verify_all_backups(
         &verify_worker,
         worker.upid(),
+        ns,
+        None,
         None,
         Some(&move |manifest| {
             verify_filter(ignore_verified_snapshots, outdated_after, manifest)