verify: directly pass manifest to filter function

This avoids loading the manifest twice during verify.
Dietmar Maurer 2020-10-29 07:59:19 +01:00
parent 227a39b34b
commit d771a608f5
4 changed files with 26 additions and 24 deletions
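
The change turns the snapshot filter from a mandatory callback over BackupInfo into an optional callback over the already-loaded BackupManifest. For orientation, here is the signature change of verify_all_backups (verify_backup_group and verify_backup_dir change analogously), sketched from the hunks below — not additional code from the commit itself:

    // before: caller always supplies a filter; a filter that wants to look at
    // the verify state has to load the manifest on its own
    pub fn verify_all_backups(
        datastore: Arc<DataStore>,
        worker: Arc<dyn TaskState + Send + Sync>,
        upid: &UPID,
        filter: &dyn Fn(&BackupInfo) -> bool,
    ) -> Result<Vec<String>, Error>

    // after: the filter is optional (None = verify everything) and receives
    // the manifest that the verify code has already loaded
    pub fn verify_all_backups(
        datastore: Arc<DataStore>,
        worker: Arc<dyn TaskState + Send + Sync>,
        upid: &UPID,
        filter: Option<&dyn Fn(&BackupManifest) -> bool>,
    ) -> Result<Vec<String>, Error>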

View File

@@ -579,7 +579,6 @@ pub fn verify(
         move |worker| {
             let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
             let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
-            let filter = |_backup_info: &BackupInfo| { true };
 
             let failed_dirs = if let Some(backup_dir) = backup_dir {
                 let mut res = Vec::new();
@@ -590,6 +589,7 @@ pub fn verify(
                     corrupt_chunks,
                     worker.clone(),
                     worker.upid().clone(),
+                    None,
                 )? {
                     res.push(backup_dir.to_string());
                 }
@@ -603,11 +603,11 @@ pub fn verify(
                     None,
                     worker.clone(),
                     worker.upid(),
-                    &filter,
+                    None,
                 )?;
                 failed_dirs
             } else {
-                verify_all_backups(datastore, worker.clone(), worker.upid(), &filter)?
+                verify_all_backups(datastore, worker.clone(), worker.upid(), None)?
             };
             if failed_dirs.len() > 0 {
                 worker.log("Failed to verify following snapshots:");

View File

@@ -533,6 +533,7 @@ impl BackupEnvironment {
             corrupt_chunks,
             worker.clone(),
             worker.upid().clone(),
+            None,
             snap_lock,
         )? {
             bail!("verification failed - please check the log for details");

View File

@@ -14,6 +14,7 @@ use crate::{
     BackupGroup,
     BackupDir,
     BackupInfo,
+    BackupManifest,
     IndexFile,
     CryptMode,
     FileInfo,
@@ -284,6 +285,7 @@ pub fn verify_backup_dir(
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: UPID,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<bool, Error> {
     let snap_lock = lock_dir_noblock_shared(
         &datastore.snapshot_path(&backup_dir),
@@ -297,6 +299,7 @@ pub fn verify_backup_dir(
             corrupt_chunks,
             worker,
             upid,
+            filter,
             snap_lock
         ),
         Err(err) => {
@@ -320,6 +323,7 @@ pub fn verify_backup_dir_with_lock(
     corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: UPID,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
     _snap_lock: Dir,
 ) -> Result<bool, Error> {
     let manifest = match datastore.load_manifest(&backup_dir) {
@@ -336,6 +340,18 @@ pub fn verify_backup_dir_with_lock(
         }
     };
 
+    if let Some(filter) = filter {
+        if filter(&manifest) == false {
+            task_log!(
+                worker,
+                "SKIPPED: verify {}:{} (recently verified)",
+                datastore.name(),
+                backup_dir,
+            );
+            return Ok(true);
+        }
+    }
+
     task_log!(worker, "verify {}:{}", datastore.name(), backup_dir);
 
     let mut error_count = 0;
@@ -412,7 +428,7 @@ pub fn verify_backup_group(
     progress: Option<(usize, usize)>, // (done, snapshot_count)
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: &UPID,
-    filter: &dyn Fn(&BackupInfo) -> bool,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<(usize, Vec<String>), Error> {
 
     let mut errors = Vec::new();
@@ -439,16 +455,6 @@ pub fn verify_backup_group(
     for info in list {
         count += 1;
 
-        if filter(&info) == false {
-            task_log!(
-                worker,
-                "SKIPPED: verify {}:{} (recently verified)",
-                datastore.name(),
-                info.backup_dir,
-            );
-            continue;
-        }
-
         if !verify_backup_dir(
             datastore.clone(),
             &info.backup_dir,
@@ -456,6 +462,7 @@ pub fn verify_backup_group(
             corrupt_chunks.clone(),
             worker.clone(),
             upid.clone(),
+            filter,
         )? {
             errors.push(info.backup_dir.to_string());
         }
@@ -486,7 +493,7 @@ pub fn verify_all_backups(
     datastore: Arc<DataStore>,
     worker: Arc<dyn TaskState + Send + Sync>,
     upid: &UPID,
-    filter: &dyn Fn(&BackupInfo) -> bool,
+    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
 ) -> Result<Vec<String>, Error> {
 
     let mut errors = Vec::new();
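
The skip check now lives next to the single load_manifest() call, and the filter is simply threaded through to the per-snapshot verification. The following standalone sketch shows the same optional-filter pattern with a toy Manifest type and a hypothetical verify_one function; it is an illustration, not code from the repository:

    // Toy stand-in for the real BackupManifest.
    struct Manifest {
        recently_verified: bool,
    }

    // Mirrors the shape of verify_backup_dir_with_lock: an optional filter over
    // the manifest decides whether the expensive verification runs at all.
    fn verify_one(manifest: &Manifest, filter: Option<&dyn Fn(&Manifest) -> bool>) -> bool {
        if let Some(filter) = filter {
            if !filter(manifest) {
                println!("SKIPPED (recently verified)");
                return true; // a skipped snapshot counts as success, like Ok(true) above
            }
        }
        println!("verifying ...");
        true
    }

    fn main() {
        let fresh = Manifest { recently_verified: true };
        let stale = Manifest { recently_verified: false };

        // Only verify snapshots that were not verified recently.
        let filter = |m: &Manifest| !m.recently_verified;

        verify_one(&fresh, Some(&filter)); // skipped
        verify_one(&stale, Some(&filter)); // verified
        verify_one(&fresh, None);          // no filter: always verified
    }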

View File

@@ -7,7 +7,7 @@ use crate::{
     config::verify::VerificationJobConfig,
     backup::{
         DataStore,
-        BackupInfo,
+        BackupManifest,
         verify_all_backups,
     },
     task_log,
@@ -23,19 +23,13 @@ pub fn do_verification_job(
     let datastore = DataStore::lookup_datastore(&verification_job.store)?;
 
-    let datastore2 = datastore.clone();
-
     let outdated_after = verification_job.outdated_after.clone();
     let ignore_verified_snapshots = verification_job.ignore_verified.unwrap_or(true);
 
-    let filter = move |backup_info: &BackupInfo| {
+    let filter = move |manifest: &BackupManifest| {
         if !ignore_verified_snapshots {
             return true;
         }
 
-        let manifest = match datastore2.load_manifest(&backup_info.backup_dir) {
-            Ok((manifest, _)) => manifest,
-            Err(_) => return true, // include, so task picks this up as error
-        };
-
         let raw_verify_state = manifest.unprotected["verify_state"].clone();
         match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
@@ -71,7 +65,7 @@ pub fn do_verification_job(
             task_log!(worker,"task triggered by schedule '{}'", event_str);
         }
 
-        let result = verify_all_backups(datastore, worker.clone(), worker.upid(), &filter);
+        let result = verify_all_backups(datastore, worker.clone(), worker.upid(), Some(&filter));
         let job_result = match result {
            Ok(ref errors) if errors.is_empty() => Ok(()),
            Ok(_) => Err(format_err!("verification failed - please check the log for details")),
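
The verify-job filter no longer needs its own datastore handle or load_manifest() call; it only inspects the manifest it is handed. The verify_state lookup it performs can be illustrated with the small standalone snippet below; ToyVerifyState and the JSON layout are stand-ins, not the real SnapshotVerifyState schema, and serde/serde_json are assumed as dependencies:

    use serde::Deserialize;
    use serde_json::json;

    // Illustrative stand-in for SnapshotVerifyState.
    #[derive(Deserialize, Debug)]
    struct ToyVerifyState {
        state: String,
    }

    fn main() {
        // The "unprotected" part of a manifest, as a JSON value.
        let unprotected = json!({
            "verify_state": { "state": "ok" }
        });

        // Same shape as the closure above: pull out verify_state and try to decode it.
        let raw_verify_state = unprotected["verify_state"].clone();
        match serde_json::from_value::<ToyVerifyState>(raw_verify_state) {
            Ok(last) => println!("last verification: {:?} -> snapshot may be skipped", last),
            Err(_) => println!("no verification state -> include snapshot"),
        }
    }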