verify: keep track of and log which dirs failed verification

so that we can print a list at the end of the worker task showing which
backups are corrupt.

this is useful if there are many snapshots and some in between had an
error. Before this patch, the task log simply said to 'look in the logs',
but if the log is very long it is hard to see what exactly failed.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
commit adfdc36936 (parent d8594d87f1)
Author: Dominik Csapak
Date: 2020-07-30 09:09:05 +02:00
Committed by: Dietmar Maurer
2 changed files with 25 additions and 20 deletions
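In essence, the patch replaces the single boolean success flag with a list of failed snapshots that is collected while verifying and printed at the end of the task. A minimal, self-contained sketch of that pattern, assuming a simplified stand-in verify_one instead of the real verify_backup_dir(), with made-up snapshot names (this is not the actual proxmox-backup API):

// Sketch only: collect the names of snapshots that fail verification and
// report them together at the end, instead of returning a single flag.
// `verify_one` stands in for verify_backup_dir(): Ok(true) means verified,
// Ok(false) means the snapshot is corrupt, Err(_) aborts the whole task.
fn verify_snapshots<F>(snapshots: &[&str], mut verify_one: F) -> Result<(), String>
where
    F: FnMut(&str) -> Result<bool, String>,
{
    let mut failed_dirs: Vec<String> = Vec::new();

    for snap in snapshots {
        if !verify_one(snap)? {
            failed_dirs.push((*snap).to_string());
        }
    }

    if !failed_dirs.is_empty() {
        // Print the summary at the end of the log so failures are easy to spot.
        println!("Failed to verify following snapshots:");
        for dir in &failed_dirs {
            println!("\t{}", dir);
        }
        return Err("verification failed - please check the log for details".to_string());
    }

    Ok(())
}

fn main() {
    // Pretend the second snapshot is corrupt.
    let result = verify_snapshots(
        &["vm/100/2020-07-28T08:00:00Z", "vm/100/2020-07-29T08:00:00Z"],
        |snap| Ok(snap != "vm/100/2020-07-29T08:00:00Z"),
    );
    println!("{:?}", result);
}

The actual diff follows.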


@@ -474,16 +474,24 @@ pub fn verify(
     let upid_str = WorkerTask::new_thread(
         "verify", Some(worker_id.clone()), &username, to_stdout, move |worker|
         {
-            let success = if let Some(backup_dir) = backup_dir {
+            let failed_dirs = if let Some(backup_dir) = backup_dir {
                 let mut verified_chunks = HashSet::with_capacity(1024*16);
                 let mut corrupt_chunks = HashSet::with_capacity(64);
-                verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)?
+                let mut res = Vec::new();
+                if !verify_backup_dir(&datastore, &backup_dir, &mut verified_chunks, &mut corrupt_chunks, &worker)? {
+                    res.push(backup_dir.to_string());
+                }
+                res
             } else if let Some(backup_group) = backup_group {
                 verify_backup_group(&datastore, &backup_group, &worker)?
             } else {
                 verify_all_backups(&datastore, &worker)?
             };
-            if !success {
+            if failed_dirs.len() > 0 {
+                worker.log("Failed to verify following snapshots:");
+                for dir in failed_dirs {
+                    worker.log(format!("\t{}", dir));
+                }
                 bail!("verfication failed - please check the log for details");
             }
             Ok(())
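With this change, the tail of a failed verification task log should contain the collected list, roughly like the following (the snapshot names are illustrative, and the exact final line depends on how the task framework renders the bail!() message):

Failed to verify following snapshots:
	vm/100/2020-07-28T08:00:00Z
	ct/101/2020-07-29T08:00:00Z
TASK ERROR: verfication failed - please check the log for details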


@@ -198,34 +198,32 @@ pub fn verify_backup_dir(
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(true) if verify is successful
-/// - Ok(false) if there were verification errors
+/// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &WorkerTask) -> Result<Vec<String>, Error> {

+    let mut errors = Vec::new();
     let mut list = match group.list_backups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
             worker.log(format!("verify group {}:{} - unable to list backups: {}", datastore.name(), group, err));
-            return Ok(false);
+            return Ok(errors);
         }
     };

     worker.log(format!("verify group {}:{}", datastore.name(), group));

-    let mut error_count = 0;
-
     let mut verified_chunks = HashSet::with_capacity(1024*16); // start with 16384 chunks (up to 65GB)
     let mut corrupt_chunks = HashSet::with_capacity(64); // start with 64 chunks since we assume there are few corrupt ones

     BackupInfo::sort_list(&mut list, false); // newest first
     for info in list {
         if !verify_backup_dir(datastore, &info.backup_dir, &mut verified_chunks, &mut corrupt_chunks, worker)?{
-            error_count += 1;
+            errors.push(info.backup_dir.to_string());
         }
     }

-    Ok(error_count == 0)
+    Ok(errors)
 }

 /// Verify all backups inside a datastore
@@ -233,27 +231,26 @@ pub fn verify_backup_group(datastore: &DataStore, group: &BackupGroup, worker: &
 /// Errors are logged to the worker log.
 ///
 /// Returns
-/// - Ok(true) if verify is successful
-/// - Ok(false) if there were verification errors
+/// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<bool, Error> {
+pub fn verify_all_backups(datastore: &DataStore, worker: &WorkerTask) -> Result<Vec<String>, Error> {

+    let mut errors = Vec::new();
+
     let list = match BackupGroup::list_groups(&datastore.base_path()) {
         Ok(list) => list,
         Err(err) => {
             worker.log(format!("verify datastore {} - unable to list backups: {}", datastore.name(), err));
-            return Ok(false);
+            return Ok(errors);
         }
     };

     worker.log(format!("verify datastore {}", datastore.name()));

-    let mut error_count = 0;
-
     for group in list {
-        if !verify_backup_group(datastore, &group, worker)? {
-            error_count += 1;
-        }
+        let mut group_errors = verify_backup_group(datastore, &group, worker)?;
+        errors.append(&mut group_errors);
     }

-    Ok(error_count == 0)
+    Ok(errors)
 }
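Returning Vec<String> instead of bool also lets the per-group results compose naturally at the datastore level: the old counter bookkeeping is replaced by appending each group's failure list. A self-contained sketch of that aggregation, with a placeholder verify_group closure standing in for verify_backup_group() and made-up group names:

// Sketch only: `verify_group` stands in for verify_backup_group() and returns
// the snapshots inside one group that failed verification.
fn verify_all<F>(groups: &[&str], mut verify_group: F) -> Vec<String>
where
    F: FnMut(&str) -> Vec<String>,
{
    let mut errors = Vec::new();
    for group in groups {
        // Move each group's failures into the combined list; this replaces
        // the old `error_count += 1` bookkeeping.
        let mut group_errors = verify_group(group);
        errors.append(&mut group_errors);
    }
    errors
}

fn main() {
    let failed = verify_all(&["vm/100", "ct/101"], |group| {
        // Pretend one snapshot in ct/101 is corrupt.
        if group == "ct/101" {
            vec![format!("{}/2020-07-29T08:00:00Z", group)]
        } else {
            Vec::new()
        }
    });
    println!("failed snapshots: {:?}", failed);
}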