From 4f09d31085fd147c59c79d42215035baf3a51c32 Mon Sep 17 00:00:00 2001
From: Dietmar Maurer <dietmar@proxmox.com>
Date: Tue, 1 Sep 2020 13:33:04 +0200
Subject: [PATCH] src/backup/verify.rs: use global hashes (instead of per group)

This makes verify more predictable.
---
 src/api2/admin/datastore.rs |  7 ++++---
 src/backup/verify.rs        | 28 ++++++++++++++++++++--------
 2 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index f0f59ab7..0ff1631f 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -513,16 +513,17 @@ pub fn verify(
         userid,
         to_stdout,
         move |worker| {
+            let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+            let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
             let failed_dirs = if let Some(backup_dir) = backup_dir {
-                let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
-                let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
                 let mut res = Vec::new();
                 if !verify_backup_dir(datastore, &backup_dir, verified_chunks, corrupt_chunks, worker.clone())? {
                     res.push(backup_dir.to_string());
                 }
                 res
             } else if let Some(backup_group) = backup_group {
-                verify_backup_group(datastore, &backup_group, worker.clone())?
+                verify_backup_group(datastore, &backup_group, verified_chunks, corrupt_chunks, worker.clone())?
             } else {
                 verify_all_backups(datastore, worker.clone())?
             };
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index fd103a44..ff2894ac 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -309,7 +309,13 @@
 /// Returns
 /// - Ok(failed_dirs) where failed_dirs had verification errors
 /// - Err(_) if task was aborted
-pub fn verify_backup_group(datastore: Arc<DataStore>, group: &BackupGroup, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
+pub fn verify_backup_group(
+    datastore: Arc<DataStore>,
+    group: &BackupGroup,
+    verified_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    corrupt_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
+    worker: Arc<WorkerTask>,
+) -> Result<Vec<String>, Error> {
 
     let mut errors = Vec::new();
     let mut list = match group.list_backups(&datastore.base_path()) {
@@ -322,12 +328,6 @@
 
     worker.log(format!("verify group {}:{}", datastore.name(), group));
 
-    // start with 16384 chunks (up to 65GB)
-    let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
-
-    // start with 64 chunks since we assume there are few corrupt ones
-    let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
-
     BackupInfo::sort_list(&mut list, false); // newest first
     for info in list {
         if !verify_backup_dir(datastore.clone(), &info.backup_dir, verified_chunks.clone(), corrupt_chunks.clone(), worker.clone())?{
@@ -359,10 +359,22 @@ pub fn verify_all_backups(datastore: Arc<DataStore>, worker: Arc<WorkerTask>) -> Result<Vec<String>, Error> {
 
     list.sort_unstable();
 
+    // start with 16384 chunks (up to 65GB)
+    let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+
+    // start with 64 chunks since we assume there are few corrupt ones
+    let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
     worker.log(format!("verify datastore {}", datastore.name()));
 
     for group in list {
-        let mut group_errors = verify_backup_group(datastore.clone(), &group, worker.clone())?;
+        let mut group_errors = verify_backup_group(
+            datastore.clone(),
+            &group,
+            verified_chunks.clone(),
+            corrupt_chunks.clone(),
+            worker.clone(),
+        )?;
         errors.append(&mut group_errors);
     }
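
The pattern above, in one self-contained piece: a single verified_chunks /
corrupt_chunks pair, shared through Arc<Mutex<HashSet<..>>>, now lives for the
whole datastore run, so a chunk referenced by several backup groups is checked
at most once. The sketch below is illustrative only, not the real API:
ChunkDigest, check_chunk and verify_group are simplified stand-ins for the
DataStore/WorkerTask plumbing in src/backup/verify.rs.

    use std::collections::HashSet;
    use std::sync::{Arc, Mutex};

    type ChunkDigest = [u8; 32];

    // Illustrative stand-in for the real chunk check; always succeeds here.
    fn check_chunk(_digest: &ChunkDigest) -> bool {
        true
    }

    // Verify one group's chunks, skipping digests already seen by earlier groups.
    fn verify_group(
        chunks: &[ChunkDigest],
        verified_chunks: Arc<Mutex<HashSet<ChunkDigest>>>,
        corrupt_chunks: Arc<Mutex<HashSet<ChunkDigest>>>,
    ) {
        for digest in chunks {
            if verified_chunks.lock().unwrap().contains(digest)
                || corrupt_chunks.lock().unwrap().contains(digest)
            {
                continue; // already handled by a previous group
            }
            if check_chunk(digest) {
                verified_chunks.lock().unwrap().insert(*digest);
            } else {
                corrupt_chunks.lock().unwrap().insert(*digest);
            }
        }
    }

    fn main() {
        // As in the patch: one set pair for the whole run, sized like the originals.
        let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024 * 16)));
        let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));

        let group_a = [[1u8; 32], [2u8; 32]];
        let group_b = [[2u8; 32], [3u8; 32]]; // [2u8; 32] is shared with group_a

        verify_group(&group_a, verified_chunks.clone(), corrupt_chunks.clone());
        verify_group(&group_b, verified_chunks.clone(), corrupt_chunks.clone());

        // Three distinct digests were verified; the shared one only once.
        assert_eq!(verified_chunks.lock().unwrap().len(), 3);
    }

With per-group sets, total work depended on how many groups happened to share
chunks, since each group re-verified them from scratch; with one global pair,
every chunk is read and hashed at most once per run, which is what makes
verify more predictable.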