From 26af61debc2e1deb653c12b389cb0faf607a12dd Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht
Date: Thu, 15 Apr 2021 10:00:04 +0200
Subject: [PATCH] backup verify: re-check if we can skip a chunk in the
 actual verify loop

Fixes a non-negligible performance regression from commit
7f394c807bca3f451e77b6a1cf7de7c6e7df5f92

While we skip known-verified chunks in the stat-and-inode-sort loop,
those are only the ones from previous indexes. If there's a repeated
chunk within one index, it would get re-verified more often than
required.

So, add the check again explicitly to the read+verify loop.

Signed-off-by: Thomas Lamprecht
---
 src/backup/verify.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index ed959813..1192821e 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -214,6 +214,12 @@ fn verify_index_chunks(
 
         let info = index.chunk_info(pos).unwrap();
 
+        // we must always re-check this here, as the parallel workers below alter it!
+        // Else we'd miss skipping repeated chunks from the same index and re-verify them all.
+        if verify_worker.verified_chunks.lock().unwrap().contains(&info.digest) {
+            continue; // already verified
+        }
+
         match verify_worker.datastore.load_chunk(&info.digest) {
             Err(err) => {
                 verify_worker.corrupt_chunks.lock().unwrap().insert(info.digest);
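
The hunk restores a cheap set-membership test in front of the expensive
load+verify step. In isolation, the pattern looks roughly like the
following minimal sketch; `Digest` and `verify_chunks` are hypothetical
stand-ins, not the actual proxmox-backup types, and in the real code the
verified-digest set is shared with parallel worker threads:

use std::collections::HashSet;
use std::sync::{Arc, Mutex};

// Hypothetical stand-in for the real 32-byte chunk digest.
type Digest = [u8; 32];

fn verify_chunks(chunks: &[Digest], verified: &Arc<Mutex<HashSet<Digest>>>) {
    for digest in chunks {
        // Re-check under the lock right before the expensive work: the
        // shared set may have gained this digest since any earlier
        // filtering pass, and the same digest can repeat within one index.
        if verified.lock().unwrap().contains(digest) {
            continue; // already verified, skip the expensive work
        }

        // ... load and verify the chunk here, then record its digest so
        // later occurrences are skipped:
        verified.lock().unwrap().insert(*digest);
    }
}

fn main() {
    let verified = Arc::new(Mutex::new(HashSet::new()));
    // The first and third entries share a digest; the third is skipped.
    let chunks = [[0u8; 32], [1u8; 32], [0u8; 32]];
    verify_chunks(&chunks, &verified);
    assert_eq!(verified.lock().unwrap().len(), 2);
}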