2021-04-14 10:53:19 +00:00
|
|
|
use nix::dir::Dir;
|
2020-07-29 11:29:13 +00:00
|
|
|
use std::collections::HashSet;
|
2021-04-14 10:53:19 +00:00
|
|
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
2020-09-01 09:17:13 +00:00
|
|
|
use std::sync::{Arc, Mutex};
|
|
|
|
use std::time::Instant;
|
2020-07-29 11:29:13 +00:00
|
|
|
|
2020-08-25 15:30:27 +00:00
|
|
|
use anyhow::{bail, format_err, Error};
|
2020-06-24 11:11:45 +00:00
|
|
|
|
2021-11-23 16:57:00 +00:00
|
|
|
use proxmox_sys::{task_log, WorkerTaskContext};
|
2021-11-19 09:51:41 +00:00
|
|
|
|
2022-04-14 13:05:58 +00:00
|
|
|
use pbs_api_types::{Authid, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID};
|
2022-04-14 12:03:46 +00:00
|
|
|
use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
|
2021-08-30 09:49:22 +00:00
|
|
|
use pbs_datastore::index::IndexFile;
|
|
|
|
use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
|
2022-04-14 12:03:46 +00:00
|
|
|
use pbs_datastore::{DataBlob, DataStore, StoreProgress};
|
2021-11-23 16:57:00 +00:00
|
|
|
use proxmox_sys::fs::lock_dir_noblock_shared;
|
2021-07-06 11:26:35 +00:00
|
|
|
|
2022-02-14 13:12:39 +00:00
|
|
|
use crate::tools::parallel_handler::ParallelHandler;
|
2020-06-24 11:11:45 +00:00
|
|
|
|
2021-01-25 13:42:58 +00:00
|
|
|
/// A VerifyWorker encapsulates a task worker, datastore and information about which chunks have
/// already been verified or detected as corrupt.
pub struct VerifyWorker {
    /// Task context used for logging (`task_log!`) and abort/shutdown checks.
    worker: Arc<dyn WorkerTaskContext>,
    /// The datastore whose snapshots and chunks are verified.
    datastore: Arc<DataStore>,
    /// Digests of chunks that already passed verification; shared (Arc + Mutex) so
    /// parallel decoder threads and subsequent index runs can skip re-verifying them.
    verified_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
    /// Digests of chunks detected as corrupt; shared so later runs can skip and
    /// report them without re-reading.
    corrupt_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
}
|
|
|
|
|
|
|
|
impl VerifyWorker {
|
|
|
|
/// Creates a new VerifyWorker for a given task worker and datastore.
|
2021-09-27 06:39:44 +00:00
|
|
|
pub fn new(worker: Arc<dyn WorkerTaskContext>, datastore: Arc<DataStore>) -> Self {
|
2021-01-25 13:42:58 +00:00
|
|
|
Self {
|
|
|
|
worker,
|
|
|
|
datastore,
|
|
|
|
// start with 16k chunks == up to 64G data
|
2021-04-14 10:53:19 +00:00
|
|
|
verified_chunks: Arc::new(Mutex::new(HashSet::with_capacity(16 * 1024))),
|
2021-01-25 13:42:58 +00:00
|
|
|
// start with 64 chunks since we assume there are few corrupt ones
|
|
|
|
corrupt_chunks: Arc::new(Mutex::new(HashSet::with_capacity(64))),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-04-28 08:26:00 +00:00
|
|
|
fn verify_blob(backup_dir: &BackupDir, info: &FileInfo) -> Result<(), Error> {
|
2022-04-24 17:09:38 +00:00
|
|
|
let blob = backup_dir.load_blob(&info.filename)?;
|
2020-06-24 11:11:45 +00:00
|
|
|
|
2020-07-29 11:29:13 +00:00
|
|
|
let raw_size = blob.raw_size();
|
2020-06-24 11:11:45 +00:00
|
|
|
if raw_size != info.size {
|
|
|
|
bail!("wrong size ({} != {})", info.size, raw_size);
|
|
|
|
}
|
|
|
|
|
2020-07-28 08:23:16 +00:00
|
|
|
let csum = openssl::sha::sha256(blob.raw_data());
|
2020-06-24 11:11:45 +00:00
|
|
|
if csum != info.csum {
|
|
|
|
bail!("wrong index checksum");
|
|
|
|
}
|
|
|
|
|
2020-08-03 12:10:43 +00:00
|
|
|
match blob.crypt_mode()? {
|
|
|
|
CryptMode::Encrypt => Ok(()),
|
|
|
|
CryptMode::None => {
|
|
|
|
// digest already verified above
|
|
|
|
blob.decode(None, None)?;
|
|
|
|
Ok(())
|
2022-04-14 12:03:46 +00:00
|
|
|
}
|
2020-08-03 12:10:43 +00:00
|
|
|
CryptMode::SignOnly => bail!("Invalid CryptMode for blob"),
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-09-07 15:30:33 +00:00
|
|
|
fn rename_corrupted_chunk(
|
|
|
|
datastore: Arc<DataStore>,
|
2022-04-14 12:03:46 +00:00
|
|
|
digest: &[u8; 32],
|
2021-09-24 05:40:49 +00:00
|
|
|
worker: &dyn WorkerTaskContext,
|
2020-09-07 15:30:33 +00:00
|
|
|
) {
|
|
|
|
let (path, digest_str) = datastore.chunk_path(digest);
|
|
|
|
|
|
|
|
let mut counter = 0;
|
|
|
|
let mut new_path = path.clone();
|
2020-09-08 10:29:53 +00:00
|
|
|
loop {
|
2020-09-07 15:30:33 +00:00
|
|
|
new_path.set_file_name(format!("{}.{}.bad", digest_str, counter));
|
2021-04-14 10:53:19 +00:00
|
|
|
if new_path.exists() && counter < 9 {
|
|
|
|
counter += 1;
|
|
|
|
} else {
|
|
|
|
break;
|
|
|
|
}
|
2020-09-07 15:30:33 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
match std::fs::rename(&path, &new_path) {
|
|
|
|
Ok(_) => {
|
2020-10-12 09:46:34 +00:00
|
|
|
task_log!(worker, "corrupted chunk renamed to {:?}", &new_path);
|
2022-04-14 12:03:46 +00:00
|
|
|
}
|
2020-09-07 15:30:33 +00:00
|
|
|
Err(err) => {
|
|
|
|
match err.kind() {
|
2022-04-14 12:03:46 +00:00
|
|
|
std::io::ErrorKind::NotFound => { /* ignored */ }
|
|
|
|
_ => task_log!(
|
|
|
|
worker,
|
|
|
|
"could not rename corrupted chunk {:?} - {}",
|
|
|
|
&path,
|
|
|
|
err
|
|
|
|
),
|
2020-09-07 15:30:33 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
}
|
|
|
|
|
2020-06-26 06:14:45 +00:00
|
|
|
/// Verify every chunk referenced by `index`: load each chunk, check its digest,
/// size and crypt mode, and track shared verified/corrupt sets so chunks are
/// only checked once per VerifyWorker. Decoding/digest work runs on a small
/// thread pool; errors are counted and reported in bulk at the end.
fn verify_index_chunks(
    verify_worker: &VerifyWorker,
    index: Box<dyn IndexFile + Send>,
    crypt_mode: CryptMode,
) -> Result<(), Error> {
    // Shared error counter — incremented from both this thread and the decoder pool.
    let errors = Arc::new(AtomicUsize::new(0));

    let start_time = Instant::now();

    // Throughput bookkeeping: bytes read from disk vs. decoded payload bytes.
    let mut read_bytes = 0;
    let mut decoded_bytes = 0;

    // Clones moved into the decoder-pool closure (the `2` suffix marks the
    // pool-side handles of the shared state).
    let worker2 = Arc::clone(&verify_worker.worker);
    let datastore2 = Arc::clone(&verify_worker.datastore);
    let corrupt_chunks2 = Arc::clone(&verify_worker.corrupt_chunks);
    let verified_chunks2 = Arc::clone(&verify_worker.verified_chunks);
    let errors2 = Arc::clone(&errors);

    // Pool of 4 threads doing the CPU-heavy part: crypt-mode check and digest
    // verification of each chunk sent via `decoder_pool.send(...)` below.
    let decoder_pool = ParallelHandler::new(
        "verify chunk decoder",
        4,
        move |(chunk, digest, size): (DataBlob, [u8; 32], u64)| {
            let chunk_crypt_mode = match chunk.crypt_mode() {
                Err(err) => {
                    // Unreadable mode byte: mark corrupt, count, but keep going.
                    corrupt_chunks2.lock().unwrap().insert(digest);
                    task_log!(worker2, "can't verify chunk, unknown CryptMode - {}", err);
                    errors2.fetch_add(1, Ordering::SeqCst);
                    return Ok(());
                }
                Ok(mode) => mode,
            };

            // A mismatch is logged and counted, but the digest is still checked below.
            if chunk_crypt_mode != crypt_mode {
                task_log!(
                    worker2,
                    "chunk CryptMode {:?} does not match index CryptMode {:?}",
                    chunk_crypt_mode,
                    crypt_mode
                );
                errors2.fetch_add(1, Ordering::SeqCst);
            }

            if let Err(err) = chunk.verify_unencrypted(size as usize, &digest) {
                corrupt_chunks2.lock().unwrap().insert(digest);
                task_log!(worker2, "{}", err);
                errors2.fetch_add(1, Ordering::SeqCst);
                // Move the bad file aside so a later backup can re-upload it.
                rename_corrupted_chunk(datastore2.clone(), &digest, &worker2);
            } else {
                verified_chunks2.lock().unwrap().insert(digest);
            }

            Ok(())
        },
    );

    // Returns true if this digest needs no (re-)verification: either already
    // verified, or known-corrupt (which still counts as an error for this index).
    let skip_chunk = |digest: &[u8; 32]| -> bool {
        if verify_worker
            .verified_chunks
            .lock()
            .unwrap()
            .contains(digest)
        {
            true
        } else if verify_worker
            .corrupt_chunks
            .lock()
            .unwrap()
            .contains(digest)
        {
            let digest_str = hex::encode(digest);
            task_log!(
                verify_worker.worker,
                "chunk {} was marked as corrupt",
                digest_str
            );
            errors.fetch_add(1, Ordering::SeqCst);
            true
        } else {
            false
        }
    };

    // Abort/shutdown polling callback; only checks every 1024 positions to
    // keep the overhead negligible.
    let check_abort = |pos: usize| -> Result<(), Error> {
        if pos & 1023 == 0 {
            verify_worker.worker.check_abort()?;
            verify_worker.worker.fail_on_shutdown()?;
        }
        Ok(())
    };

    // Chunk positions sorted for sequential on-disk access, with known
    // chunks already filtered out via `skip_chunk`.
    let chunk_list =
        verify_worker
            .datastore
            .get_chunks_in_order(&index, skip_chunk, check_abort)?;

    for (pos, _) in chunk_list {
        verify_worker.worker.check_abort()?;
        verify_worker.worker.fail_on_shutdown()?;

        let info = index.chunk_info(pos).unwrap();

        // we must always recheck this here, the parallel worker below alters it!
        if skip_chunk(&info.digest) {
            continue; // already verified or marked corrupt
        }

        match verify_worker.datastore.load_chunk(&info.digest) {
            Err(err) => {
                // Unreadable chunk file counts as corrupt and gets renamed aside.
                verify_worker
                    .corrupt_chunks
                    .lock()
                    .unwrap()
                    .insert(info.digest);
                task_log!(
                    verify_worker.worker,
                    "can't verify chunk, load failed - {}",
                    err
                );
                errors.fetch_add(1, Ordering::SeqCst);
                rename_corrupted_chunk(
                    verify_worker.datastore.clone(),
                    &info.digest,
                    &verify_worker.worker,
                );
            }
            Ok(chunk) => {
                // Hand the loaded chunk to the decoder pool for digest verification.
                let size = info.size();
                read_bytes += chunk.raw_size();
                decoder_pool.send((chunk, info.digest, size))?;
                decoded_bytes += size;
            }
        }
    }

    // Wait for all in-flight decoder jobs and propagate any pool error.
    decoder_pool.complete()?;

    let elapsed = start_time.elapsed().as_secs_f64();

    let read_bytes_mib = (read_bytes as f64) / (1024.0 * 1024.0);
    let decoded_bytes_mib = (decoded_bytes as f64) / (1024.0 * 1024.0);

    let read_speed = read_bytes_mib / elapsed;
    let decode_speed = decoded_bytes_mib / elapsed;

    let error_count = errors.load(Ordering::SeqCst);

    task_log!(
        verify_worker.worker,
        " verified {:.2}/{:.2} MiB in {:.2} seconds, speed {:.2}/{:.2} MiB/s ({} errors)",
        read_bytes_mib,
        decoded_bytes_mib,
        elapsed,
        read_speed,
        decode_speed,
        error_count,
    );

    if errors.load(Ordering::SeqCst) > 0 {
        bail!("chunks could not be verified");
    }

    Ok(())
}
|
|
|
|
|
2020-07-29 11:29:13 +00:00
|
|
|
fn verify_fixed_index(
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker: &VerifyWorker,
|
2020-07-29 11:29:13 +00:00
|
|
|
backup_dir: &BackupDir,
|
|
|
|
info: &FileInfo,
|
|
|
|
) -> Result<(), Error> {
|
2020-06-24 11:11:45 +00:00
|
|
|
let mut path = backup_dir.relative_path();
|
|
|
|
path.push(&info.filename);
|
|
|
|
|
2021-01-25 13:42:58 +00:00
|
|
|
let index = verify_worker.datastore.open_fixed_reader(&path)?;
|
2020-06-24 11:11:45 +00:00
|
|
|
|
|
|
|
let (csum, size) = index.compute_csum();
|
|
|
|
if size != info.size {
|
|
|
|
bail!("wrong size ({} != {})", info.size, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
if csum != info.csum {
|
|
|
|
bail!("wrong index checksum");
|
|
|
|
}
|
|
|
|
|
2021-04-14 10:53:19 +00:00
|
|
|
verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
|
2020-07-29 11:29:13 +00:00
|
|
|
fn verify_dynamic_index(
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker: &VerifyWorker,
|
2020-07-29 11:29:13 +00:00
|
|
|
backup_dir: &BackupDir,
|
|
|
|
info: &FileInfo,
|
|
|
|
) -> Result<(), Error> {
|
2020-06-24 11:11:45 +00:00
|
|
|
let mut path = backup_dir.relative_path();
|
|
|
|
path.push(&info.filename);
|
|
|
|
|
2021-01-25 13:42:58 +00:00
|
|
|
let index = verify_worker.datastore.open_dynamic_reader(&path)?;
|
2020-06-24 11:11:45 +00:00
|
|
|
|
|
|
|
let (csum, size) = index.compute_csum();
|
|
|
|
if size != info.size {
|
|
|
|
bail!("wrong size ({} != {})", info.size, size);
|
|
|
|
}
|
|
|
|
|
|
|
|
if csum != info.csum {
|
|
|
|
bail!("wrong index checksum");
|
|
|
|
}
|
|
|
|
|
2021-04-14 10:53:19 +00:00
|
|
|
verify_index_chunks(verify_worker, Box::new(index), info.chunk_crypt_mode())
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Verify a single backup snapshot
|
|
|
|
///
|
|
|
|
/// This checks all archives inside a backup snapshot.
|
|
|
|
/// Errors are logged to the worker log.
|
|
|
|
///
|
2020-06-25 10:55:34 +00:00
|
|
|
/// Returns
|
|
|
|
/// - Ok(true) if verify is successful
|
|
|
|
/// - Ok(false) if there were verification errors
|
|
|
|
/// - Err(_) if task was aborted
|
2020-07-29 11:29:13 +00:00
|
|
|
pub fn verify_backup_dir(
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker: &VerifyWorker,
|
2020-07-29 11:29:13 +00:00
|
|
|
backup_dir: &BackupDir,
|
2020-10-12 09:46:34 +00:00
|
|
|
upid: UPID,
|
2020-10-29 06:59:19 +00:00
|
|
|
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
2020-07-29 11:29:13 +00:00
|
|
|
) -> Result<bool, Error> {
|
2020-10-20 08:08:24 +00:00
|
|
|
let snap_lock = lock_dir_noblock_shared(
|
2022-04-19 08:38:46 +00:00
|
|
|
&verify_worker.datastore.snapshot_path(backup_dir.as_ref()),
|
2020-10-14 12:16:33 +00:00
|
|
|
"snapshot",
|
2021-04-14 10:53:19 +00:00
|
|
|
"locked by another operation",
|
|
|
|
);
|
2020-10-20 08:08:24 +00:00
|
|
|
match snap_lock {
|
2021-04-14 10:53:19 +00:00
|
|
|
Ok(snap_lock) => {
|
|
|
|
verify_backup_dir_with_lock(verify_worker, backup_dir, upid, filter, snap_lock)
|
|
|
|
}
|
2020-10-20 08:08:24 +00:00
|
|
|
Err(err) => {
|
|
|
|
task_log!(
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker.worker,
|
2020-10-20 08:08:24 +00:00
|
|
|
"SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker.datastore.name(),
|
2020-10-20 08:08:24 +00:00
|
|
|
backup_dir,
|
|
|
|
err,
|
|
|
|
);
|
|
|
|
Ok(true)
|
|
|
|
}
|
2020-10-14 12:16:33 +00:00
|
|
|
}
|
2020-10-20 08:08:24 +00:00
|
|
|
}
|
2020-10-14 12:16:33 +00:00
|
|
|
|
2020-10-20 08:08:24 +00:00
|
|
|
/// See verify_backup_dir
pub fn verify_backup_dir_with_lock(
    verify_worker: &VerifyWorker,
    backup_dir: &BackupDir,
    upid: UPID,
    filter: Option<&dyn Fn(&BackupManifest) -> bool>,
    _snap_lock: Dir,
) -> Result<bool, Error> {
    // A snapshot whose manifest cannot be loaded is reported as failed (Ok(false)).
    let manifest = match verify_worker.datastore.load_manifest(backup_dir) {
        Ok((manifest, _)) => manifest,
        Err(err) => {
            task_log!(
                verify_worker.worker,
                "verify {}:{} - manifest load error: {}",
                verify_worker.datastore.name(),
                backup_dir,
                err,
            );
            return Ok(false);
        }
    };

    // Caller-supplied filter (e.g. "skip recently verified") decides whether
    // this snapshot is verified at all; a filtered-out snapshot counts as ok.
    if let Some(filter) = filter {
        if !filter(&manifest) {
            task_log!(
                verify_worker.worker,
                "SKIPPED: verify {}:{} (recently verified)",
                verify_worker.datastore.name(),
                backup_dir,
            );
            return Ok(true);
        }
    }

    task_log!(
        verify_worker.worker,
        "verify {}:{}",
        verify_worker.datastore.name(),
        backup_dir
    );

    let mut error_count = 0;

    // Verify every archive listed in the manifest; a failure of one file is
    // logged and counted, but the remaining files are still checked.
    let mut verify_result = VerifyState::Ok;
    for info in manifest.files() {
        let result = proxmox_lang::try_block!({
            task_log!(verify_worker.worker, " check {}", info.filename);
            match archive_type(&info.filename)? {
                ArchiveType::FixedIndex => verify_fixed_index(verify_worker, backup_dir, info),
                ArchiveType::DynamicIndex => verify_dynamic_index(verify_worker, backup_dir, info),
                ArchiveType::Blob => verify_blob(backup_dir, info),
            }
        });

        // Abort/shutdown requests turn into Err(_) and cancel the whole task.
        verify_worker.worker.check_abort()?;
        verify_worker.worker.fail_on_shutdown()?;

        if let Err(err) = result {
            task_log!(
                verify_worker.worker,
                "verify {}:{}/{} failed: {}",
                verify_worker.datastore.name(),
                backup_dir,
                info.filename,
                err,
            );
            error_count += 1;
            verify_result = VerifyState::Failed;
        }
    }

    // Persist the outcome (state + verifying task's UPID) into the manifest's
    // unprotected section so the GUI/filters can see the last verify result.
    let verify_state = SnapshotVerifyState {
        state: verify_result,
        upid,
    };
    let verify_state = serde_json::to_value(verify_state)?;
    verify_worker
        .datastore
        .update_manifest(backup_dir, |manifest| {
            manifest.unprotected["verify_state"] = verify_state;
        })
        .map_err(|err| format_err!("unable to update manifest blob - {}", err))?;

    Ok(error_count == 0)
}
|
|
|
|
|
2020-06-25 10:55:34 +00:00
|
|
|
/// Verify all backups inside a backup group
|
|
|
|
///
|
|
|
|
/// Errors are logged to the worker log.
|
|
|
|
///
|
|
|
|
/// Returns
|
2020-09-02 05:43:04 +00:00
|
|
|
/// - Ok((count, failed_dirs)) where failed_dirs had verification errors
|
2020-06-25 10:55:34 +00:00
|
|
|
/// - Err(_) if task was aborted
|
2020-10-28 12:19:21 +00:00
|
|
|
pub fn verify_backup_group(
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker: &VerifyWorker,
|
2020-09-01 11:33:04 +00:00
|
|
|
group: &BackupGroup,
|
2020-11-30 15:27:21 +00:00
|
|
|
progress: &mut StoreProgress,
|
2020-10-12 09:46:34 +00:00
|
|
|
upid: &UPID,
|
2020-10-29 06:59:19 +00:00
|
|
|
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
2020-11-30 15:27:21 +00:00
|
|
|
) -> Result<Vec<String>, Error> {
|
2020-07-30 07:09:05 +00:00
|
|
|
let mut errors = Vec::new();
|
2022-04-20 13:30:04 +00:00
|
|
|
let mut list = match group.list_backups() {
|
2020-06-24 11:11:45 +00:00
|
|
|
Ok(list) => list,
|
|
|
|
Err(err) => {
|
2020-10-12 09:46:34 +00:00
|
|
|
task_log!(
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker.worker,
|
2020-10-12 09:46:34 +00:00
|
|
|
"verify group {}:{} - unable to list backups: {}",
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker.datastore.name(),
|
2020-10-12 09:46:34 +00:00
|
|
|
group,
|
|
|
|
err,
|
|
|
|
);
|
2020-11-30 15:27:21 +00:00
|
|
|
return Ok(errors);
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2020-11-30 15:27:21 +00:00
|
|
|
let snapshot_count = list.len();
|
2021-04-14 10:53:19 +00:00
|
|
|
task_log!(
|
|
|
|
verify_worker.worker,
|
|
|
|
"verify group {}:{} ({} snapshots)",
|
|
|
|
verify_worker.datastore.name(),
|
|
|
|
group,
|
|
|
|
snapshot_count
|
|
|
|
);
|
2020-06-24 11:11:45 +00:00
|
|
|
|
2020-11-30 15:27:21 +00:00
|
|
|
progress.group_snapshots = snapshot_count as u64;
|
2020-09-02 05:43:04 +00:00
|
|
|
|
2020-06-24 11:11:45 +00:00
|
|
|
BackupInfo::sort_list(&mut list, false); // newest first
|
2020-11-30 15:27:21 +00:00
|
|
|
for (pos, info) in list.into_iter().enumerate() {
|
2021-04-14 10:53:19 +00:00
|
|
|
if !verify_backup_dir(verify_worker, &info.backup_dir, upid.clone(), filter)? {
|
2020-07-30 07:09:05 +00:00
|
|
|
errors.push(info.backup_dir.to_string());
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
2020-11-30 15:27:21 +00:00
|
|
|
progress.done_snapshots = pos as u64 + 1;
|
2021-04-14 10:53:19 +00:00
|
|
|
task_log!(verify_worker.worker, "percentage done: {}", progress);
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
|
2020-11-30 15:27:21 +00:00
|
|
|
Ok(errors)
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
|
2020-10-30 11:36:39 +00:00
|
|
|
/// Verify all (owned) backups inside a datastore
|
2020-06-25 10:55:34 +00:00
|
|
|
///
|
|
|
|
/// Errors are logged to the worker log.
|
|
|
|
///
|
|
|
|
/// Returns
|
2020-07-30 07:09:05 +00:00
|
|
|
/// - Ok(failed_dirs) where failed_dirs had verification errors
|
2020-06-25 10:55:34 +00:00
|
|
|
/// - Err(_) if task was aborted
|
2020-10-28 12:19:21 +00:00
|
|
|
pub fn verify_all_backups(
|
2021-01-25 13:42:58 +00:00
|
|
|
verify_worker: &VerifyWorker,
|
2020-10-12 09:46:34 +00:00
|
|
|
upid: &UPID,
|
2020-10-30 11:36:39 +00:00
|
|
|
owner: Option<Authid>,
|
2020-10-29 06:59:19 +00:00
|
|
|
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
|
2020-10-12 09:46:34 +00:00
|
|
|
) -> Result<Vec<String>, Error> {
|
2020-07-30 07:09:05 +00:00
|
|
|
let mut errors = Vec::new();
|
2021-01-25 13:42:58 +00:00
|
|
|
let worker = Arc::clone(&verify_worker.worker);
|
2020-06-24 11:11:45 +00:00
|
|
|
|
2022-04-14 12:03:46 +00:00
|
|
|
task_log!(
|
|
|
|
worker,
|
|
|
|
"verify datastore {}",
|
|
|
|
verify_worker.datastore.name()
|
|
|
|
);
|
2020-11-10 12:52:50 +00:00
|
|
|
|
2020-10-30 11:36:39 +00:00
|
|
|
if let Some(owner) = &owner {
|
2020-11-10 12:52:50 +00:00
|
|
|
task_log!(worker, "limiting to backups owned by {}", owner);
|
2020-10-30 11:36:39 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
let filter_by_owner = |group: &BackupGroup| {
|
2022-04-19 08:38:46 +00:00
|
|
|
match (verify_worker.datastore.get_owner(group.as_ref()), &owner) {
|
2020-11-10 12:52:49 +00:00
|
|
|
(Ok(ref group_owner), Some(owner)) => {
|
|
|
|
group_owner == owner
|
|
|
|
|| (group_owner.is_token()
|
|
|
|
&& !owner.is_token()
|
|
|
|
&& group_owner.user() == owner.user())
|
2022-04-14 12:03:46 +00:00
|
|
|
}
|
2020-11-10 12:52:49 +00:00
|
|
|
(Ok(_), None) => true,
|
|
|
|
(Err(err), Some(_)) => {
|
|
|
|
// intentionally not in task log
|
|
|
|
// the task user might not be allowed to see this group!
|
|
|
|
println!("Failed to get owner of group '{}' - {}", group, err);
|
|
|
|
false
|
2022-04-14 12:03:46 +00:00
|
|
|
}
|
2020-11-10 12:52:49 +00:00
|
|
|
(Err(err), None) => {
|
|
|
|
// we don't filter by owner, but we want to log the error
|
2022-04-19 08:38:46 +00:00
|
|
|
task_log!(worker, "Failed to get owner of group '{} - {}", group, err);
|
2020-11-10 12:52:49 +00:00
|
|
|
errors.push(group.to_string());
|
|
|
|
true
|
2022-04-14 12:03:46 +00:00
|
|
|
}
|
2020-10-30 11:36:39 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2022-04-21 13:04:59 +00:00
|
|
|
// FIXME: This should probably simply enable recursion (or the call have a recursion parameter)
|
|
|
|
let mut list = match verify_worker
|
|
|
|
.datastore
|
|
|
|
.iter_backup_groups_ok(Default::default())
|
|
|
|
{
|
2020-09-10 06:54:29 +00:00
|
|
|
Ok(list) => list
|
2022-04-14 13:05:58 +00:00
|
|
|
.filter(|group| {
|
|
|
|
!(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
|
|
|
|
})
|
2020-10-30 11:36:39 +00:00
|
|
|
.filter(filter_by_owner)
|
2020-09-10 06:54:29 +00:00
|
|
|
.collect::<Vec<BackupGroup>>(),
|
2020-06-24 11:11:45 +00:00
|
|
|
Err(err) => {
|
2021-04-14 10:53:19 +00:00
|
|
|
task_log!(worker, "unable to list backups: {}", err,);
|
2020-07-30 07:09:05 +00:00
|
|
|
return Ok(errors);
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2022-04-20 10:20:28 +00:00
|
|
|
list.sort_unstable_by(|a, b| a.group().cmp(b.group()));
|
2020-08-25 06:38:47 +00:00
|
|
|
|
2020-11-30 15:27:21 +00:00
|
|
|
let group_count = list.len();
|
|
|
|
task_log!(worker, "found {} groups", group_count);
|
2020-06-24 11:11:45 +00:00
|
|
|
|
2020-11-30 15:27:21 +00:00
|
|
|
let mut progress = StoreProgress::new(group_count as u64);
|
|
|
|
|
|
|
|
for (pos, group) in list.into_iter().enumerate() {
|
|
|
|
progress.done_groups = pos as u64;
|
|
|
|
progress.done_snapshots = 0;
|
|
|
|
progress.group_snapshots = 0;
|
|
|
|
|
2021-04-14 10:53:19 +00:00
|
|
|
let mut group_errors =
|
|
|
|
verify_backup_group(verify_worker, &group, &mut progress, upid, filter)?;
|
2020-07-30 07:09:05 +00:00
|
|
|
errors.append(&mut group_errors);
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
|
|
|
|
2020-07-30 07:09:05 +00:00
|
|
|
Ok(errors)
|
2020-06-24 11:11:45 +00:00
|
|
|
}
|
2021-06-22 07:56:18 +00:00
|
|
|
|
|
|
|
/// Filter for the verification of snapshots
|
|
|
|
pub fn verify_filter(
|
|
|
|
ignore_verified_snapshots: bool,
|
|
|
|
outdated_after: Option<i64>,
|
|
|
|
manifest: &BackupManifest,
|
|
|
|
) -> bool {
|
|
|
|
if !ignore_verified_snapshots {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
let raw_verify_state = manifest.unprotected["verify_state"].clone();
|
|
|
|
match serde_json::from_value::<SnapshotVerifyState>(raw_verify_state) {
|
|
|
|
Err(_) => true, // no last verification, always include
|
|
|
|
Ok(last_verify) => {
|
|
|
|
match outdated_after {
|
|
|
|
None => false, // never re-verify if ignored and no max age
|
|
|
|
Some(max_age) => {
|
2021-10-08 09:19:37 +00:00
|
|
|
let now = proxmox_time::epoch_i64();
|
2021-06-22 07:56:18 +00:00
|
|
|
let days_since_last_verify = (now - last_verify.upid.starttime) / 86400;
|
|
|
|
|
2022-01-27 14:13:19 +00:00
|
|
|
max_age == 0 || days_since_last_verify > max_age
|
2021-06-22 07:56:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-07-06 11:26:35 +00:00
|
|
|
}
|