fix #2988: allow verification after finishing a snapshot
To cater to the paranoid, a new datastore-wide setting "verify-new" is introduced. When set, a verify job will be spawned right after a new backup is added to the store (only verifying the added snapshot).

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
commit 0698f78df5 (parent bcc2880461)
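A minimal sketch of how an administrator might enable this (not part of the commit; the store name and path are hypothetical, and the tab-indented section-config layout of datastore.cfg is assumed):

    datastore: store1
        path /mnt/datastore/store1
        verify-new true

The same property should also be settable through the datastore API, since "verify-new" is registered as an optional bool in the DataStoreConfig schema below.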
@@ -149,7 +149,7 @@ async move {
             None
         };

-        let (path, is_new, _snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
+        let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;

         if !is_new { bail!("backup directory already exists."); }

@@ -191,7 +191,7 @@ async move {
     async move {
         // keep flock until task ends
         let _group_guard = _group_guard;
-        let _snap_guard = _snap_guard;
+        let snap_guard = snap_guard;
         let _last_guard = _last_guard;

         let res = select!{
@@ -203,14 +203,26 @@ async move {
             tools::runtime::block_in_place(|| env.remove_backup())?;
             return Ok(());
         }

+        let verify = |env: BackupEnvironment| {
+            if let Err(err) = env.verify_after_complete(snap_guard) {
+                env.log(format!(
+                    "backup finished, but starting the requested verify task failed: {}",
+                    err
+                ));
+            }
+        };
+
         match (res, env.ensure_finished()) {
             (Ok(_), Ok(())) => {
                 env.log("backup finished successfully");
+                verify(env);
                 Ok(())
             },
             (Err(err), Ok(())) => {
                 // ignore errors after finish
                 env.log(format!("backup had errors but finished: {}", err));
+                verify(env);
                 Ok(())
             },
             (Ok(_), Err(err)) => {
@@ -1,6 +1,7 @@
 use anyhow::{bail, format_err, Error};
 use std::sync::{Arc, Mutex};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
+use nix::dir::Dir;

 use ::serde::{Serialize};
 use serde_json::{json, Value};
@@ -494,6 +495,54 @@ impl BackupEnvironment {
         Ok(())
     }

+    /// If verify-new is set on the datastore, this will run a new verify task
+    /// for the backup. If not, this will return and also drop the passed lock
+    /// immediately.
+    pub fn verify_after_complete(&self, snap_lock: Dir) -> Result<(), Error> {
+        self.ensure_finished()?;
+
+        if !self.datastore.verify_new() {
+            // no verify requested, do nothing
+            return Ok(());
+        }
+
+        let worker_id = format!("{}_{}_{}_{:08X}",
+            self.datastore.name(),
+            self.backup_dir.group().backup_type(),
+            self.backup_dir.group().backup_id(),
+            self.backup_dir.backup_time());
+
+        let datastore = self.datastore.clone();
+        let backup_dir = self.backup_dir.clone();
+
+        WorkerTask::new_thread(
+            "verify",
+            Some(worker_id),
+            self.user.clone(),
+            false,
+            move |worker| {
+                worker.log("Automatically verifying newly added snapshot");
+
+                let verified_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*16)));
+                let corrupt_chunks = Arc::new(Mutex::new(HashSet::with_capacity(64)));
+
+                if !verify_backup_dir_with_lock(
+                    datastore,
+                    &backup_dir,
+                    verified_chunks,
+                    corrupt_chunks,
+                    worker.clone(),
+                    worker.upid().clone(),
+                    snap_lock,
+                )? {
+                    bail!("verification failed - please check the log for details");
+                }
+
+                Ok(())
+            },
+        ).map(|_| ())
+    }
+
     pub fn log<S: AsRef<str>>(&self, msg: S) {
         self.worker.log(msg);
     }
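For illustration only (store name, group, and timestamp are hypothetical): the worker_id assembled above joins the datastore name, backup type, backup id, and the backup time rendered as zero-padded uppercase hex, e.g.:

    // Sketch of the worker_id format used in verify_after_complete().
    // 1600000000 epoch seconds == 0x5F5E1000.
    let worker_id = format!("{}_{}_{}_{:08X}",
        "store1",        // datastore name (hypothetical)
        "vm",            // backup type
        "100",           // backup id
        1600000000u64);  // backup time as epoch seconds
    assert_eq!(worker_id, "store1_vm_100_5F5E1000");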
@@ -38,6 +38,7 @@ pub struct DataStore {
     chunk_store: Arc<ChunkStore>,
     gc_mutex: Mutex<bool>,
     last_gc_status: Mutex<GarbageCollectionStatus>,
+    verify_new: bool,
 }

 impl DataStore {
@@ -52,7 +53,9 @@ impl DataStore {

         if let Some(datastore) = map.get(name) {
             // Compare Config - if changed, create new Datastore object!
-            if datastore.chunk_store.base == path {
+            if datastore.chunk_store.base == path &&
+                datastore.verify_new == config.verify_new.unwrap_or(false)
+            {
                 return Ok(datastore.clone());
             }
         }
@@ -65,7 +68,7 @@ impl DataStore {
         Ok(datastore)
     }

-    fn open_with_path(store_name: &str, path: &Path, _config: DataStoreConfig) -> Result<Self, Error> {
+    fn open_with_path(store_name: &str, path: &Path, config: DataStoreConfig) -> Result<Self, Error> {
         let chunk_store = ChunkStore::open(store_name, path)?;

         let gc_status = GarbageCollectionStatus::default();
@@ -74,6 +77,7 @@ impl DataStore {
             chunk_store: Arc::new(chunk_store),
             gc_mutex: Mutex::new(false),
             last_gc_status: Mutex::new(gc_status),
+            verify_new: config.verify_new.unwrap_or(false),
         })
     }

@@ -680,4 +684,8 @@ impl DataStore {

         Ok(())
     }
+
+    pub fn verify_new(&self) -> bool {
+        self.verify_new
+    }
 }
@@ -72,6 +72,10 @@ pub const DIR_NAME_SCHEMA: Schema = StringSchema::new("Directory name").schema()
             optional: true,
             schema: PRUNE_SCHEMA_KEEP_YEARLY,
         },
+        "verify-new": {
+            optional: true,
+            type: bool,
+        },
     }
 )]
 #[serde(rename_all="kebab-case")]
@@ -100,6 +104,9 @@ pub struct DataStoreConfig {
     pub keep_monthly: Option<u64>,
     #[serde(skip_serializing_if="Option::is_none")]
     pub keep_yearly: Option<u64>,
+    /// If enabled, all backups will be verified right after completion.
+    #[serde(skip_serializing_if="Option::is_none")]
+    pub verify_new: Option<bool>,
 }

 fn init() -> SectionConfig {