backup: ensure base snapshots are still available after backup
This should never trigger if everything else works correctly, but it is still a very cheap check to avoid wrongly marking a backup as "OK" when in fact some chunks might be missing.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
commit d53fbe2474 (parent 95bda2f25d)
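The idea behind the change, as a minimal self-contained sketch (the type, field, and path names below are simplified stand-ins for illustration, not the actual proxmox-backup API): an incremental backup reuses chunks referenced by its base snapshot, so the base is recorded when it is used and its continued existence is re-checked before the backup is marked finished.

use std::collections::HashSet;
use std::path::PathBuf;

// Simplified stand-in for the shared backup state: it only tracks which
// base snapshots the running backup depends on.
struct BackupState {
    base_snapshots: HashSet<PathBuf>,
}

impl BackupState {
    fn new() -> Self {
        Self { base_snapshots: HashSet::new() }
    }

    // Called when chunks from a previous snapshot are reused.
    fn register_base_snapshot(&mut self, dir: PathBuf) {
        self.base_snapshots.insert(dir);
    }

    // Called on finish: refuse to report success if any base snapshot
    // disappeared in the meantime (e.g. pruned or garbage collected),
    // since its chunks may be gone as well.
    fn finish(&self) -> Result<(), String> {
        for snap in &self.base_snapshots {
            if !snap.exists() {
                return Err(format!("base snapshot {} was removed during backup", snap.display()));
            }
        }
        Ok(())
    }
}

fn main() {
    let mut state = BackupState::new();
    state.register_base_snapshot(PathBuf::from("/tmp/does-not-exist"));
    // Prints the error because the registered path is missing.
    println!("{:?}", state.finish());
}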
@@ -660,6 +660,7 @@ fn download_previous(
        };
        if let Some(index) = index {
            env.log(format!("register chunks in '{}' from previous backup.", archive_name));
+           env.register_base_snapshot(last_backup.backup_dir.clone());

            for pos in 0..index.index_count() {
                let info = index.chunk_info(pos).unwrap();
@@ -1,6 +1,6 @@
 use anyhow::{bail, Error};
 use std::sync::{Arc, Mutex};
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};

 use serde_json::{json, Value};
@@ -57,6 +57,7 @@ struct SharedBackupState {
     dynamic_writers: HashMap<usize, DynamicWriterState>,
     fixed_writers: HashMap<usize, FixedWriterState>,
     known_chunks: HashMap<[u8;32], u32>,
+    base_snapshots: HashSet<BackupDir>,
 }

 impl SharedBackupState {
@@ -108,6 +109,7 @@ impl BackupEnvironment {
            dynamic_writers: HashMap::new(),
            fixed_writers: HashMap::new(),
            known_chunks: HashMap::new(),
+           base_snapshots: HashSet::new(),
        };

        Self {
@@ -124,6 +126,13 @@ impl BackupEnvironment {
        }
    }

+   /// Register a snapshot as a predecessor of the current backup.
+   /// Its existence will be ensured on finishing.
+   pub fn register_base_snapshot(&self, snap: BackupDir) {
+       let mut state = self.state.lock().unwrap();
+       state.base_snapshots.insert(snap);
+   }
+
    /// Register a Chunk with associated length.
    ///
    /// We do not fully trust clients, so a client may only use registered
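Note that register_base_snapshot takes &self yet still mutates state: as the lock call shows, the mutable fields sit behind self.state, a mutex-guarded SharedBackupState (assumed here to be shared via an Arc between request handlers). A trimmed-down sketch of that pattern, with Env and its field as placeholders rather than the real BackupEnvironment:

use std::collections::HashSet;
use std::sync::{Arc, Mutex};

// Placeholder environment: cloning it only clones the Arc, so every
// handler sees the same underlying state.
#[derive(Clone)]
struct Env {
    state: Arc<Mutex<HashSet<String>>>,
}

impl Env {
    fn register_base_snapshot(&self, snap: String) {
        // Locking provides interior mutability, which is why &self suffices.
        let mut state = self.state.lock().unwrap();
        state.insert(snap);
    }
}

fn main() {
    let env = Env { state: Arc::new(Mutex::new(HashSet::new())) };
    env.register_base_snapshot("vm/100/2020-06-26T10:00:00Z".to_string());
    assert_eq!(env.state.lock().unwrap().len(), 1);
}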
@@ -445,6 +454,16 @@ impl BackupEnvironment {
            bail!("backup does not contain valid files (file count == 0)");
        }

+       for snap in &state.base_snapshots {
+           let path = self.datastore.snapshot_path(snap);
+           if !path.exists() {
+               bail!(
+                   "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
+                   snap
+               );
+           }
+       }
+
        state.finished = true;

        Ok(())
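For readers unfamiliar with anyhow (imported in the @@ -1,6 +1,6 @@ hunk above), bail!(fmt, args...) is shorthand for returning an error built from a format string, so a missing base snapshot aborts the finish call with the message shown. Formatting snap with {} also implies BackupDir provides a Display implementation, which is not part of this diff. A minimal illustration with made-up names:

use anyhow::{bail, Result};

// bail!(...) expands to `return Err(anyhow::anyhow!(...))`.
fn ensure_present(name: &str, present: bool) -> Result<()> {
    if !present {
        bail!("base snapshot {} was removed during backup, cannot finish as chunks might be missing", name);
    }
    Ok(())
}

fn main() {
    // The Err carries the formatted message instead of finishing "OK".
    if let Err(err) = ensure_present("vm/100/2020-06-26T10:00:00Z", false) {
        eprintln!("{}", err);
    }
}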
@@ -217,7 +217,7 @@ impl std::str::FromStr for BackupGroup {
 /// Uniquely identify a Backup (relative to data store)
 ///
 /// We also call this a backup snapshot.
-#[derive(Debug, Eq, PartialEq, Clone)]
+#[derive(Debug, Eq, PartialEq, Hash, Clone)]
 pub struct BackupDir {
     /// Backup group
     group: BackupGroup,
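The added Hash derive is what makes the HashSet<BackupDir> above possible: HashSet<T> requires T: Eq + Hash, and #[derive(Hash)] only compiles when every field is itself hashable. A trimmed-down illustration, with the fields simplified stand-ins for the real BackupGroup and timestamp:

use std::collections::HashSet;

// Simplified stand-ins; the real types live in proxmox-backup.
#[derive(Debug, Eq, PartialEq, Hash, Clone)]
struct BackupGroup {
    backup_type: String,
    backup_id: String,
}

#[derive(Debug, Eq, PartialEq, Hash, Clone)]
struct BackupDir {
    group: BackupGroup,
    backup_time: i64,
}

fn main() {
    let mut base_snapshots: HashSet<BackupDir> = HashSet::new();
    let snap = BackupDir {
        group: BackupGroup { backup_type: "vm".into(), backup_id: "100".into() },
        backup_time: 1_593_165_600,
    };
    // Without `Hash` on both structs this insert would not compile.
    base_snapshots.insert(snap.clone());
    assert!(base_snapshots.contains(&snap));
}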