Revert "backup: ensure base snapshots are still available after backup"

This reverts commit d53fbe2474.

The HashSet and "register" function are unnecessary, as we already know
which backup is the one we need to check: the last one, stored as
'last_backup'.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Stefan Reiter 2020-08-11 10:50:40 +02:00 committed by Dietmar Maurer
parent f23f75433f
commit 8b5f72b176
3 changed files with 2 additions and 22 deletions
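
The reasoning in the commit message ("we already know which backup we need to check") boils down to replacing the HashSet bookkeeping with a direct existence check on the single predecessor. The following is only a rough, self-contained sketch of that idea using made-up stand-in types (the real BackupDir, DataStore and BackupEnvironment live in the proxmox-backup sources); it is not code from this commit.

use std::path::PathBuf;

// Stand-in types for illustration only.
struct BackupDir(String);

struct DataStore {
    base: PathBuf,
}

impl DataStore {
    // Mirrors the snapshot_path() call used by the removed finish-time loop.
    fn snapshot_path(&self, snap: &BackupDir) -> PathBuf {
        self.base.join(&snap.0)
    }
}

// Only the single predecessor ('last_backup') has to still exist when the
// backup finishes, so a direct check can replace the HashSet bookkeeping.
fn check_last_backup(store: &DataStore, last_backup: Option<&BackupDir>) -> Result<(), String> {
    if let Some(last) = last_backup {
        let path = store.snapshot_path(last);
        if !path.exists() {
            return Err(format!(
                "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
                last.0
            ));
        }
    }
    Ok(())
}

fn main() {
    let store = DataStore { base: PathBuf::from("/tmp") };
    let last = BackupDir("vm/100/2020-08-11T10:50:40Z".to_string());
    println!("{:?}", check_last_backup(&store, Some(&last)));
}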

@@ -661,7 +661,6 @@ fn download_previous(
         };
         if let Some(index) = index {
             env.log(format!("register chunks in '{}' from previous backup.", archive_name));
-            env.register_base_snapshot(last_backup.backup_dir.clone());
 
             for pos in 0..index.index_count() {
                 let info = index.chunk_info(pos).unwrap();

@@ -1,6 +1,6 @@
 use anyhow::{bail, format_err, Error};
 use std::sync::{Arc, Mutex};
-use std::collections::{HashMap, HashSet};
+use std::collections::HashMap;
 use ::serde::{Serialize};
 use serde_json::{json, Value};
@@ -73,7 +73,6 @@ struct SharedBackupState {
     dynamic_writers: HashMap<usize, DynamicWriterState>,
     fixed_writers: HashMap<usize, FixedWriterState>,
     known_chunks: HashMap<[u8;32], u32>,
-    base_snapshots: HashSet<BackupDir>,
     backup_size: u64, // sums up size of all files
     backup_stat: UploadStatistic,
 }
@@ -127,7 +126,6 @@ impl BackupEnvironment {
             dynamic_writers: HashMap::new(),
             fixed_writers: HashMap::new(),
             known_chunks: HashMap::new(),
-            base_snapshots: HashSet::new(),
             backup_size: 0,
             backup_stat: UploadStatistic::new(),
         };
@@ -146,13 +144,6 @@ impl BackupEnvironment {
         }
     }
 
-    /// Register a snapshot as a predecessor of the current backup.
-    /// It's existance will be ensured on finishing.
-    pub fn register_base_snapshot(&self, snap: BackupDir) {
-        let mut state = self.state.lock().unwrap();
-        state.base_snapshots.insert(snap);
-    }
-
     /// Register a Chunk with associated length.
     ///
     /// We do not fully trust clients, so a client may only use registered
@@ -489,16 +480,6 @@ impl BackupEnvironment {
         self.datastore.store_manifest(&self.backup_dir, manifest)
             .map_err(|err| format_err!("unable to store manifest blob - {}", err))?;
 
-        for snap in &state.base_snapshots {
-            let path = self.datastore.snapshot_path(snap);
-            if !path.exists() {
-                bail!(
-                    "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
-                    snap
-                );
-            }
-        }
-
         // marks the backup as successful
         state.finished = true;

@@ -173,7 +173,7 @@ impl std::str::FromStr for BackupGroup {
 /// Uniquely identify a Backup (relative to data store)
 ///
 /// We also call this a backup snaphost.
-#[derive(Debug, Eq, PartialEq, Hash, Clone)]
+#[derive(Debug, Eq, PartialEq, Clone)]
 pub struct BackupDir {
     /// Backup group
     group: BackupGroup,
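
One more note on the last hunk: the Hash derive on BackupDir was only needed so that BackupDir values could be stored in the now-removed HashSet<BackupDir>, because HashSet<T> requires T: Eq + Hash. A minimal, standalone illustration with a made-up Snapshot type (not the real BackupDir):

use std::collections::HashSet;

// Made-up type for illustration: without the Hash derive, the HashSet
// operations below would not compile.
#[derive(Debug, PartialEq, Eq, Hash, Clone)]
struct Snapshot {
    group: String,
    time: String,
}

fn main() {
    let mut base_snapshots = HashSet::new();
    base_snapshots.insert(Snapshot {
        group: "vm/100".to_string(),
        time: "2020-08-11T10:50:40Z".to_string(),
    });

    // Lookups hash the key again, so Eq and Hash must agree.
    assert!(base_snapshots.contains(&Snapshot {
        group: "vm/100".to_string(),
        time: "2020-08-11T10:50:40Z".to_string(),
    }));
}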