datastore: prevent deletion of snaps in use as "previous backup"

To prevent a race with a background GC operation, do not allow deletion
of backups whose index might currently be referenced as the "known
chunk list" for a subsequent backup. Otherwise the GC could delete
chunks it believes are no longer referenced, while at the same time
telling the client that it does not need to upload those chunks because
they already exist.

Additionally, prevent deletion of an entire backup group if it contains
snapshots that appear to be currently in progress. This is unlikely to
trigger at the moment, since that code path is only used by sync jobs,
but it is a useful safeguard either way.
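
For illustration, the group-level check added below boils down to the
following minimal sketch. The Snapshot type and group_removable() name
are hypothetical, not the actual proxmox-backup API, and the sketch
assumes the sorted snapshot list is ordered newest-first, which is what
the loop in the diff relies on:

// Simplified model of the group-removal guard; `Snapshot` stands in for
// the real snapshot info, `finished` for "a manifest blob exists".
struct Snapshot {
    finished: bool,
}

fn group_removable(snapshots_newest_first: &[Snapshot]) -> bool {
    match snapshots_newest_first.first() {
        // only the newest snapshot can still be a running backup
        Some(newest) => newest.finished,
        // empty group: nothing can be in progress
        None => true,
    }
}

fn main() {
    let group = vec![Snapshot { finished: false }, Snapshot { finished: true }];
    assert!(!group_removable(&group)); // newest snapshot unfinished -> refuse removal
}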

Deleting a single snapshot now takes a 'force' parameter, which is
necessary to allow deleting incomplete snapshots of an aborted backup.
Pruning also sets force=true to skip the check, since it calculates
which snapshots to keep on its own.
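
The per-snapshot guard can be pictured with the same kind of simplified,
self-contained model (illustrative names and types only, not the real
code): walking the time-sorted list newest-first, deletion is refused if
the target snapshot is unfinished, or if the snapshot visited just
before it (its newer successor) is unfinished and may therefore still be
using it as its "previous backup"; force=true bypasses the scan
entirely.

// Simplified model of the guard added to remove_backup_dir(); the
// Snapshot type and check_removable() are illustrative only.
struct Snapshot {
    name: &'static str,
    finished: bool, // in the real code: the manifest blob exists
}

fn check_removable(
    snapshots_newest_first: &[Snapshot],
    target: &str,
    force: bool,
) -> Result<(), String> {
    if force {
        return Ok(()); // prune and aborted-backup cleanup skip the check
    }
    // "prev" in iteration order is the next *newer* snapshot, i.e. the successor
    let mut prev_finished = true;
    for snap in snapshots_newest_first {
        if snap.name == target {
            if !snap.finished {
                return Err(format!("cannot remove currently running snapshot: {}", target));
            }
            if !prev_finished {
                return Err(format!(
                    "cannot remove snapshot {}, successor is currently running and potentially based on it",
                    target
                ));
            }
            break;
        }
        prev_finished = snap.finished;
    }
    Ok(())
}

fn main() {
    let list = [
        Snapshot { name: "2020-07-29T14:00:00Z", finished: false }, // running backup
        Snapshot { name: "2020-07-29T12:00:00Z", finished: true },  // its "previous backup"
    ];
    // the older snapshot may still serve as the known-chunk list, so refuse:
    assert!(check_removable(&list, "2020-07-29T12:00:00Z", false).is_err());
    // prune passes force=true and may remove it anyway:
    assert!(check_removable(&list, "2020-07-29T12:00:00Z", true).is_ok());
}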

To avoid code duplication, the manifest check is factored out into a
BackupInfo::is_finished() helper.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>

@@ -272,7 +272,7 @@ fn delete_snapshot(
     let allowed = (user_privs & PRIV_DATASTORE_MODIFY) != 0;
     if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
 
-    datastore.remove_backup_dir(&snapshot)?;
+    datastore.remove_backup_dir(&snapshot, false)?;
 
     Ok(Value::Null)
 }
@@ -661,7 +661,7 @@ fn prune(
            }));
 
            if !(dry_run || keep) {
-               datastore.remove_backup_dir(&info.backup_dir)?;
+               datastore.remove_backup_dir(&info.backup_dir, true)?;
            }
        }


@@ -479,7 +479,7 @@ impl BackupEnvironment {
         let mut state = self.state.lock().unwrap();
         state.finished = true;
 
-        self.datastore.remove_backup_dir(&self.backup_dir)?;
+        self.datastore.remove_backup_dir(&self.backup_dir, true)?;
 
         Ok(())
     }


@@ -173,7 +173,7 @@ impl std::str::FromStr for BackupGroup {
 /// Uniquely identify a Backup (relative to data store)
 ///
 /// We also call this a backup snaphost.
-#[derive(Debug, Clone)]
+#[derive(Debug, Eq, PartialEq, Clone)]
 pub struct BackupDir {
     /// Backup group
     group: BackupGroup,
@@ -317,6 +317,11 @@ impl BackupInfo {
         })?;
         Ok(list)
     }
+
+    pub fn is_finished(&self) -> bool {
+        // backup is considered unfinished if there is no manifest
+        self.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME)
+    }
 }
 
 fn list_backup_files<P: ?Sized + nix::NixPath>(dirfd: RawFd, path: &P) -> Result<Vec<String>, Error> {


@@ -8,7 +8,7 @@ use anyhow::{bail, format_err, Error};
 use lazy_static::lazy_static;
 use chrono::{DateTime, Utc};
 
-use super::backup_info::{BackupGroup, BackupDir};
+use super::backup_info::{BackupGroup, BackupDir, BackupInfo};
 use super::chunk_store::ChunkStore;
 use super::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
 use super::fixed_index::{FixedIndexReader, FixedIndexWriter};
@@ -197,6 +197,20 @@ impl DataStore {
 
         let full_path = self.group_path(backup_group);
 
+        let mut snap_list = backup_group.list_backups(&self.base_path())?;
+        BackupInfo::sort_list(&mut snap_list, false);
+        for snap in snap_list {
+            if snap.is_finished() {
+                break;
+            } else {
+                bail!(
+                    "cannot remove backup group {:?}, contains potentially running backup: {}",
+                    full_path,
+                    snap.backup_dir
+                );
+            }
+        }
+
         log::info!("removing backup group {:?}", full_path);
         std::fs::remove_dir_all(&full_path)
             .map_err(|err| {
@@ -211,10 +225,35 @@
     }
 
     /// Remove a backup directory including all content
-    pub fn remove_backup_dir(&self, backup_dir: &BackupDir) -> Result<(), Error> {
+    pub fn remove_backup_dir(&self, backup_dir: &BackupDir, force: bool) -> Result<(), Error> {
 
         let full_path = self.snapshot_path(backup_dir);
 
+        if !force {
+            let mut snap_list = backup_dir.group().list_backups(&self.base_path())?;
+            BackupInfo::sort_list(&mut snap_list, false);
+            let mut prev_snap_finished = true;
+            for snap in snap_list {
+                let cur_snap_finished = snap.is_finished();
+                if &snap.backup_dir == backup_dir {
+                    if !cur_snap_finished {
+                        bail!(
+                            "cannot remove currently running snapshot: {:?}",
+                            backup_dir
+                        );
+                    }
+                    if !prev_snap_finished {
+                        bail!(
+                            "cannot remove snapshot {:?}, successor is currently running and potentially based on it",
+                            backup_dir
+                        );
+                    }
+                    break;
+                }
+                prev_snap_finished = cur_snap_finished;
+            }
+        }
+
         log::info!("removing backup snapshot {:?}", full_path);
         std::fs::remove_dir_all(&full_path)
             .map_err(|err| {


@@ -53,7 +53,7 @@ fn remove_incomplete_snapshots(
     let mut keep_unfinished = true;
     for info in list.iter() {
         // backup is considered unfinished if there is no manifest
-        if info.files.iter().any(|name| name == super::MANIFEST_BLOB_NAME) {
+        if info.is_finished() {
             // There is a new finished backup, so there is no need
             // to keep older unfinished backups.
             keep_unfinished = false;


@@ -455,7 +455,7 @@ async fn schedule_datastore_prune() {
                        BackupDir::backup_time_to_string(info.backup_dir.backup_time())));
 
                    if !keep {
-                       datastore.remove_backup_dir(&info.backup_dir)?;
+                       datastore.remove_backup_dir(&info.backup_dir, true)?;
                    }
                }
            }


@@ -277,7 +277,7 @@ pub async fn pull_snapshot_from(
        worker.log(format!("sync snapshot {:?}", snapshot.relative_path()));
 
        if let Err(err) = pull_snapshot(worker, reader, tgt_store.clone(), &snapshot).await {
-           if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot) {
+           if let Err(cleanup_err) = tgt_store.remove_backup_dir(&snapshot, true) {
                worker.log(format!("cleanup error - {}", cleanup_err));
            }
            return Err(err);
@@ -362,7 +362,7 @@ pub async fn pull_group(
            let backup_time = info.backup_dir.backup_time();
            if remote_snapshots.contains(&backup_time) { continue; }
            worker.log(format!("delete vanished snapshot {:?}", info.backup_dir.relative_path()));
-           tgt_store.remove_backup_dir(&info.backup_dir)?;
+           tgt_store.remove_backup_dir(&info.backup_dir, false)?;
        }
    }