backup: use flock on backup group to forbid multiple backups at once

Multiple simultaneous backups within one backup group don't really make
sense and break all sorts of guarantees. For example, a second backup
started while a first is still running would use the "known-chunks" list
from that unfinished backup, which would be empty. Using the list from
the last finished backup is not a fix either, as that snapshot could be
deleted or pruned once the first of the simultaneous backups finishes.

Fix this by allowing only one backup per backup group at a time. This is
done via a flock on the backup group directory, so the lock stays intact
even across a daemon reload.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
Author:    Stefan Reiter <s.reiter@proxmox.com>
Date:      2020-07-29 14:33:13 +02:00
Committer: Dietmar Maurer
Parent:    c9756b40d1
Commit:    95bda2f25d

2 changed files with 51 additions and 4 deletions
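
The locking described above takes an exclusive flock on the backup group's
directory. A minimal sketch of that pattern in Rust using the nix crate; the
helper name, error messages and use of anyhow are illustrative and not the
actual proxmox-backup code:

use std::fs::File;
use std::os::unix::io::AsRawFd;
use std::path::Path;

use anyhow::{format_err, Error};
use nix::fcntl::{flock, FlockArg};

/// Illustrative helper: take an exclusive, non-blocking flock on a
/// directory. Dropping the returned File releases the lock again.
fn lock_dir_exclusive(path: &Path) -> Result<File, Error> {
    // A directory can be opened read-only and still carry a flock.
    let file = File::open(path)
        .map_err(|err| format_err!("unable to open {:?} - {}", path, err))?;
    flock(file.as_raw_fd(), FlockArg::LockExclusiveNonblock)
        .map_err(|err| format_err!("backup group {:?} is locked by another backup - {}", path, err))?;
    Ok(file)
}

Dropping the returned File is what releases the lock, which is why the guard
is kept alive for the whole duration of the backup task in the diff below.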

@@ -95,17 +95,17 @@ async move {
         }
 
         let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
-        let backup_dir = BackupDir::new_with_group(backup_group, backup_time);
+        let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
 
         if let Some(last) = &last_backup {
             if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                 bail!("backup timestamp is older than last backup.");
             }
-            // fixme: abort if last backup is still running - howto test?
-            // Idea: write upid into a file inside snapshot dir. then test if
-            // it is still running here.
         }
 
+        // lock backup group to only allow one backup per group at a time
+        let _group_guard = backup_group.lock(&datastore.base_path())?;
+
         let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
         if !is_new { bail!("backup directory already exists."); }
 
@@ -144,6 +144,9 @@ async move {
             .map(|_| Err(format_err!("task aborted")));
 
         async move {
+            // keep flock until task ends
+            let _group_guard = _group_guard;
+
             let res = select!{
                 req = req_fut => req,
                 abrt = abort_future => abrt,
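
The re-binding "let _group_guard = _group_guard;" moves the guard into the
async move block, so the flock is released only when the whole task future
completes (or is dropped). A minimal sketch of that pattern, using an
illustrative File-based guard rather than the actual lock type:

use std::fs::File;

// Hypothetical task body: taking the guard by value ties the lock's
// lifetime to this future.
async fn run_backup_task(group_guard: File) {
    // keep flock until task ends
    let _group_guard = group_guard;

    // ... perform the backup ...

    // The guard is dropped only when this future finishes (or is
    // dropped), releasing the flock on the backup group directory.
}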