backup: use flock on backup group to forbid multiple backups at once
Multiple backups within one backup group don't really make sense, but break all sorts of guarantees (e.g. a second backup started after a first would use a "known-chunks" list from the previous unfinished one, which would be empty - but using the list from the last finished one is not a fix either, as that one could be deleted or pruned once the first simultaneous backup is finished).

Fix it by only allowing one backup per backup group at one time. This is done via a flock on the backup group directory, thus remaining intact even after a reload.

Signed-off-by: Stefan Reiter <s.reiter@proxmox.com>
commit 95bda2f25d
parent c9756b40d1
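The guarantee in the change below rests on flock(2) semantics: the advisory lock is tied to the open file description of the group directory, so it is released automatically once that descriptor is closed, and it survives a daemon reload as long as the worker holding it keeps running. As a minimal sketch of that mechanism, assuming nothing beyond the nix crate (this calls nix::fcntl::flock directly instead of the repository's tools::lock_file helper; the function name is illustrative):

    use std::os::unix::io::AsRawFd;
    use std::path::Path;

    use nix::dir::Dir;
    use nix::fcntl::{flock, FlockArg, OFlag};
    use nix::sys::stat::Mode;

    /// Take an exclusive, non-blocking flock on a directory. The lock
    /// lives exactly as long as the returned `Dir` (i.e. its fd) and is
    /// released automatically when the handle is dropped.
    fn try_lock_dir(path: &Path) -> nix::Result<Dir> {
        let dir = Dir::open(path, OFlag::O_RDONLY, Mode::empty())?;
        // fails immediately with EAGAIN if someone else holds the lock
        flock(dir.as_raw_fd(), FlockArg::LockExclusiveNonblock)?;
        Ok(dir)
    }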
@@ -95,17 +95,17 @@ async move {
         }
 
         let last_backup = BackupInfo::last_backup(&datastore.base_path(), &backup_group).unwrap_or(None);
-        let backup_dir = BackupDir::new_with_group(backup_group, backup_time);
+        let backup_dir = BackupDir::new_with_group(backup_group.clone(), backup_time);
 
         if let Some(last) = &last_backup {
             if backup_dir.backup_time() <= last.backup_dir.backup_time() {
                 bail!("backup timestamp is older than last backup.");
             }
-            // fixme: abort if last backup is still running - howto test?
-            // Idea: write upid into a file inside snapshot dir. then test if
-            // it is still running here.
         }
 
+        // lock backup group to only allow one backup per group at a time
+        let _group_guard = backup_group.lock(&datastore.base_path())?;
+
         let (path, is_new) = datastore.create_backup_dir(&backup_dir)?;
         if !is_new { bail!("backup directory already exists."); }
 
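The switch from backup_group to backup_group.clone() in the hunk above is a direct consequence of the new lock call: BackupDir::new_with_group takes the group by value, and backup_group is needed again a few lines later for lock(). A toy reproduction of the ownership issue, with the types reduced to stubs (not the real definitions):

    #[derive(Clone)]
    struct BackupGroup(String);

    struct BackupDir {
        group: BackupGroup,
    }

    impl BackupDir {
        // takes ownership of the group, like the real constructor
        fn new_with_group(group: BackupGroup) -> Self {
            BackupDir { group }
        }
    }

    fn main() {
        let backup_group = BackupGroup("vm/100".into());
        // without .clone() the group would be moved into the BackupDir ...
        let _backup_dir = BackupDir::new_with_group(backup_group.clone());
        // ... and this later use would fail to compile (E0382: use of moved value)
        let _used_again = &backup_group;
    }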
@@ -144,6 +144,9 @@ async move {
             .map(|_| Err(format_err!("task aborted")));
 
         async move {
+            // keep flock until task ends
+            let _group_guard = _group_guard;
+
             let res = select!{
                 req = req_fut => req,
                 abrt = abort_future => abrt,
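The seemingly redundant `let _group_guard = _group_guard;` is what keeps the lock alive: an async move block only captures the variables its body actually mentions, so without this line the guard would be dropped (and the flock released) when the enclosing function returns, not when the backup task ends. Note also that an underscore-prefixed name, unlike a bare `_` pattern, is a real binding and keeps its value alive. A self-contained toy version of the pattern:

    use std::future::Future;

    // stand-in for the flock guard; Drop models releasing the lock
    struct Guard;

    impl Drop for Guard {
        fn drop(&mut self) {
            println!("flock released");
        }
    }

    fn make_task(guard: Guard) -> impl Future<Output = ()> {
        async move {
            // mentioning `guard` moves it into the future, tying the
            // lock's lifetime to the task rather than to this function
            let _guard = guard;
            // ... backup work would run here ...
        }
    }

    fn main() {
        let task = make_task(Guard);
        println!("task created, lock still held");
        drop(task); // dropping the future drops the guard: "flock released"
    }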
@@ -3,7 +3,9 @@ use crate::tools;
 use anyhow::{bail, format_err, Error};
 use regex::Regex;
 use std::os::unix::io::RawFd;
+use nix::dir::Dir;
 
+use std::time::Duration;
 use chrono::{DateTime, TimeZone, SecondsFormat, Utc};
 
 use std::path::{PathBuf, Path};
@@ -36,6 +38,9 @@ lazy_static!{
 
 }
 
+/// Opaque type releasing the corresponding flock when dropped
+pub type BackupGroupGuard = Dir;
+
 /// BackupGroup is a directory containing a list of BackupDir
 #[derive(Debug, Eq, PartialEq, Hash, Clone)]
 pub struct BackupGroup {
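Design note on the alias above: Dir owns the directory file descriptor, and flock(2) locks are released when the last descriptor referring to the open file description is closed, so dropping the guard unlocks the group without any dedicated wrapper type.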
@@ -130,6 +135,45 @@ impl BackupGroup {
         Ok(last)
     }
 
+    pub fn lock(&self, base_path: &Path) -> Result<BackupGroupGuard, Error> {
+        use nix::fcntl::OFlag;
+        use nix::sys::stat::Mode;
+
+        let mut path = base_path.to_owned();
+        path.push(self.group_path());
+
+        let mut handle = Dir::open(&path, OFlag::O_RDONLY, Mode::empty())
+            .map_err(|err| {
+                format_err!(
+                    "unable to open backup group directory {:?} for locking - {}",
+                    self.group_path(),
+                    err,
+                )
+            })?;
+
+        // acquire in non-blocking mode, no point in waiting here since other
+        // backups could still take a very long time
+        tools::lock_file(&mut handle, true, Some(Duration::from_nanos(0)))
+            .map_err(|err| {
+                match err.downcast_ref::<nix::Error>() {
+                    Some(nix::Error::Sys(nix::errno::Errno::EAGAIN)) => {
+                        return format_err!(
+                            "unable to acquire lock on backup group {:?} - another backup is already running",
+                            self.group_path(),
+                        );
+                    },
+                    _ => ()
+                }
+                format_err!(
+                    "unable to acquire lock on backup group {:?} - {}",
+                    self.group_path(),
+                    err,
+                )
+            })?;
+
+        Ok(handle)
+    }
+
     pub fn list_groups(base_path: &Path) -> Result<Vec<BackupGroup>, Error> {
         let mut list = Vec::new();
 
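A hedged sketch of how the new method might be called; the BackupGroup::new(type, id) constructor and the datastore path are assumptions for illustration, not part of this diff:

    use std::path::Path;
    use anyhow::Error;

    fn demo() -> Result<(), Error> {
        let base = Path::new("/path/to/datastore");  // illustrative path
        let group = BackupGroup::new("vm", "100");   // assumed constructor

        // first backup acquires the flock on the group directory
        let _guard = group.lock(base)?;

        // a concurrent attempt fails immediately with
        // "unable to acquire lock on backup group ... - another backup is already running"
        assert!(group.lock(base).is_err());

        Ok(())
        // _guard dropped here -> directory fd closed -> flock released
    }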