use std::collections::{HashMap, HashSet};
use std::convert::TryFrom;
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, Mutex};

use anyhow::{bail, format_err, Error};
use lazy_static::lazy_static;

use proxmox_schema::ApiType;

use proxmox_sys::fs::{file_read_optional_string, replace_file, CreateOptions};
use proxmox_sys::fs::{lock_dir_noblock, DirLockGuard};
use proxmox_sys::process_locker::ProcessLockSharedGuard;
use proxmox_sys::WorkerTaskContext;
use proxmox_sys::{task_log, task_warn};

use pbs_api_types::{
    Authid, BackupType, ChunkOrder, DataStoreConfig, DatastoreTuning, GarbageCollectionStatus,
    HumanByte, Operation, BACKUP_DATE_REGEX, BACKUP_ID_REGEX, UPID,
};
use pbs_config::ConfigVersionCache;

use crate::backup_info::{BackupDir, BackupGroup};
use crate::chunk_store::ChunkStore;
use crate::dynamic_index::{DynamicIndexReader, DynamicIndexWriter};
use crate::fixed_index::{FixedIndexReader, FixedIndexWriter};
use crate::index::IndexFile;
use crate::manifest::{
    archive_type, ArchiveType, BackupManifest, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
};
use crate::task_tracking::update_active_operations;
use crate::DataBlob;

lazy_static! {
    static ref DATASTORE_MAP: Mutex<HashMap<String, Arc<DataStoreImpl>>> =
        Mutex::new(HashMap::new());
}

/// Checks if `auth_id` is the owner, or, if the owner is a token,
/// whether `auth_id` is the user that the token belongs to.
pub fn check_backup_owner(owner: &Authid, auth_id: &Authid) -> Result<(), Error> {
    let correct_owner =
        owner == auth_id || (owner.is_token() && &Authid::from(owner.user().clone()) == auth_id);
    if !correct_owner {
        bail!("backup owner check failed ({} != {})", auth_id, owner);
    }
    Ok(())
}
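
// Example of the intended semantics (a sketch, not from this crate's tests;
// the Authid strings are made up for illustration):
//
//     let owner: Authid = "user@pbs!token".parse()?;
//     let requesting: Authid = "user@pbs".parse()?;
//     // the user owning a token may operate on the token's backups:
//     assert!(check_backup_owner(&owner, &requesting).is_ok());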

/// Datastore Management
///
/// A Datastore can store several backups, and provides the
/// management interface for backups.
pub struct DataStoreImpl {
    chunk_store: Arc<ChunkStore>,
    gc_mutex: Mutex<()>,
    last_gc_status: Mutex<GarbageCollectionStatus>,
    verify_new: bool,
    chunk_order: ChunkOrder,
    last_generation: usize,
    last_update: i64,
}

impl DataStoreImpl {
    // This one just panics on everything
    #[doc(hidden)]
    pub unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            chunk_store: Arc::new(unsafe { ChunkStore::panic_store() }),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(GarbageCollectionStatus::default()),
            verify_new: false,
            chunk_order: ChunkOrder::None,
            last_generation: 0,
            last_update: 0,
        })
    }
}

pub struct DataStore {
    inner: Arc<DataStoreImpl>,
    operation: Option<Operation>,
}

impl Clone for DataStore {
    fn clone(&self) -> Self {
        let mut new_operation = self.operation;
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, 1) {
                log::error!("could not update active operations - {}", e);
                new_operation = None;
            }
        }

        DataStore {
            inner: self.inner.clone(),
            operation: new_operation,
        }
    }
}

impl Drop for DataStore {
    fn drop(&mut self) {
        if let Some(operation) = self.operation {
            if let Err(e) = update_active_operations(self.name(), operation, -1) {
                log::error!("could not update active operations - {}", e);
            }
        }
    }
}

impl DataStore {
    // This one just panics on everything
    #[doc(hidden)]
    pub unsafe fn new_test() -> Arc<Self> {
        Arc::new(Self {
            inner: unsafe { DataStoreImpl::new_test() },
            operation: None,
        })
    }

    pub fn lookup_datastore(
        name: &str,
        operation: Option<Operation>,
    ) -> Result<Arc<DataStore>, Error> {
        let version_cache = ConfigVersionCache::new()?;
        let generation = version_cache.datastore_generation();
        let now = proxmox_time::epoch_i64();

        let (config, _digest) = pbs_config::datastore::config()?;
        let config: DataStoreConfig = config.lookup("datastore", name)?;

        if let Some(maintenance_mode) = config.get_maintenance_mode() {
            if let Err(error) = maintenance_mode.check(operation) {
                bail!("datastore '{}' is in {}", name, error);
            }
        }

        if let Some(operation) = operation {
            update_active_operations(name, operation, 1)?;
        }

        let mut map = DATASTORE_MAP.lock().unwrap();
        let entry = map.get(name);

        if let Some(datastore) = &entry {
            if datastore.last_generation == generation && now < (datastore.last_update + 60) {
                return Ok(Arc::new(Self {
                    inner: Arc::clone(datastore),
                    operation,
                }));
            }
        }

        let chunk_store = ChunkStore::open(name, &config.path)?;
        let datastore = DataStore::with_store_and_config(chunk_store, config, generation, now)?;

        let datastore = Arc::new(datastore);
        map.insert(name.to_string(), datastore.clone());

        Ok(Arc::new(Self {
            inner: datastore,
            operation,
        }))
    }
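
    // Example call (a sketch; "store1" stands for any configured datastore).
    // A second lookup within 60 seconds and with an unchanged config
    // generation is served from DATASTORE_MAP without re-opening the chunk
    // store:
    //
    //     let store = DataStore::lookup_datastore("store1", Some(Operation::Write))?;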

    /// Removes all datastores that are not configured anymore from the internal cache
    pub fn remove_unused_datastores() -> Result<(), Error> {
        let (config, _digest) = pbs_config::datastore::config()?;

        let mut map = DATASTORE_MAP.lock().unwrap();
        // removes all elements that are not in the config
        map.retain(|key, _| config.sections.contains_key(key));
        Ok(())
    }

    /// Open a raw datastore given a name and a path.
    pub unsafe fn open_path(
        name: &str,
        path: impl AsRef<Path>,
        operation: Option<Operation>,
    ) -> Result<Arc<Self>, Error> {
        let path = path
            .as_ref()
            .to_str()
            .ok_or_else(|| format_err!("non-utf8 paths not supported"))?
            .to_owned();
        unsafe { Self::open_from_config(DataStoreConfig::new(name.to_owned(), path), operation) }
    }

    /// Open a datastore given a raw configuration.
    pub unsafe fn open_from_config(
        config: DataStoreConfig,
        operation: Option<Operation>,
    ) -> Result<Arc<Self>, Error> {
        let name = config.name.clone();

        let chunk_store = ChunkStore::open(&name, &config.path)?;
        let inner = Arc::new(Self::with_store_and_config(chunk_store, config, 0, 0)?);

        if let Some(operation) = operation {
            update_active_operations(&name, operation, 1)?;
        }

        Ok(Arc::new(Self { inner, operation }))
    }

    fn with_store_and_config(
        chunk_store: ChunkStore,
        config: DataStoreConfig,
        last_generation: usize,
        last_update: i64,
    ) -> Result<DataStoreImpl, Error> {
        let mut gc_status_path = chunk_store.base_path();
        gc_status_path.push(".gc-status");

        let gc_status = if let Some(state) = file_read_optional_string(gc_status_path)? {
            match serde_json::from_str(&state) {
                Ok(state) => state,
                Err(err) => {
                    eprintln!("error reading gc-status: {}", err);
                    GarbageCollectionStatus::default()
                }
            }
        } else {
            GarbageCollectionStatus::default()
        };

        let tuning: DatastoreTuning = serde_json::from_value(
            DatastoreTuning::API_SCHEMA
                .parse_property_string(config.tuning.as_deref().unwrap_or(""))?,
        )?;
        let chunk_order = tuning.chunk_order.unwrap_or(ChunkOrder::Inode);

        Ok(DataStoreImpl {
            chunk_store: Arc::new(chunk_store),
            gc_mutex: Mutex::new(()),
            last_gc_status: Mutex::new(gc_status),
            verify_new: config.verify_new.unwrap_or(false),
            chunk_order,
            last_generation,
            last_update,
        })
    }
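
    // The tuning options arrive as a property string; e.g. a datastore.cfg
    // entry along the lines of `tuning chunk-order=none` (spelling
    // illustrative, the exact option names are defined by pbs-api-types)
    // would be parsed through the API schema above into
    // `DatastoreTuning { chunk_order: Some(ChunkOrder::None) }`.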

    pub fn get_chunk_iterator(
        &self,
    ) -> Result<
        impl Iterator<Item = (Result<proxmox_sys::fs::ReadDirEntry, Error>, usize, bool)>,
        Error,
    > {
        self.inner.chunk_store.get_chunk_iterator()
    }

    pub fn create_fixed_writer<P: AsRef<Path>>(
        &self,
        filename: P,
        size: usize,
        chunk_size: usize,
    ) -> Result<FixedIndexWriter, Error> {
        let index = FixedIndexWriter::create(
            self.inner.chunk_store.clone(),
            filename.as_ref(),
            size,
            chunk_size,
        )?;

        Ok(index)
    }

    pub fn open_fixed_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<FixedIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = FixedIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn create_dynamic_writer<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexWriter, Error> {
        let index = DynamicIndexWriter::create(self.inner.chunk_store.clone(), filename.as_ref())?;

        Ok(index)
    }

    pub fn open_dynamic_reader<P: AsRef<Path>>(
        &self,
        filename: P,
    ) -> Result<DynamicIndexReader, Error> {
        let full_path = self.inner.chunk_store.relative_path(filename.as_ref());

        let index = DynamicIndexReader::open(&full_path)?;

        Ok(index)
    }

    pub fn open_index<P>(&self, filename: P) -> Result<Box<dyn IndexFile + Send>, Error>
    where
        P: AsRef<Path>,
    {
        let filename = filename.as_ref();
        let out: Box<dyn IndexFile + Send> = match archive_type(filename)? {
            ArchiveType::DynamicIndex => Box::new(self.open_dynamic_reader(filename)?),
            ArchiveType::FixedIndex => Box::new(self.open_fixed_reader(filename)?),
            _ => bail!("cannot open index file of unknown type: {:?}", filename),
        };
        Ok(out)
    }
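
    // Dispatch sketch: the archive type is derived from the file extension,
    // so (file names hypothetical) "root.pxar.didx" yields a dynamic index
    // reader and "drive-scsi0.img.fidx" a fixed one:
    //
    //     let index = store.open_index("ct/100/2022-04-01T00:00:00Z/root.pxar.didx")?;
    //     println!("chunks referenced: {}", index.index_count());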

    /// Fast index verification - only checks if chunks exist
    pub fn fast_index_verification(
        &self,
        index: &dyn IndexFile,
        checked: &mut HashSet<[u8; 32]>,
    ) -> Result<(), Error> {
        for pos in 0..index.index_count() {
            let info = index.chunk_info(pos).unwrap();
            if checked.contains(&info.digest) {
                continue;
            }

            self.stat_chunk(&info.digest).map_err(|err| {
                format_err!(
                    "fast_index_verification error, stat_chunk {} failed - {}",
                    hex::encode(&info.digest),
                    err,
                )
            })?;

            checked.insert(info.digest);
        }

        Ok(())
    }

    pub fn name(&self) -> &str {
        self.inner.chunk_store.name()
    }

    pub fn base_path(&self) -> PathBuf {
        self.inner.chunk_store.base_path()
    }

    /// Cleanup a backup directory
    ///
    /// Removes all files not mentioned in the manifest.
    pub fn cleanup_backup_dir(
        &self,
        backup_dir: impl AsRef<pbs_api_types::BackupDir>,
        manifest: &BackupManifest,
    ) -> Result<(), Error> {
        self.cleanup_backup_dir_do(backup_dir.as_ref(), manifest)
    }

    fn cleanup_backup_dir_do(
        &self,
        backup_dir: &pbs_api_types::BackupDir,
        manifest: &BackupManifest,
    ) -> Result<(), Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.to_string());

        let mut wanted_files = HashSet::new();
        wanted_files.insert(MANIFEST_BLOB_NAME.to_string());
        wanted_files.insert(CLIENT_LOG_BLOB_NAME.to_string());
        manifest.files().iter().for_each(|item| {
            wanted_files.insert(item.filename.clone());
        });

        for item in proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &full_path)?.flatten() {
            if let Some(file_type) = item.file_type() {
                if file_type != nix::dir::Type::File {
                    continue;
                }
            }
            let file_name = item.file_name().to_bytes();
            if file_name == b"." || file_name == b".." {
                continue;
            };
            if let Ok(name) = std::str::from_utf8(file_name) {
                if wanted_files.contains(name) {
                    continue;
                }
            }
            println!("remove unused file {:?}", item.file_name());
            let dirfd = item.parent_fd();
            let _res = unsafe { libc::unlinkat(dirfd, item.file_name().as_ptr(), 0) };
        }

        Ok(())
    }

    /// Returns the absolute path for a backup_group
    pub fn group_path(&self, backup_group: &pbs_api_types::BackupGroup) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_group.to_string());
        full_path
    }

    /// Returns the absolute path for backup_dir
    pub fn snapshot_path(&self, backup_dir: &pbs_api_types::BackupDir) -> PathBuf {
        let mut full_path = self.base_path();
        full_path.push(backup_dir.to_string());
        full_path
    }
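
    // Resulting layout sketch (hypothetical store rooted at /datastore/store1):
    //
    //     group_path(vm/100)                         -> /datastore/store1/vm/100
    //     snapshot_path(vm/100/2022-04-01T00:00:00Z) -> /datastore/store1/vm/100/2022-04-01T00:00:00Z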

    /// Remove a complete backup group including all snapshots.
    ///
    /// Returns true if all snapshots were removed, and false if some were protected
    pub fn remove_backup_group(
        self: &Arc<Self>,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<bool, Error> {
        let backup_group = self.backup_group(backup_group.clone());

        backup_group.destroy()
    }

    /// Remove a backup directory including all content
    pub fn remove_backup_dir(
        self: &Arc<Self>,
        backup_dir: &pbs_api_types::BackupDir,
        force: bool,
    ) -> Result<(), Error> {
        let backup_dir = self.backup_dir(backup_dir.clone())?;

        backup_dir.destroy(force)
    }

    /// Returns the time of the last successful backup
    ///
    /// Or None if there is no backup in the group (or the group dir does not exist).
    pub fn last_successful_backup(
        self: &Arc<Self>,
        backup_group: &pbs_api_types::BackupGroup,
    ) -> Result<Option<i64>, Error> {
        let backup_group = self.backup_group(backup_group.clone());

        let group_path = backup_group.full_group_path();

        if group_path.exists() {
            backup_group.last_successful_backup()
        } else {
            Ok(None)
        }
    }

    /// Returns the backup owner.
    ///
    /// The backup owner is the entity who first created the backup group.
    pub fn get_owner(&self, backup_group: &pbs_api_types::BackupGroup) -> Result<Authid, Error> {
        let mut full_path = self.base_path();
        full_path.push(backup_group.to_string());
        full_path.push("owner");
        let owner = proxmox_sys::fs::file_read_firstline(full_path)?;
        owner.trim_end().parse() // remove trailing newline
    }

    pub fn owns_backup(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<bool, Error> {
        let owner = self.get_owner(backup_group)?;

        Ok(check_backup_owner(&owner, auth_id).is_ok())
    }

    /// Set the backup owner.
    pub fn set_owner(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
        force: bool,
    ) -> Result<(), Error> {
        let mut path = self.base_path();
        path.push(backup_group.to_string());
        path.push("owner");

        let mut open_options = std::fs::OpenOptions::new();
        open_options.write(true);
        open_options.truncate(true);

        if force {
            open_options.create(true);
        } else {
            open_options.create_new(true);
        }

        let mut file = open_options
            .open(&path)
            .map_err(|err| format_err!("unable to create owner file {:?} - {}", path, err))?;

        writeln!(file, "{}", auth_id)
            .map_err(|err| format_err!("unable to write owner file {:?} - {}", path, err))?;

        Ok(())
    }
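
    // Note on the `force` flag above: `create(true)` overwrites an existing
    // owner file, while `create_new(true)` makes the open fail with
    // `AlreadyExists` if the group already has an owner, so a non-forced call
    // can never silently take over ownership.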

    /// Create (if it does not already exist) and lock a backup group
    ///
    /// And set the owner to 'userid'. If the group already exists, it returns the
    /// current owner (instead of setting the owner).
    ///
    /// This also acquires an exclusive lock on the directory and returns the lock guard.
    pub fn create_locked_backup_group(
        &self,
        backup_group: &pbs_api_types::BackupGroup,
        auth_id: &Authid,
    ) -> Result<(Authid, DirLockGuard), Error> {
        // create intermediate path first:
        let mut full_path = self.base_path();
        full_path.push(backup_group.ty.as_str());
        std::fs::create_dir_all(&full_path)?;

        full_path.push(&backup_group.id);

        // create the last component now
        match std::fs::create_dir(&full_path) {
            Ok(_) => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                self.set_owner(backup_group, auth_id, false)?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(ref err) if err.kind() == io::ErrorKind::AlreadyExists => {
                let guard = lock_dir_noblock(
                    &full_path,
                    "backup group",
                    "another backup is already running",
                )?;
                let owner = self.get_owner(backup_group)?; // just to be sure
                Ok((owner, guard))
            }
            Err(err) => bail!("unable to create backup group {:?} - {}", full_path, err),
        }
    }

    /// Creates a new backup snapshot inside a BackupGroup
    ///
    /// The BackupGroup directory needs to exist.
    pub fn create_locked_backup_dir(
        &self,
        backup_dir: &pbs_api_types::BackupDir,
    ) -> Result<(PathBuf, bool, DirLockGuard), Error> {
        let relative_path = PathBuf::from(backup_dir.to_string());
        let mut full_path = self.base_path();
        full_path.push(&relative_path);

        let lock = || {
            lock_dir_noblock(
                &full_path,
                "snapshot",
                "internal error - tried creating snapshot that's already in use",
            )
        };

        match std::fs::create_dir(&full_path) {
            Ok(_) => Ok((relative_path, true, lock()?)),
            Err(ref e) if e.kind() == io::ErrorKind::AlreadyExists => {
                Ok((relative_path, false, lock()?))
            }
            Err(e) => Err(e.into()),
        }
    }

    /// Get a streaming iter over top-level backup groups of a datastore
    ///
    /// The iterated item is still a Result that can contain errors from rather unexpected FS or
    /// parsing errors.
    pub fn iter_backup_groups(self: &Arc<DataStore>) -> Result<ListGroups, Error> {
        ListGroups::new(Arc::clone(self))
    }

    /// Get a streaming iter over top-level backup groups of a datastore, filtered by Ok results
    ///
    /// The iterated item's result is already unwrapped; if it contained an error it will be
    /// logged. Can be useful in iterator chain commands.
    pub fn iter_backup_groups_ok(
        self: &Arc<DataStore>,
    ) -> Result<impl Iterator<Item = BackupGroup> + 'static, Error> {
        let this = Arc::clone(self);
        Ok(
            ListGroups::new(Arc::clone(self))?.filter_map(move |group| match group {
                Ok(group) => Some(group),
                Err(err) => {
                    log::error!("list groups error on datastore {} - {}", this.name(), err);
                    None
                }
            }),
        )
    }

    /// Get an in-memory vector of all top-level backup groups of a datastore
    ///
    /// NOTE: using the iterator directly is most often more efficient w.r.t. memory usage
    pub fn list_backup_groups(self: &Arc<DataStore>) -> Result<Vec<BackupGroup>, Error> {
        ListGroups::new(Arc::clone(self))?.collect()
    }
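
    // Sketch of the two access styles (assuming `store: Arc<DataStore>`):
    //
    //     // streaming; logs and skips broken entries:
    //     for group in store.iter_backup_groups_ok()? {
    //         // ... inspect each BackupGroup here ...
    //     }
    //     // collected; propagates the first error instead:
    //     let groups = store.list_backup_groups()?;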

    pub fn list_images(&self) -> Result<Vec<PathBuf>, Error> {
        let base = self.base_path();

        let mut list = vec![];

        use walkdir::WalkDir;

        let walker = WalkDir::new(&base).into_iter();

        // make sure we skip .chunks (and other hidden files to keep it simple)
        fn is_hidden(entry: &walkdir::DirEntry) -> bool {
            entry
                .file_name()
                .to_str()
                .map(|s| s.starts_with('.'))
                .unwrap_or(false)
        }
        let handle_entry_err = |err: walkdir::Error| {
            if let Some(inner) = err.io_error() {
                if let Some(path) = err.path() {
                    if inner.kind() == io::ErrorKind::PermissionDenied {
                        // only allow to skip ext4 fsck directory, avoid GC if, for example,
                        // a user got file permissions wrong on datastore rsync to new server
                        if err.depth() > 1 || !path.ends_with("lost+found") {
                            bail!("cannot continue garbage-collection safely, permission denied on: {:?}", path)
                        }
                    } else {
                        bail!(
                            "unexpected error on datastore traversal: {} - {:?}",
                            inner,
                            path
                        )
                    }
                } else {
                    bail!("unexpected error on datastore traversal: {}", inner)
                }
            }
            Ok(())
        };
        for entry in walker.filter_entry(|e| !is_hidden(e)) {
            let path = match entry {
                Ok(entry) => entry.into_path(),
                Err(err) => {
                    handle_entry_err(err)?;
                    continue;
                }
            };
            if let Ok(archive_type) = archive_type(&path) {
                if archive_type == ArchiveType::FixedIndex
                    || archive_type == ArchiveType::DynamicIndex
                {
                    list.push(path);
                }
            }
        }

        Ok(list)
    }

    // mark chunks used by ``index`` as used
    fn index_mark_used_chunks<I: IndexFile>(
        &self,
        index: I,
        file_name: &Path, // only used for error reporting
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        status.index_file_count += 1;
        status.index_data_bytes += index.index_bytes();

        for pos in 0..index.index_count() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;
            let digest = index.index_digest(pos).unwrap();
            if !self.inner.chunk_store.cond_touch_chunk(digest, false)? {
                task_warn!(
                    worker,
                    "warning: unable to access non-existent chunk {}, required by {:?}",
                    hex::encode(digest),
                    file_name,
                );

                // touch any corresponding .bad files to keep them around, meaning if a chunk is
                // rewritten correctly they will be removed automatically, as well as if no index
                // file requires the chunk anymore (won't get to this loop then)
                for i in 0..=9 {
                    let bad_ext = format!("{}.bad", i);
                    let mut bad_path = PathBuf::new();
                    bad_path.push(self.chunk_path(digest).0);
                    bad_path.set_extension(bad_ext);
                    self.inner.chunk_store.cond_touch_path(&bad_path, false)?;
                }
            }
        }
        Ok(())
    }
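
    // GC phase 1 in a nutshell: touching a chunk updates its atime, and
    // phase 2 later deletes any chunk whose atime lies before the cutoff. A
    // marked chunk therefore survives; e.g. a chunk file such as
    // .chunks/0123/0123...cdef (path shape illustrative) is kept as long as
    // at least one index still references its digest.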

    fn mark_used_chunks(
        &self,
        status: &mut GarbageCollectionStatus,
        worker: &dyn WorkerTaskContext,
    ) -> Result<(), Error> {
        let image_list = self.list_images()?;
        let image_count = image_list.len();

        let mut last_percentage: usize = 0;

        let mut strange_paths_count: u64 = 0;

        for (i, img) in image_list.into_iter().enumerate() {
            worker.check_abort()?;
            worker.fail_on_shutdown()?;

            if let Some(backup_dir_path) = img.parent() {
                let backup_dir_path = backup_dir_path.strip_prefix(self.base_path())?;
                if let Some(backup_dir_str) = backup_dir_path.to_str() {
                    if pbs_api_types::BackupDir::from_str(backup_dir_str).is_err() {
                        strange_paths_count += 1;
                    }
                }
            }

            match std::fs::File::open(&img) {
                Ok(file) => {
                    if let Ok(archive_type) = archive_type(&img) {
                        if archive_type == ArchiveType::FixedIndex {
                            let index = FixedIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        } else if archive_type == ArchiveType::DynamicIndex {
                            let index = DynamicIndexReader::new(file).map_err(|e| {
                                format_err!("can't read index '{}' - {}", img.to_string_lossy(), e)
                            })?;
                            self.index_mark_used_chunks(index, &img, status, worker)?;
                        }
                    }
                }
                Err(err) if err.kind() == io::ErrorKind::NotFound => (), // ignore vanished files
                Err(err) => bail!("can't open index {} - {}", img.to_string_lossy(), err),
            }

            let percentage = (i + 1) * 100 / image_count;
            if percentage > last_percentage {
                task_log!(
                    worker,
                    "marked {}% ({} of {} index files)",
                    percentage,
                    i + 1,
                    image_count,
                );
                last_percentage = percentage;
            }
        }

        if strange_paths_count > 0 {
            task_log!(
                worker,
                "found (and marked) {} index files outside of expected directory scheme",
                strange_paths_count,
            );
        }

        Ok(())
    }

    pub fn last_gc_status(&self) -> GarbageCollectionStatus {
        self.inner.last_gc_status.lock().unwrap().clone()
    }

    pub fn garbage_collection_running(&self) -> bool {
        !matches!(self.inner.gc_mutex.try_lock(), Ok(_))
    }

    pub fn garbage_collection(
        &self,
        worker: &dyn WorkerTaskContext,
        upid: &UPID,
    ) -> Result<(), Error> {
        if let Ok(ref mut _mutex) = self.inner.gc_mutex.try_lock() {
            // avoid running GC if an old daemon process still has a running
            // backup writer, which is not safe as we have no "oldest writer"
            // information and thus no safe atime cutoff
            let _exclusive_lock = self.inner.chunk_store.try_exclusive_lock()?;

            let phase1_start_time = proxmox_time::epoch_i64();
            let oldest_writer = self
                .inner
                .chunk_store
                .oldest_writer()
                .unwrap_or(phase1_start_time);

            let mut gc_status = GarbageCollectionStatus::default();
            gc_status.upid = Some(upid.to_string());

            task_log!(worker, "Start GC phase1 (mark used chunks)");

            self.mark_used_chunks(&mut gc_status, worker)?;

            task_log!(worker, "Start GC phase2 (sweep unused chunks)");
            self.inner.chunk_store.sweep_unused_chunks(
                oldest_writer,
                phase1_start_time,
                &mut gc_status,
                worker,
            )?;

            task_log!(
                worker,
                "Removed garbage: {}",
                HumanByte::from(gc_status.removed_bytes),
            );
            task_log!(worker, "Removed chunks: {}", gc_status.removed_chunks);
            if gc_status.pending_bytes > 0 {
                task_log!(
                    worker,
                    "Pending removals: {} (in {} chunks)",
                    HumanByte::from(gc_status.pending_bytes),
                    gc_status.pending_chunks,
                );
            }
            if gc_status.removed_bad > 0 {
                task_log!(worker, "Removed bad chunks: {}", gc_status.removed_bad);
            }

            if gc_status.still_bad > 0 {
                task_log!(worker, "Leftover bad chunks: {}", gc_status.still_bad);
            }

            task_log!(
                worker,
                "Original data usage: {}",
                HumanByte::from(gc_status.index_data_bytes),
            );

            if gc_status.index_data_bytes > 0 {
                let comp_per =
                    (gc_status.disk_bytes as f64 * 100.) / gc_status.index_data_bytes as f64;
                task_log!(
                    worker,
                    "On-Disk usage: {} ({:.2}%)",
                    HumanByte::from(gc_status.disk_bytes),
                    comp_per,
                );
            }

            task_log!(worker, "On-Disk chunks: {}", gc_status.disk_chunks);

            let deduplication_factor = if gc_status.disk_bytes > 0 {
                (gc_status.index_data_bytes as f64) / (gc_status.disk_bytes as f64)
            } else {
                1.0
            };

            task_log!(worker, "Deduplication factor: {:.2}", deduplication_factor);

            if gc_status.disk_chunks > 0 {
                let avg_chunk = gc_status.disk_bytes / (gc_status.disk_chunks as u64);
                task_log!(worker, "Average chunk size: {}", HumanByte::from(avg_chunk));
            }

            if let Ok(serialized) = serde_json::to_string(&gc_status) {
                let mut path = self.base_path();
                path.push(".gc-status");

                let backup_user = pbs_config::backup_user()?;
                let mode = nix::sys::stat::Mode::from_bits_truncate(0o0644);
                // set the correct owner/group/permissions while saving file
                // owner(rw) = backup, group(r)= backup
                let options = CreateOptions::new()
                    .perm(mode)
                    .owner(backup_user.uid)
                    .group(backup_user.gid);

                // ignore errors
                let _ = replace_file(path, serialized.as_bytes(), options, false);
            }

            *self.inner.last_gc_status.lock().unwrap() = gc_status;
        } else {
            bail!("Start GC failed - (already running/locked)");
        }

        Ok(())
    }
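
    // Worked example for the statistics above (numbers invented): with
    // index_data_bytes = 100 GiB of logical data and disk_bytes = 25 GiB of
    // unique chunk data, the on-disk usage line reports 25.00% and the
    // deduplication factor is 100 / 25 = 4.00.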

    pub fn try_shared_chunk_store_lock(&self) -> Result<ProcessLockSharedGuard, Error> {
        self.inner.chunk_store.try_shared_lock()
    }

    pub fn chunk_path(&self, digest: &[u8; 32]) -> (PathBuf, String) {
        self.inner.chunk_store.chunk_path(digest)
    }

    pub fn cond_touch_chunk(&self, digest: &[u8; 32], assert_exists: bool) -> Result<bool, Error> {
        self.inner
            .chunk_store
            .cond_touch_chunk(digest, assert_exists)
    }

    pub fn insert_chunk(&self, chunk: &DataBlob, digest: &[u8; 32]) -> Result<(bool, u64), Error> {
        self.inner.chunk_store.insert_chunk(chunk, digest)
    }

    pub fn load_blob(&self, backup_dir: &BackupDir, filename: &str) -> Result<DataBlob, Error> {
        let mut path = backup_dir.full_path();
        path.push(filename);

        proxmox_lang::try_block!({
            let mut file = std::fs::File::open(&path)?;
            DataBlob::load_from_reader(&mut file)
        })
        .map_err(|err| format_err!("unable to load blob '{:?}' - {}", path, err))
    }

    pub fn stat_chunk(&self, digest: &[u8; 32]) -> Result<std::fs::Metadata, Error> {
        let (chunk_path, _digest_str) = self.inner.chunk_store.chunk_path(digest);
        std::fs::metadata(chunk_path).map_err(Error::from)
    }

    pub fn load_chunk(&self, digest: &[u8; 32]) -> Result<DataBlob, Error> {
        let (chunk_path, digest_str) = self.inner.chunk_store.chunk_path(digest);

        proxmox_lang::try_block!({
            let mut file = std::fs::File::open(&chunk_path)?;
            DataBlob::load_from_reader(&mut file)
        })
        .map_err(|err| {
            format_err!(
                "store '{}', unable to load chunk '{}' - {}",
                self.name(),
                digest_str,
                err,
            )
        })
    }

    /// Load the manifest without a lock. Must not be written back.
    pub fn load_manifest(&self, backup_dir: &BackupDir) -> Result<(BackupManifest, u64), Error> {
        let blob = self.load_blob(backup_dir, MANIFEST_BLOB_NAME)?;
        let raw_size = blob.raw_size();
        let manifest = BackupManifest::try_from(blob)?;
        Ok((manifest, raw_size))
    }

    /// Update the manifest of the specified snapshot. Never write a manifest directly,
    /// only use this method - anything else may break locking guarantees.
    pub fn update_manifest(
        &self,
        backup_dir: &BackupDir,
        update_fn: impl FnOnce(&mut BackupManifest),
    ) -> Result<(), Error> {
        let _guard = backup_dir.lock_manifest()?;
        let (mut manifest, _) = self.load_manifest(backup_dir)?;

        update_fn(&mut manifest);

        let manifest = serde_json::to_value(manifest)?;
        let manifest = serde_json::to_string_pretty(&manifest)?;
        let blob = DataBlob::encode(manifest.as_bytes(), None, true)?;
        let raw_data = blob.raw_data();

        let mut path = backup_dir.full_path();
        path.push(MANIFEST_BLOB_NAME);

        // atomic replace invalidates flock - no other writes past this point!
        replace_file(&path, raw_data, CreateOptions::new(), false)?;

        Ok(())
    }
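
    // Typical closure-style update (a sketch; the key shown is illustrative,
    // the real key names live in the manifest code):
    //
    //     store.update_manifest(&backup_dir, |manifest| {
    //         manifest.unprotected["verify_state"] = serde_json::json!("ok");
    //     })?;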

    /// Updates the protection status of the specified snapshot.
    pub fn update_protection(&self, backup_dir: &BackupDir, protection: bool) -> Result<(), Error> {
        let full_path = backup_dir.full_path();

        let _guard = lock_dir_noblock(&full_path, "snapshot", "possibly running or in use")?;

        let protected_path = backup_dir.protected_file();
        if protection {
            std::fs::File::create(protected_path)
                .map_err(|err| format_err!("could not create protection file: {}", err))?;
        } else if let Err(err) = std::fs::remove_file(protected_path) {
            // ignore error for non-existing file
            if err.kind() != std::io::ErrorKind::NotFound {
                bail!("could not remove protection file: {}", err);
            }
        }

        Ok(())
    }

    pub fn verify_new(&self) -> bool {
        self.inner.verify_new
    }

    /// Returns a list of chunks sorted by their inode number on disk.
    ///
    /// Chunks that could not be stat'ed are at the end of the list.
    pub fn get_chunks_in_order<F, A>(
        &self,
        index: &Box<dyn IndexFile + Send>,
        skip_chunk: F,
        check_abort: A,
    ) -> Result<Vec<(usize, u64)>, Error>
    where
        F: Fn(&[u8; 32]) -> bool,
        A: Fn(usize) -> Result<(), Error>,
    {
        let index_count = index.index_count();
        let mut chunk_list = Vec::with_capacity(index_count);
        use std::os::unix::fs::MetadataExt;
        for pos in 0..index_count {
            check_abort(pos)?;

            let info = index.chunk_info(pos).unwrap();

            if skip_chunk(&info.digest) {
                continue;
            }

            let ino = match self.inner.chunk_order {
                ChunkOrder::Inode => {
                    match self.stat_chunk(&info.digest) {
                        Err(_) => u64::MAX, // could not stat, move to end of list
                        Ok(metadata) => metadata.ino(),
                    }
                }
                ChunkOrder::None => 0,
            };

            chunk_list.push((pos, ino));
        }

        match self.inner.chunk_order {
            // sorting by inode improves data locality, which makes it lots faster on spinners
            ChunkOrder::Inode => {
                chunk_list.sort_unstable_by(|(_, ino_a), (_, ino_b)| ino_a.cmp(ino_b))
            }
            ChunkOrder::None => {}
        }

        Ok(chunk_list)
    }
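
    // Reading chunks in the returned (position, inode) order approximates the
    // on-disk layout; e.g. for illustrative stat results
    // [(0, ino 900), (1, ino 120), (2, ino 450)] a consumer would visit
    // positions 1, 2, 0 and thus seek mostly forward on rotating disks.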

    /// Open a backup group from this datastore.
    pub fn backup_group(self: &Arc<Self>, group: pbs_api_types::BackupGroup) -> BackupGroup {
        BackupGroup::new(Arc::clone(self), group)
    }

    /// Open a backup group from this datastore.
    pub fn backup_group_from_parts<T>(self: &Arc<Self>, ty: BackupType, id: T) -> BackupGroup
    where
        T: Into<String>,
    {
        self.backup_group((ty, id.into()).into())
    }

    /// Open a backup group from this datastore by backup group path such as `vm/100`.
    ///
    /// Convenience method for `store.backup_group(path.parse()?)`
    pub fn backup_group_from_path(self: &Arc<Self>, path: &str) -> Result<BackupGroup, Error> {
        Ok(self.backup_group(path.parse()?))
    }

    /// Open a snapshot (backup directory) from this datastore.
    pub fn backup_dir(self: &Arc<Self>, dir: pbs_api_types::BackupDir) -> Result<BackupDir, Error> {
        BackupDir::with_group(self.backup_group(dir.group), dir.time)
    }

    /// Open a snapshot (backup directory) from this datastore.
    pub fn backup_dir_from_parts<T>(
        self: &Arc<Self>,
        ty: BackupType,
        id: T,
        time: i64,
    ) -> Result<BackupDir, Error>
    where
        T: Into<String>,
    {
        self.backup_dir((ty, id.into(), time).into())
    }

    /// Open a snapshot (backup directory) from this datastore with a cached rfc3339 time string.
    pub fn backup_dir_with_rfc3339<T: Into<String>>(
        self: &Arc<Self>,
        group: BackupGroup,
        time_string: T,
    ) -> Result<BackupDir, Error> {
        BackupDir::with_rfc3339(group, time_string.into())
    }

    /// Open a snapshot (backup directory) from this datastore by a snapshot path.
    pub fn backup_dir_from_path(self: &Arc<Self>, path: &str) -> Result<BackupDir, Error> {
        self.backup_dir(path.parse()?)
    }
}
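
// The accessor family above mirrors the on-disk path syntax; e.g. (group and
// snapshot strings illustrative):
//
//     let group = store.backup_group_from_path("vm/100")?;
//     let snap = store.backup_dir_from_path("vm/100/2022-04-01T00:00:00Z")?;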

/// An iterator for all BackupDir's (snapshots) in a BackupGroup
pub struct ListSnapshots {
    group: BackupGroup,
    fd: proxmox_sys::fs::ReadDir,
}

impl ListSnapshots {
    pub fn new(group: BackupGroup) -> Result<Self, Error> {
        let group_path = group.full_group_path();
        Ok(ListSnapshots {
            fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &group_path)
                .map_err(|err| format_err!("read dir {group_path:?} - {err}"))?,
            group,
        })
    }
}

impl Iterator for ListSnapshots {
    type Item = Result<BackupDir, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            let item = self.fd.next()?; // either get an entry to check or return None if exhausted
            let entry = match item {
                Ok(ref entry) => {
                    match entry.file_type() {
                        Some(nix::dir::Type::Directory) => entry, // OK
                        _ => continue,
                    }
                }
                Err(err) => return Some(Err(err)),
            };
            if let Ok(name) = entry.file_name().to_str() {
                if BACKUP_DATE_REGEX.is_match(name) {
                    let backup_time = match proxmox_time::parse_rfc3339(name) {
                        Ok(time) => time,
                        Err(err) => return Some(Err(err)),
                    };

                    return Some(BackupDir::with_group(self.group.clone(), backup_time));
                }
            }
        }
    }
}

/// An iterator for a (single) level of Backup Groups
pub struct ListGroups {
    store: Arc<DataStore>,
    type_fd: proxmox_sys::fs::ReadDir,
    id_state: Option<(BackupType, proxmox_sys::fs::ReadDir)>,
}

impl ListGroups {
    pub fn new(store: Arc<DataStore>) -> Result<Self, Error> {
        Ok(ListGroups {
            type_fd: proxmox_sys::fs::read_subdir(libc::AT_FDCWD, &store.base_path())?,
            store,
            id_state: None,
        })
    }
}

impl Iterator for ListGroups {
    type Item = Result<BackupGroup, Error>;

    fn next(&mut self) -> Option<Self::Item> {
        loop {
            if let Some((group_type, ref mut id_fd)) = self.id_state {
                let item = match id_fd.next() {
                    Some(item) => item,
                    None => {
                        self.id_state = None;
                        continue; // exhausted all IDs for the current group type, try others
                    }
                };
                let entry = match item {
                    Ok(ref entry) => {
                        match entry.file_type() {
                            Some(nix::dir::Type::Directory) => entry, // OK
                            _ => continue,
                        }
                    }
                    Err(err) => return Some(Err(err)),
                };
                if let Ok(name) = entry.file_name().to_str() {
                    if BACKUP_ID_REGEX.is_match(name) {
                        return Some(Ok(BackupGroup::new(
                            Arc::clone(&self.store),
                            (group_type, name.to_owned()).into(),
                        )));
                    }
                }
            } else {
                let item = self.type_fd.next()?;
                let entry = match item {
                    // filter directories
                    Ok(ref entry) => {
                        match entry.file_type() {
                            Some(nix::dir::Type::Directory) => entry, // OK
                            _ => continue,
                        }
                    }
                    Err(err) => return Some(Err(err)),
                };
                if let Ok(name) = entry.file_name().to_str() {
                    if let Ok(group_type) = BackupType::from_str(name) {
                        // found a backup group type, descend into it to scan all IDs in it
                        // by switching to the id-state branch
                        let base_fd = entry.parent_fd();
                        let id_dirfd = match proxmox_sys::fs::read_subdir(base_fd, name) {
                            Ok(dirfd) => dirfd,
                            Err(err) => return Some(Err(err.into())),
                        };
                        self.id_state = Some((group_type, id_dirfd));
                    }
                }
            }
        }
    }
}
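
// ListGroups walks the two-level on-disk hierarchy as a small state machine:
// the outer branch scans type directories ("vm", "ct", "host"), the inner one
// scans IDs within the current type; e.g. a tree like vm/{100,101} and ct/200
// (names illustrative) yields the groups vm/100, vm/101 and ct/200 in
// directory order.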