make datastore BackupGroup/Dir ctors private

And use the api-types for their contents.

These are supposed to be instances bound to a datastore; the pure
specifications are the ones in pbs_api_types, which should be
preferred in crates such as clients that do not need to deal
with the datastore directly.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
commit db87d93efc (parent 38aa71fcc8)
Author: Wolfgang Bumiller
Date:   2022-04-19 10:38:46 +02:00

24 changed files with 440 additions and 408 deletions
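To make the split concrete, here is a self-contained sketch of the pattern the
diff below implements. The method name DataStore::backup_group(), the From
tuple conversion and the as_ref() calls are taken from the hunks; the names
BackupGroupSpec and BoundBackupGroup are placeholders for
pbs_api_types::BackupGroup and pbs_datastore's BackupGroup, and the bodies are
illustrative, not the real definitions:

// Self-contained sketch -- the real types carry more fields and validation.

#[derive(Clone, Copy, PartialEq)]
pub enum BackupType { Vm, Ct, Host }

// The pure specification: plain data with public fields, freely
// constructible anywhere (clients, api-type parsing, ...).
#[derive(Clone)]
pub struct BackupGroupSpec {
    pub ty: BackupType,
    pub id: String,
}

impl From<(BackupType, String)> for BackupGroupSpec {
    fn from((ty, id): (BackupType, String)) -> Self {
        BackupGroupSpec { ty, id }
    }
}

pub struct DataStore { /* chunk store, config, ... */ }

// The datastore-bound instance: no public constructor, so code outside
// pbs_datastore can only obtain one through a DataStore method.
pub struct BoundBackupGroup {
    group: BackupGroupSpec,
}

impl AsRef<BackupGroupSpec> for BoundBackupGroup {
    // Hand the pure spec back out, e.g. for get_owner() or permission checks.
    fn as_ref(&self) -> &BackupGroupSpec {
        &self.group
    }
}

impl DataStore {
    // Mirrors DataStore::backup_group() as used in the hunks below.
    pub fn backup_group(&self, ty: BackupType, id: impl Into<String>) -> BoundBackupGroup {
        BoundBackupGroup { group: (ty, id.into()).into() }
    }
}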


@@ -63,16 +63,16 @@ use crate::server::jobstate::Job;
const GROUP_NOTES_FILE_NAME: &str = "notes";
fn get_group_note_path(store: &DataStore, group: &BackupGroup) -> PathBuf {
fn get_group_note_path(store: &DataStore, group: &pbs_api_types::BackupGroup) -> PathBuf {
let mut note_path = store.base_path();
note_path.push(group.relative_group_path());
note_path.push(group.to_string());
note_path.push(GROUP_NOTES_FILE_NAME);
note_path
}
fn check_priv_or_backup_owner(
store: &DataStore,
group: &BackupGroup,
group: &pbs_api_types::BackupGroup,
auth_id: &Authid,
required_privs: u64,
) -> Result<(), Error> {
@@ -170,7 +170,7 @@ pub fn list_groups(
.iter_backup_groups()?
.try_fold(Vec::new(), |mut group_info, group| {
let group = group?;
let owner = match datastore.get_owner(&group) {
let owner = match datastore.get_owner(group.as_ref()) {
Ok(auth_id) => auth_id,
Err(err) => {
let id = &store;
@@ -203,7 +203,7 @@ pub fn list_groups(
})
.to_owned();
let note_path = get_group_note_path(&datastore, &group);
let note_path = get_group_note_path(&datastore, group.as_ref());
let comment = file_read_firstline(&note_path).ok();
group_info.push(GroupListItem {
@@ -244,7 +244,7 @@ pub fn delete_group(
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let group = BackupGroup::new(backup_type, backup_id);
let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
@@ -285,11 +285,11 @@ pub fn list_snapshot_files(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let snapshot = datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
snapshot.group(),
snapshot.as_ref(),
&auth_id,
PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ,
)?;
@@ -328,17 +328,17 @@ pub fn delete_snapshot(
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let snapshot = BackupDir::new(backup_type, backup_id, backup_time)?;
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let snapshot = datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
snapshot.group(),
snapshot.as_ref(),
&auth_id,
PRIV_DATASTORE_MODIFY,
)?;
datastore.remove_backup_dir(&snapshot, false)?;
datastore.remove_backup_dir(snapshot.as_ref(), false)?;
Ok(Value::Null)
}
@@ -386,7 +386,9 @@ pub fn list_snapshots(
// FIXME: filter also owner before collecting, for doing that nicely the owner should move into
// backup group and provide an error free (Err -> None) accessor
let groups = match (backup_type, backup_id) {
(Some(backup_type), Some(backup_id)) => vec![BackupGroup::new(backup_type, backup_id)],
(Some(backup_type), Some(backup_id)) => {
vec![datastore.backup_group(backup_type, backup_id)]
}
(Some(backup_type), None) => datastore
.iter_backup_groups_ok()?
.filter(|group| group.backup_type() == backup_type)
@@ -471,7 +473,7 @@ pub fn list_snapshots(
};
groups.iter().try_fold(Vec::new(), |mut snapshots, group| {
let owner = match datastore.get_owner(group) {
let owner = match datastore.get_owner(group.as_ref()) {
Ok(auth_id) => auth_id,
Err(err) => {
eprintln!(
@@ -502,7 +504,7 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
store
.iter_backup_groups_ok()?
.filter(|group| {
let owner = match store.get_owner(group) {
let owner = match store.get_owner(group.as_ref()) {
Ok(owner) => owner,
Err(err) => {
let id = store.name();
@@ -658,20 +660,20 @@ pub fn verify(
"{}:{}/{}/{:08X}",
store, backup_type, backup_id, backup_time
);
let dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(&datastore, dir.group(), &auth_id, PRIV_DATASTORE_VERIFY)?;
check_priv_or_backup_owner(&datastore, dir.as_ref(), &auth_id, PRIV_DATASTORE_VERIFY)?;
backup_dir = Some(dir);
worker_type = "verify_snapshot";
}
(Some(backup_type), Some(backup_id), None) => {
worker_id = format!("{}:{}/{}", store, backup_type, backup_id);
let group = BackupGroup::new(backup_type, backup_id);
let group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_VERIFY)?;
backup_group = Some(group);
backup_group = Some(datastore.backup_group_from_spec(group));
worker_type = "verify_group";
}
(None, None, None) => {
@@ -776,11 +778,11 @@ pub fn prune(
) -> Result<Value, Error> {
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let group = BackupGroup::new(backup_type, &backup_id);
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;
let group = datastore.backup_group(backup_type, &backup_id);
check_priv_or_backup_owner(&datastore, group.as_ref(), &auth_id, PRIV_DATASTORE_MODIFY)?;
let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);
@@ -798,13 +800,10 @@ pub fn prune(
for (info, mark) in prune_info {
let keep = keep_all || mark.keep();
let backup_time = info.backup_dir.backup_time();
let group = info.backup_dir.group();
prune_result.push(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-time": backup_time,
"backup-type": info.backup_dir.backup_type(),
"backup-id": info.backup_dir.backup_id(),
"backup-time": info.backup_dir.backup_time(),
"keep": keep,
"protected": mark.protected(),
}));
@@ -837,28 +836,22 @@ pub fn prune(
let backup_time = info.backup_dir.backup_time();
let timestamp = info.backup_dir.backup_time_string();
let group = info.backup_dir.group();
let group: &pbs_api_types::BackupGroup = info.backup_dir.as_ref();
let msg = format!(
"{}/{}/{} {}",
group.backup_type(),
group.backup_id(),
timestamp,
mark,
);
let msg = format!("{}/{}/{} {}", group.ty, group.id, timestamp, mark,);
task_log!(worker, "{}", msg);
prune_result.push(json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-type": group.ty,
"backup-id": group.id,
"backup-time": backup_time,
"keep": keep,
"protected": mark.protected(),
}));
if !(dry_run || keep) {
if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
task_warn!(
worker,
"failed to remove dir {:?}: {}",
@@ -1079,14 +1072,14 @@ pub fn download_file(
let file_name = required_string_param(&param, "file-name")?.to_owned();
let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_id = required_string_param(&param, "backup-id")?.to_owned();
let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_READ,
)?;
@@ -1162,14 +1155,14 @@ pub fn download_file_decoded(
let file_name = required_string_param(&param, "file-name")?.to_owned();
let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
let backup_id = required_string_param(&param, "backup-id")?;
let backup_id = required_string_param(&param, "backup-id")?.to_owned();
let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_READ,
)?;
@@ -1291,10 +1284,10 @@ pub fn upload_backup_log(
let backup_id = required_string_param(&param, "backup-id")?;
let backup_time = required_integer_param(&param, "backup-time")?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let owner = datastore.get_owner(backup_dir.group())?;
let owner = datastore.get_owner(backup_dir.as_ref())?;
check_backup_owner(&owner, &auth_id)?;
let mut path = datastore.base_path();
@@ -1363,11 +1356,12 @@ pub fn catalog(
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir =
datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_READ,
)?;
@@ -1446,11 +1440,12 @@ pub fn pxar_file_download(
let tar = param["tar"].as_bool().unwrap_or(false);
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir = datastore
.backup_dir_from_spec((backup_type, backup_id.to_owned(), backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_READ,
)?;
@@ -1637,7 +1632,7 @@ pub fn get_group_notes(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_group = BackupGroup::new(backup_type, backup_id);
let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_AUDIT)?;
@@ -1673,7 +1668,7 @@ pub fn set_group_notes(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_group = BackupGroup::new(backup_type, backup_id);
let backup_group = pbs_api_types::BackupGroup::from((backup_type, backup_id));
check_priv_or_backup_owner(&datastore, &backup_group, &auth_id, PRIV_DATASTORE_MODIFY)?;
@@ -1707,11 +1702,12 @@ pub fn get_notes(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir =
datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_AUDIT,
)?;
@@ -1753,11 +1749,12 @@ pub fn set_notes(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir =
datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_MODIFY,
)?;
@@ -1795,11 +1792,12 @@ pub fn get_protection(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir =
datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_AUDIT,
)?;
@@ -1837,11 +1835,12 @@ pub fn set_protection(
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir =
datastore.backup_dir_from_spec((backup_type, backup_id, backup_time).into())?;
check_priv_or_backup_owner(
&datastore,
backup_dir.group(),
backup_dir.as_ref(),
&auth_id,
PRIV_DATASTORE_MODIFY,
)?;
@@ -1875,7 +1874,7 @@ pub fn set_backup_owner(
) -> Result<(), Error> {
let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;
let backup_group = BackupGroup::new(backup_type, backup_id);
let backup_group = datastore.backup_group(backup_type, backup_id);
let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
@@ -1887,7 +1886,7 @@ pub fn set_backup_owner(
// High-privilege user/token
true
} else if (privs & PRIV_DATASTORE_BACKUP) != 0 {
let owner = datastore.get_owner(&backup_group)?;
let owner = datastore.get_owner(backup_group.as_ref())?;
match (owner.is_token(), new_owner.is_token()) {
(true, true) => {
@@ -1935,7 +1934,7 @@ pub fn set_backup_owner(
);
}
datastore.set_owner(&backup_group, &new_owner, true)?;
datastore.set_owner(backup_group.as_ref(), &new_owner, true)?;
Ok(())
}
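A recurring change in this file: call sites that used to pass snapshot.group()
or &snapshot now pass snapshot.as_ref(), and the compiler selects the
conversion from the parameter type. A minimal sketch of how one bound snapshot
type can offer both conversions, reusing the placeholder names from the sketch
above:

#[derive(Clone)]
pub struct BackupDirSpec {
    pub group: BackupGroupSpec,
    pub time: i64,
}

pub struct BoundBackupDir {
    dir: BackupDirSpec,
}

// Hand out the full snapshot spec...
impl AsRef<BackupDirSpec> for BoundBackupDir {
    fn as_ref(&self) -> &BackupDirSpec {
        &self.dir
    }
}

// ...or only its group, depending on what the callee expects.
impl AsRef<BackupGroupSpec> for BoundBackupDir {
    fn as_ref(&self) -> &BackupGroupSpec {
        &self.dir.group
    }
}

fn check_priv_or_backup_owner(_group: &BackupGroupSpec) { /* ... */ }

fn demo(snapshot: &BoundBackupDir) {
    // Resolves to AsRef<BackupGroupSpec> because of the parameter type,
    // replacing the old explicit snapshot.group() call.
    check_priv_or_backup_owner(snapshot.as_ref());
}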


@@ -614,7 +614,7 @@ impl BackupEnvironment {
.map_err(|err| format_err!("unable to update manifest blob - {}", err))?;
if let Some(base) = &self.last_backup {
let path = self.datastore.snapshot_path(&base.backup_dir);
let path = self.datastore.snapshot_path(base.backup_dir.as_ref());
if !path.exists() {
bail!(
"base snapshot {} was removed during backup, cannot finish as chunks might be missing",
@@ -643,8 +643,8 @@ impl BackupEnvironment {
let worker_id = format!(
"{}:{}/{}/{:08X}",
self.datastore.name(),
self.backup_dir.group().backup_type(),
self.backup_dir.group().backup_id(),
self.backup_dir.backup_type(),
self.backup_dir.backup_id(),
self.backup_dir.backup_time()
);
@@ -710,7 +710,8 @@ impl BackupEnvironment {
let mut state = self.state.lock().unwrap();
state.finished = true;
self.datastore.remove_backup_dir(&self.backup_dir, true)?;
self.datastore
.remove_backup_dir(self.backup_dir.as_ref(), true)?;
Ok(())
}
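The path handling above leans on the api-types' Display implementation: the
rendered form of a group or snapshot doubles as its relative path under the
datastore, which is why relative_group_path()/relative_path() calls can become
to_string(). A hedged sketch of the assumed helper shape (not the real
implementation):

use std::fmt::Display;
use std::path::PathBuf;

// Assumed shape of DataStore::snapshot_path() and the note-path helper:
// the spec's Display output ("<type>/<id>" or "<type>/<id>/<timestamp>")
// is simply pushed onto the datastore's base path.
fn snapshot_path(base_path: PathBuf, dir: &impl Display) -> PathBuf {
    let mut path = base_path;
    path.push(dir.to_string());
    path
}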


@@ -21,7 +21,6 @@ use pbs_api_types::{
DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::{BackupDir, BackupGroup};
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_PROTOCOL_ID_V1};
@@ -107,7 +106,7 @@ fn upgrade_to_backup_protocol(
let env_type = rpcenv.env_type();
let backup_group = BackupGroup::new(backup_type, backup_id);
let backup_group = datastore.backup_group(backup_type, backup_id);
let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
if !benchmark {
@@ -123,7 +122,7 @@ fn upgrade_to_backup_protocol(
// lock backup group to only allow one backup per group at a time
let (owner, _group_guard) =
datastore.create_locked_backup_group(&backup_group, &auth_id)?;
datastore.create_locked_backup_group(backup_group.as_ref(), &auth_id)?;
// permission check
let correct_owner =
@@ -155,7 +154,7 @@ fn upgrade_to_backup_protocol(
}
};
let backup_dir = BackupDir::with_group(backup_group, backup_time)?;
let backup_dir = backup_group.backup_dir(backup_time)?;
let _last_guard = if let Some(last) = &last_backup {
if backup_dir.backup_time() <= last.backup_dir.backup_time() {
@@ -163,7 +162,7 @@ fn upgrade_to_backup_protocol(
}
// lock last snapshot to prevent forgetting/pruning it during backup
let full_path = datastore.snapshot_path(&last.backup_dir);
let full_path = datastore.snapshot_path(last.backup_dir.as_ref());
Some(lock_dir_noblock_shared(
&full_path,
"snapshot",
@@ -173,7 +172,7 @@ fn upgrade_to_backup_protocol(
None
};
let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(&backup_dir)?;
let (path, is_new, snap_guard) = datastore.create_locked_backup_dir(backup_dir.as_ref())?;
if !is_new {
bail!("backup directory already exists.");
}
@@ -812,7 +811,7 @@ fn download_previous(
None => bail!("no valid previous backup"),
};
let mut path = env.datastore.snapshot_path(&last_backup.backup_dir);
let mut path = env.datastore.snapshot_path(last_backup.backup_dir.as_ref());
path.push(&archive_name);
{
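Note how BackupDir::with_group(backup_group, backup_time)? turns into
backup_group.backup_dir(backup_time)?: the snapshot is derived from an
already-bound group instead of being built free-standing. A sketch of the
assumed helper, again on the placeholder types from the earlier sketches
(fallibility presumably comes from rendering the timestamp as a time string):

use anyhow::Error;

impl BoundBackupGroup {
    // Assumed shape of backup_group.backup_dir(time): build a bound
    // snapshot inside this group; kept fallible like BackupDir::new() was.
    pub fn backup_dir(&self, time: i64) -> Result<BoundBackupDir, Error> {
        Ok(BoundBackupDir {
            dir: BackupDirSpec { group: self.group.clone(), time },
        })
    }
}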


@@ -21,7 +21,6 @@ use pbs_api_types::{
PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupDir;
use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{archive_type, ArchiveType};
use pbs_datastore::{DataStore, PROXMOX_BACKUP_READER_PROTOCOL_ID_V1};
@@ -113,9 +112,9 @@ fn upgrade_to_backup_reader_protocol(
let env_type = rpcenv.env_type();
let backup_dir = BackupDir::new(backup_type, backup_id, backup_time)?;
let backup_dir = datastore.backup_dir_from_parts(backup_type, backup_id, backup_time)?;
if !priv_read {
let owner = datastore.get_owner(backup_dir.group())?;
let owner = datastore.get_owner(backup_dir.as_ref())?;
let correct_owner = owner == auth_id
|| (owner.is_token() && Authid::from(owner.user().clone()) == auth_id);
if !correct_owner {
@@ -124,7 +123,7 @@ fn upgrade_to_backup_reader_protocol(
}
let _guard = lock_dir_noblock_shared(
&datastore.snapshot_path(&backup_dir),
&backup_dir.full_path(datastore.base_path()),
"snapshot",
"locked by another operation",
)?;


@@ -576,7 +576,7 @@ pub fn backup_snapshot(
) -> Result<bool, Error> {
task_log!(worker, "backup snapshot {}", snapshot);
let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
let snapshot_reader = match SnapshotReader::new(datastore.clone(), (&snapshot).into()) {
Ok(reader) => reader,
Err(err) => {
// ignore missing snapshots and continue


@@ -13,7 +13,6 @@ use pbs_api_types::{
MEDIA_POOL_NAME_SCHEMA, MEDIA_UUID_SCHEMA, PRIV_TAPE_AUDIT, VAULT_NAME_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupDir;
use crate::tape::{
changer::update_online_status, media_catalog_snapshot_list, Inventory, MediaCatalog, MediaPool,
@@ -439,15 +438,15 @@ pub fn list_content(
.unwrap_or_else(|_| set.uuid.to_string());
for (store, snapshot) in media_catalog_snapshot_list(status_path, &media_id)? {
let backup_dir: BackupDir = snapshot.parse()?;
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
if let Some(backup_type) = filter.backup_type {
if backup_dir.group().backup_type() != backup_type {
if backup_dir.ty() != backup_type {
continue;
}
}
if let Some(ref backup_id) = filter.backup_id {
if backup_dir.group().backup_id() != backup_id {
if backup_dir.id() != backup_id {
continue;
}
}
@@ -462,7 +461,7 @@ pub fn list_content(
seq_nr: set.seq_nr,
snapshot: snapshot.to_owned(),
store: store.to_owned(),
backup_time: backup_dir.backup_time(),
backup_time: backup_dir.time,
});
}
}
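On the tape side the catalog only stores snapshot strings, so the api-type's
FromStr impl replaces the datastore type entirely; ty(), id() and the public
time field come straight off the parsed spec. A hypothetical usage sketch (the
example snapshot string and helper name are assumptions, not from the commit):

use anyhow::Error;

fn content_filter_example(snapshot: &str, wanted_id: &str) -> Result<bool, Error> {
    // pbs_api_types::BackupDir implements FromStr, as used in the hunk above.
    let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
    let _epoch: i64 = backup_dir.time; // public field, no accessor needed
    Ok(backup_dir.id() == wanted_id)
}

// e.g. content_filter_example("vm/100/2022-04-19T10:38:46Z", "100")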


@@ -22,7 +22,6 @@ use pbs_api_types::{
TAPE_RESTORE_SNAPSHOT_SCHEMA, UPID_SCHEMA,
};
use pbs_config::CachedUserInfo;
use pbs_datastore::backup_info::BackupDir;
use pbs_datastore::dynamic_index::DynamicIndexReader;
use pbs_datastore::fixed_index::FixedIndexReader;
use pbs_datastore::index::IndexFile;
@@ -423,7 +422,7 @@ fn restore_list_worker(
let snapshot = split
.next()
.ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
let backup_dir: BackupDir = snapshot.parse()?;
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
format_err!(
@@ -433,7 +432,7 @@ fn restore_list_worker(
})?;
let (owner, _group_lock) =
datastore.create_locked_backup_group(backup_dir.group(), restore_owner)?;
datastore.create_locked_backup_group(backup_dir.as_ref(), restore_owner)?;
if restore_owner != &owner {
// only the owner is allowed to create additional snapshots
bail!(
@@ -577,7 +576,7 @@ fn restore_list_worker(
let snapshot = split
.next()
.ok_or_else(|| format_err!("invalid snapshot:{}", store_snapshot))?;
let backup_dir: BackupDir = snapshot.parse()?;
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
let datastore = store_map.get_datastore(source_datastore).ok_or_else(|| {
format_err!("unexpected source datastore: {}", source_datastore)
@@ -1037,12 +1036,12 @@ fn restore_archive<'a>(
snapshot
);
let backup_dir: BackupDir = snapshot.parse()?;
let backup_dir: pbs_api_types::BackupDir = snapshot.parse()?;
if let Some((store_map, authid)) = target.as_ref() {
if let Some(datastore) = store_map.get_datastore(&datastore_name) {
let (owner, _group_lock) =
datastore.create_locked_backup_group(backup_dir.group(), authid)?;
datastore.create_locked_backup_group(backup_dir.as_ref(), authid)?;
if *authid != &owner {
// only the owner is allowed to create additional snapshots
bail!(
@@ -1054,7 +1053,7 @@ fn restore_archive<'a>(
}
let (rel_path, is_new, _snap_lock) =
datastore.create_locked_backup_dir(&backup_dir)?;
datastore.create_locked_backup_dir(backup_dir.as_ref())?;
let mut path = datastore.base_path();
path.push(rel_path);


@@ -328,7 +328,7 @@ pub fn verify_backup_dir(
filter: Option<&dyn Fn(&BackupManifest) -> bool>,
) -> Result<bool, Error> {
let snap_lock = lock_dir_noblock_shared(
&verify_worker.datastore.snapshot_path(backup_dir),
&verify_worker.datastore.snapshot_path(backup_dir.as_ref()),
"snapshot",
"locked by another operation",
);
@@ -514,7 +514,7 @@ pub fn verify_all_backups(
}
let filter_by_owner = |group: &BackupGroup| {
match (verify_worker.datastore.get_owner(group), &owner) {
match (verify_worker.datastore.get_owner(group.as_ref()), &owner) {
(Ok(ref group_owner), Some(owner)) => {
group_owner == owner
|| (group_owner.is_token()
@@ -530,7 +530,7 @@ pub fn verify_all_backups(
}
(Err(err), None) => {
// we don't filter by owner, but we want to log the error
task_log!(worker, "Failed to get owner of group '{} - {}", group, err,);
task_log!(worker, "Failed to get owner of group '{} - {}", group, err);
errors.push(group.to_string());
true
}


@@ -46,7 +46,7 @@ pub fn prune_datastore(
let group = group?;
let list = group.list_backups(&datastore.base_path())?;
if !has_privs && !datastore.owns_backup(&group, &auth_id)? {
if !has_privs && !datastore.owns_backup(group.as_ref(), &auth_id)? {
continue;
}
@@ -72,7 +72,7 @@ pub fn prune_datastore(
info.backup_dir.backup_time_string()
);
if !keep && !dry_run {
if let Err(err) = datastore.remove_backup_dir(&info.backup_dir, false) {
if let Err(err) = datastore.remove_backup_dir(info.backup_dir.as_ref(), false) {
task_warn!(
worker,
"failed to remove dir {:?}: {}",


@@ -28,7 +28,7 @@ use pbs_datastore::index::IndexFile;
use pbs_datastore::manifest::{
archive_type, ArchiveType, BackupManifest, FileInfo, CLIENT_LOG_BLOB_NAME, MANIFEST_BLOB_NAME,
};
use pbs_datastore::{BackupDir, BackupGroup, DataStore, StoreProgress};
use pbs_datastore::{DataStore, StoreProgress};
use pbs_tools::sha::sha256;
use proxmox_rest_server::WorkerTask;
@@ -223,13 +223,13 @@ async fn pull_single_archive(
reader: &BackupReader,
chunk_reader: &mut RemoteChunkReader,
tgt_store: Arc<DataStore>,
snapshot: &BackupDir,
snapshot: &pbs_api_types::BackupDir,
archive_info: &FileInfo,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<(), Error> {
let archive_name = &archive_info.filename;
let mut path = tgt_store.base_path();
path.push(snapshot.relative_path());
path.push(snapshot.to_string());
path.push(archive_name);
let mut tmp_path = path.clone();
@@ -321,15 +321,17 @@ async fn pull_snapshot(
worker: &WorkerTask,
reader: Arc<BackupReader>,
tgt_store: Arc<DataStore>,
snapshot: &BackupDir,
snapshot: &pbs_api_types::BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<(), Error> {
let snapshot_relative_path = snapshot.to_string();
let mut manifest_name = tgt_store.base_path();
manifest_name.push(snapshot.relative_path());
manifest_name.push(&snapshot_relative_path);
manifest_name.push(MANIFEST_BLOB_NAME);
let mut client_log_name = tgt_store.base_path();
client_log_name.push(snapshot.relative_path());
client_log_name.push(&snapshot_relative_path);
client_log_name.push(CLIENT_LOG_BLOB_NAME);
let mut tmp_manifest_name = manifest_name.clone();
@@ -396,7 +398,7 @@ async fn pull_snapshot(
for item in manifest.files() {
let mut path = tgt_store.base_path();
path.push(snapshot.relative_path());
path.push(&snapshot_relative_path);
path.push(&item.filename);
if path.exists() {
@@ -471,13 +473,14 @@ pub async fn pull_snapshot_from(
worker: &WorkerTask,
reader: Arc<BackupReader>,
tgt_store: Arc<DataStore>,
snapshot: &BackupDir,
snapshot: &pbs_api_types::BackupDir,
downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
) -> Result<(), Error> {
let (_path, is_new, _snap_lock) = tgt_store.create_locked_backup_dir(snapshot)?;
let snapshot_path = snapshot.to_string();
if is_new {
task_log!(worker, "sync snapshot {:?}", snapshot.relative_path());
task_log!(worker, "sync snapshot {:?}", snapshot_path);
if let Err(err) = pull_snapshot(
worker,
@@ -493,9 +496,9 @@ pub async fn pull_snapshot_from(
}
return Err(err);
}
task_log!(worker, "sync snapshot {:?} done", snapshot.relative_path());
task_log!(worker, "sync snapshot {:?} done", snapshot_path);
} else {
task_log!(worker, "re-sync snapshot {:?}", snapshot.relative_path());
task_log!(worker, "re-sync snapshot {:?}", snapshot_path);
pull_snapshot(
worker,
reader,
@@ -504,11 +507,7 @@ pub async fn pull_snapshot_from(
downloaded_chunks,
)
.await?;
task_log!(
worker,
"re-sync snapshot {:?} done",
snapshot.relative_path()
);
task_log!(worker, "re-sync snapshot {:?} done", snapshot_path);
}
Ok(())
@@ -561,7 +560,7 @@ pub async fn pull_group(
worker: &WorkerTask,
client: &HttpClient,
params: &PullParameters,
group: &BackupGroup,
group: &pbs_api_types::BackupGroup,
progress: &mut StoreProgress,
) -> Result<(), Error> {
let path = format!(
@@ -570,8 +569,8 @@ pub async fn pull_group(
);
let args = json!({
"backup-type": group.backup_type(),
"backup-id": group.backup_id(),
"backup-type": group.ty,
"backup-id": group.id,
});
let mut result = client.get(&path, Some(args)).await?;
@@ -599,7 +598,7 @@ pub async fn pull_group(
};
for (pos, item) in list.into_iter().enumerate() {
let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;
let snapshot = item.backup;
// in-progress backups can't be synced
if item.size.is_none() {
@@ -611,7 +610,7 @@ pub async fn pull_group(
continue;
}
let backup_time = snapshot.backup_time();
let backup_time = snapshot.time;
remote_snapshots.insert(backup_time);
@@ -640,8 +639,8 @@ pub async fn pull_group(
new_client,
None,
params.source.store(),
snapshot.group().backup_type(),
snapshot.group().backup_id(),
snapshot.group.ty,
&snapshot.group.id,
backup_time,
true,
)
@@ -663,6 +662,7 @@ pub async fn pull_group(
}
if params.remove_vanished {
let group = params.store.backup_group_from_spec(group.clone());
let local_list = group.list_backups(&params.store.base_path())?;
for info in local_list {
let backup_time = info.backup_dir.backup_time();
@@ -682,7 +682,9 @@ pub async fn pull_group(
"delete vanished snapshot {:?}",
info.backup_dir.relative_path()
);
params.store.remove_backup_dir(&info.backup_dir, false)?;
params
.store
.remove_backup_dir(info.backup_dir.as_ref(), false)?;
}
}
@@ -720,18 +722,15 @@ pub async fn pull_store(
}
});
let apply_filters = |group: &BackupGroup, filters: &[GroupFilter]| -> bool {
let apply_filters = |group: &pbs_api_types::BackupGroup, filters: &[GroupFilter]| -> bool {
filters.iter().any(|filter| group.matches(filter))
};
let list: Vec<BackupGroup> = list
.into_iter()
.map(|item| BackupGroup::new(item.backup.ty, item.backup.id))
.collect();
let list: Vec<pbs_api_types::BackupGroup> = list.into_iter().map(|item| item.backup).collect();
let list = if let Some(ref group_filter) = &params.group_filter {
let unfiltered_count = list.len();
let list: Vec<BackupGroup> = list
let list: Vec<pbs_api_types::BackupGroup> = list
.into_iter()
.filter(|group| apply_filters(group, group_filter))
.collect();
@@ -799,11 +798,11 @@ pub async fn pull_store(
let result: Result<(), Error> = proxmox_lang::try_block!({
for local_group in params.store.iter_backup_groups()? {
let local_group = local_group?;
if new_groups.contains(&local_group) {
if new_groups.contains(local_group.as_ref()) {
continue;
}
if let Some(ref group_filter) = &params.group_filter {
if !apply_filters(&local_group, group_filter) {
if !apply_filters(local_group.as_ref(), group_filter) {
continue;
}
}
@@ -813,7 +812,7 @@ pub async fn pull_store(
local_group.backup_type(),
local_group.backup_id()
);
match params.store.remove_backup_group(&local_group) {
match params.store.remove_backup_group(local_group.as_ref()) {
Ok(true) => {}
Ok(false) => {
task_log!(
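pull_store()/pull_group() now filter on the plain api-type groups before
anything touches the local datastore, which is exactly the commit message's
point about client-side code. A sketch of what a GroupFilter match might look
like -- the variants shown are assumptions about its shape, not the real
pbs_api_types definition:

use regex::Regex;

// Hypothetical filter shape; only matches() against the "<type>/<id>"
// rendering is grounded in the hunks above.
enum GroupFilter {
    BackupType(String), // e.g. "vm"
    Group(String),      // e.g. "vm/100"
    Regex(Regex),
}

struct BackupGroup { ty: String, id: String }

impl BackupGroup {
    fn matches(&self, filter: &GroupFilter) -> bool {
        let full = format!("{}/{}", self.ty, self.id);
        match filter {
            GroupFilter::BackupType(ty) => self.ty == *ty,
            GroupFilter::Group(name) => full == *name,
            GroupFilter::Regex(re) => re.is_match(&full),
        }
    }
}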


@@ -8,7 +8,6 @@ use std::path::{Path, PathBuf};
use anyhow::{bail, format_err, Error};
use endian_trait::Endian;
use pbs_datastore::backup_info::BackupDir;
use proxmox_sys::fs::read_subdir;
use proxmox_io::{ReadExt, WriteExt};
@@ -682,7 +681,7 @@ impl MediaCatalog {
);
}
if let Err(err) = snapshot.parse::<BackupDir>() {
if let Err(err) = snapshot.parse::<pbs_api_types::BackupDir>() {
bail!(
"register_snapshot failed: unable to parse snapshot '{}' - {}",
snapshot,