tree-wide: prefer api-type BackupDir for logging

in combination with DatastoreWithNamespace, so the namespace information
is not lost.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Authored by Fabian Grünbichler on 2022-05-16 10:33:59 +02:00; committed by Thomas Lamprecht
parent f15601f1c9
commit 1afce610c7
6 changed files with 65 additions and 51 deletions
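
The pattern at a glance: logging with the datastore name alone drops the
namespace a snapshot lives in, so the messages below pair a
DatastoreWithNamespace (store plus namespace) with the plain api-type
BackupDir returned by snapshot.dir(). A minimal self-contained sketch of
the idea, with hypothetical stand-in types and assumed Display impls (the
real types live in pbs_api_types):

    use std::fmt;

    // Hypothetical stand-in for pbs_api_types::BackupNamespace: a
    // path-like list of components, empty meaning the root namespace.
    struct BackupNamespace(Vec<String>);

    impl fmt::Display for BackupNamespace {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{}", self.0.join("/"))
        }
    }

    // Hypothetical stand-in for pbs_api_types::DatastoreWithNamespace;
    // the `store`/`ns` field names match how the hunks below construct it.
    struct DatastoreWithNamespace {
        store: String,
        ns: BackupNamespace,
    }

    impl fmt::Display for DatastoreWithNamespace {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            if self.ns.0.is_empty() {
                write!(f, "datastore '{}'", self.store)
            } else {
                write!(f, "datastore '{}', namespace '{}'", self.store, self.ns)
            }
        }
    }

    fn main() {
        let store_with_ns = DatastoreWithNamespace {
            store: "tank".to_owned(),
            ns: BackupNamespace(vec!["prod".into(), "vms".into()]),
        };
        // Logging the store name alone would print just "tank"; the
        // combined type keeps the namespace in every message:
        println!("manifest load error on {}, snapshot '...' - ...", store_with_ns);
    }

With namespaces, snapshots in different namespaces can share store, type,
ID and time, so a message that prints only the store name is ambiguous;
carrying the namespace in the Display output keeps task logs unambiguous.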

View File

@@ -28,7 +28,8 @@ fn run() -> Result<(), Error> {
             println!(" found group {}", group);
 
             for snapshot in group.iter_snapshots()? {
-                println!("\t{}", snapshot?);
+                let snapshot = snapshot?;
+                println!("\t{}", snapshot.dir());
             }
         }
     }

View File

@@ -8,7 +8,7 @@ use nix::dir::Dir;
 use proxmox_sys::fs::lock_dir_noblock_shared;
 
-use pbs_api_types::{BackupNamespace, Operation};
+use pbs_api_types::{BackupNamespace, DatastoreWithNamespace, Operation};
 
 use crate::backup_info::BackupDir;
 use crate::dynamic_index::DynamicIndexReader;
@@ -39,20 +39,23 @@ impl SnapshotReader {
     pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
         let datastore = snapshot.datastore();
+        let store_with_ns = DatastoreWithNamespace {
+            store: datastore.name().to_owned(),
+            ns: snapshot.backup_ns().clone(),
+        };
         let snapshot_path = snapshot.full_path();
 
         let locked_dir =
             lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?;
 
         let datastore_name = datastore.name().to_string();
 
         let manifest = match snapshot.load_manifest() {
             Ok((manifest, _)) => manifest,
             Err(err) => {
                 bail!(
-                    "manifest load error on datastore '{}' snapshot '{}' - {}",
-                    datastore_name,
-                    snapshot,
+                    "manifest load error on {}, snapshot '{}' - {}",
+                    store_with_ns,
+                    snapshot.dir(),
                     err
                 );
             }
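
For context on the repeated snapshot.dir() calls in this commit: the
datastore-bound BackupDir used here wraps the plain
pbs_api_types::BackupDir, and .dir() hands back that api type, which
presumably renders as the usual type/id/time triple when logged rather
than, say, a filesystem path. A rough sketch under that assumption; the
names below are illustrative stand-ins, not the real pbs-datastore API:

    use std::fmt;

    // Hypothetical api-type snapshot identifier: backup type, ID and
    // snapshot time, rendered as "<type>/<id>/<time>" when logged.
    struct ApiBackupDir {
        ty: &'static str, // e.g. "vm", "ct", "host"
        id: String,
        time: String, // RFC3339 in the real type; a plain string here
    }

    impl fmt::Display for ApiBackupDir {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{}/{}/{}", self.ty, self.id, self.time)
        }
    }

    // Hypothetical datastore-bound snapshot mirroring the `.dir()`
    // accessor used throughout this commit.
    struct DatastoreBackupDir {
        dir: ApiBackupDir,
        // ... datastore handle, namespace, locks, etc. in the real type
    }

    impl DatastoreBackupDir {
        fn dir(&self) -> &ApiBackupDir {
            &self.dir
        }
    }

    fn main() {
        let snapshot = DatastoreBackupDir {
            dir: ApiBackupDir {
                ty: "vm",
                id: "100".to_owned(),
                time: "2022-05-16T08:33:59Z".to_owned(),
            },
        };
        // Prints "vm/100/2022-05-16T08:33:59Z", the snapshot part of the
        // logs; the namespace is carried by DatastoreWithNamespace.
        println!("{}", snapshot.dir());
    }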

View File

@@ -33,12 +33,13 @@ use pxar::EntryKind;
 use pbs_api_types::{
     Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
-    DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
-    RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
-    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
-    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
-    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus, GroupListItem, Operation,
+    PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
+    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
+    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA,
+    VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -1257,6 +1258,11 @@ pub fn download_file(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let store = required_string_param(&param, "store")?;
     let backup_ns = optional_ns_param(&param)?;
+    let store_with_ns = DatastoreWithNamespace {
+        store: store.to_owned(),
+        ns: backup_ns.clone(),
+    };
+
     let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
     let datastore = check_privs_and_load_store(
         &store,
@@ -1268,15 +1274,15 @@ pub fn download_file(
         &backup_dir.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
-
     let file_name = required_string_param(&param, "file-name")?.to_owned();
 
     println!(
         "Download {} from {} ({}/{})",
-        file_name, store, backup_dir, file_name
+        file_name, store_with_ns, backup_dir, file_name
     );
 
+    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
+
     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
     path.push(&file_name);
@@ -1338,7 +1344,11 @@ pub fn download_file_decoded(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let store = required_string_param(&param, "store")?;
     let backup_ns = optional_ns_param(&param)?;
-    let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
+    let store_with_ns = DatastoreWithNamespace {
+        store: store.to_owned(),
+        ns: backup_ns.clone(),
+    };
+    let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
     let datastore = check_privs_and_load_store(
         &store,
         &backup_ns,
@@ -1346,12 +1356,11 @@ pub fn download_file_decoded(
         PRIV_DATASTORE_READ,
         PRIV_DATASTORE_BACKUP,
         Some(Operation::Read),
-        &backup_dir.group,
+        &backup_dir_api.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
-
     let file_name = required_string_param(&param, "file-name")?.to_owned();
 
+    let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
     let (manifest, files) = read_backup_index(&backup_dir)?;
     for file in files {
@@ -1362,7 +1371,7 @@ pub fn download_file_decoded(
 
     println!(
         "Download {} from {} ({}/{})",
-        file_name, store, backup_dir, file_name
+        file_name, store_with_ns, backup_dir_api, file_name
     );
 
     let mut path = datastore.base_path();
@@ -1465,7 +1474,11 @@ pub fn upload_backup_log(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let store = required_string_param(&param, "store")?;
     let backup_ns = optional_ns_param(&param)?;
-    let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
+    let store_with_ns = DatastoreWithNamespace {
+        store: store.to_owned(),
+        ns: backup_ns.clone(),
+    };
+    let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
 
     let datastore = check_privs_and_load_store(
         &store,
@@ -1474,9 +1487,9 @@
         0,
         PRIV_DATASTORE_BACKUP,
         Some(Operation::Write),
-        &backup_dir.group,
+        &backup_dir_api.group,
     )?;
 
-    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
+    let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;
 
     let file_name = CLIENT_LOG_BLOB_NAME;
@@ -1487,7 +1500,7 @@
         bail!("backup already contains a log.");
     }
 
-    println!("Upload backup log to {store}/{backup_dir}/{file_name}");
+    println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");
 
     let data = req_body
         .map_err(Error::from)

View File

@@ -618,7 +618,7 @@ impl BackupEnvironment {
             if !path.exists() {
                 bail!(
                     "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
-                    base.backup_dir
+                    base.backup_dir.dir(),
                 );
             }
         }

View File

@@ -341,7 +341,7 @@ pub fn verify_backup_dir(
                 verify_worker.worker,
                 "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
                 verify_worker.datastore.name(),
-                backup_dir,
+                backup_dir.dir(),
                 err,
             );
             Ok(true)
@@ -364,7 +364,7 @@ pub fn verify_backup_dir_with_lock(
                 verify_worker.worker,
                 "verify {}:{} - manifest load error: {}",
                 verify_worker.datastore.name(),
-                backup_dir,
+                backup_dir.dir(),
                 err,
             );
             return Ok(false);
@@ -377,7 +377,7 @@ pub fn verify_backup_dir_with_lock(
                 verify_worker.worker,
                 "SKIPPED: verify {}:{} (recently verified)",
                 verify_worker.datastore.name(),
-                backup_dir,
+                backup_dir.dir(),
             );
             return Ok(true);
         }
@@ -387,7 +387,7 @@ pub fn verify_backup_dir_with_lock(
         verify_worker.worker,
         "verify {}:{}",
         verify_worker.datastore.name(),
-        backup_dir
+        backup_dir.dir()
     );
 
     let mut error_count = 0;
@@ -411,7 +411,7 @@ pub fn verify_backup_dir_with_lock(
                     verify_worker.worker,
                     "verify {}:{}/{} failed: {}",
                     verify_worker.datastore.name(),
-                    backup_dir,
+                    backup_dir.dir(),
                     info.filename,
                     err,
                 );

View File

@@ -384,8 +384,6 @@ async fn pull_snapshot(
     let mut tmp_manifest_name = manifest_name.clone();
     tmp_manifest_name.set_extension("tmp");
 
-    let dir: &pbs_api_types::BackupDir = snapshot.as_ref();
-
     let download_res = download_manifest(&reader, &tmp_manifest_name).await;
     let mut tmp_manifest_file = match download_res {
         Ok(manifest_file) => manifest_file,
@@ -395,7 +393,8 @@
             StatusCode::NOT_FOUND => {
                 task_log!(
                     worker,
-                    "skipping snapshot {dir} - vanished since start of sync",
+                    "skipping snapshot {} - vanished since start of sync",
+                    snapshot.dir(),
                 );
                 return Ok(());
             }
@@ -518,30 +517,28 @@ async fn pull_snapshot_from(
     snapshot: &pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), Error> {
-    let dir: &pbs_api_types::BackupDir = snapshot.as_ref();
-
     let (_path, is_new, _snap_lock) = snapshot
         .datastore()
-        .create_locked_backup_dir(snapshot.backup_ns(), dir)?;
+        .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;
 
     if is_new {
-        task_log!(worker, "sync snapshot {}", dir);
+        task_log!(worker, "sync snapshot {}", snapshot.dir());
 
         if let Err(err) = pull_snapshot(worker, reader, snapshot, downloaded_chunks).await {
-            if let Err(cleanup_err) =
-                snapshot
-                    .datastore()
-                    .remove_backup_dir(snapshot.backup_ns(), dir, true)
-            {
+            if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
+                snapshot.backup_ns(),
+                snapshot.as_ref(),
+                true,
+            ) {
                 task_log!(worker, "cleanup error - {}", cleanup_err);
             }
             return Err(err);
         }
-        task_log!(worker, "sync snapshot {} done", dir);
+        task_log!(worker, "sync snapshot {} done", snapshot.dir());
     } else {
-        task_log!(worker, "re-sync snapshot {}", dir);
+        task_log!(worker, "re-sync snapshot {}", snapshot.dir());
         pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?;
-        task_log!(worker, "re-sync snapshot {} done", dir);
+        task_log!(worker, "re-sync snapshot {} done", snapshot.dir());
     }
 
     Ok(())
@@ -716,22 +713,22 @@ async fn pull_group(
         let group = params.store.backup_group(target_ns.clone(), group.clone());
         let local_list = group.list_backups()?;
         for info in local_list {
-            let snapshot: &pbs_api_types::BackupDir = info.backup_dir.as_ref();
-            if remote_snapshots.contains(&snapshot.time) {
+            let snapshot = info.backup_dir;
+            if remote_snapshots.contains(&snapshot.backup_time()) {
                 continue;
             }
-            if info.backup_dir.is_protected() {
+            if snapshot.is_protected() {
                 task_log!(
                     worker,
                     "don't delete vanished snapshot {} (protected)",
-                    snapshot
+                    snapshot.dir()
                 );
                 continue;
             }
 
-            task_log!(worker, "delete vanished snapshot {}", snapshot);
+            task_log!(worker, "delete vanished snapshot {}", snapshot.dir());
             params
                 .store
-                .remove_backup_dir(&target_ns, snapshot, false)?;
+                .remove_backup_dir(&target_ns, snapshot.as_ref(), false)?;
         }
     }