tree-wide: prefer api-type BackupDir for logging

in combination with DatastoreWithNamespace, so the namespace information
is not lost.

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
commit 1afce610c7 (parent f15601f1c9)
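The pattern applied throughout: log sites stop printing the datastore-backed pbs_datastore::BackupDir directly and instead print its api-type counterpart via .dir(), paired with a DatastoreWithNamespace so the datastore name and the namespace both stay in the message. A minimal sketch of that convention follows, with simplified stand-in types (the real BackupNamespace, BackupDir and DatastoreWithNamespace live in pbs_api_types and carry more fields; both Display formats below are illustrative assumptions, not the exact upstream output):

    use std::fmt;

    // Stand-in: hierarchical namespace, empty vector = root namespace.
    #[derive(Clone, Default)]
    struct BackupNamespace(Vec<String>);

    // Stand-in for the api-type BackupDir: group + time only, no datastore
    // reference, so it is cheap to construct, serialize and print.
    struct BackupDir {
        ty: String, // "vm", "ct" or "host"
        id: String,
        time: String,
    }

    // Pairs a datastore name with a namespace so log lines keep both.
    struct DatastoreWithNamespace {
        store: String,
        ns: BackupNamespace,
    }

    impl fmt::Display for DatastoreWithNamespace {
        // Assumed wording; the real impl may format this differently.
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            if self.ns.0.is_empty() {
                write!(f, "datastore '{}', root namespace", self.store)
            } else {
                write!(f, "datastore '{}', namespace '{}'", self.store, self.ns.0.join("/"))
            }
        }
    }

    impl fmt::Display for BackupDir {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{}/{}/{}", self.ty, self.id, self.time)
        }
    }

    fn main() {
        let store_with_ns = DatastoreWithNamespace {
            store: "tank".into(),
            ns: BackupNamespace(vec!["prod".into()]),
        };
        let dir = BackupDir {
            ty: "vm".into(),
            id: "100".into(),
            time: "2022-05-12T08:00:00Z".into(),
        };
        // The shape used throughout the diff: location first, then snapshot.
        println!("manifest load error on {}, snapshot '{}' - ...", store_with_ns, dir);
    }

The important property is that every log line carries store, namespace and group/time together, so snapshots in nested namespaces remain identifiable.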
@@ -28,7 +28,8 @@ fn run() -> Result<(), Error> {
         println!(" found group {}", group);

         for snapshot in group.iter_snapshots()? {
-            println!("\t{}", snapshot?);
+            let snapshot = snapshot?;
+            println!("\t{}", snapshot.dir());
         }
     }
 }
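The example switches from printing the iterator item directly to rebinding it and printing snapshot.dir(). A sketch of the accessor pattern this relies on, with hypothetical stand-in types (the real datastore-backed BackupDir in pbs-datastore also carries a datastore handle and namespace):

    use std::fmt;

    // Stand-in for pbs_api_types::BackupDir (group + time).
    struct ApiBackupDir {
        group: String,
        time: String,
    }

    impl fmt::Display for ApiBackupDir {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "{}/{}", self.group, self.time)
        }
    }

    // Stand-in for the datastore-backed snapshot: it embeds the api-type dir.
    struct Snapshot {
        dir: ApiBackupDir,
    }

    impl Snapshot {
        // The accessor the whole commit leans on: expose the api type for display.
        fn dir(&self) -> &ApiBackupDir {
            &self.dir
        }
    }

    fn main() {
        let snapshot = Snapshot {
            dir: ApiBackupDir { group: "vm/100".into(), time: "2022-05-12T08:00:00Z".into() },
        };
        println!("\t{}", snapshot.dir());
    }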
@@ -8,7 +8,7 @@ use nix::dir::Dir;

 use proxmox_sys::fs::lock_dir_noblock_shared;

-use pbs_api_types::{BackupNamespace, Operation};
+use pbs_api_types::{BackupNamespace, DatastoreWithNamespace, Operation};

 use crate::backup_info::BackupDir;
 use crate::dynamic_index::DynamicIndexReader;
@@ -39,20 +39,23 @@ impl SnapshotReader {

     pub(crate) fn new_do(snapshot: BackupDir) -> Result<Self, Error> {
         let datastore = snapshot.datastore();
+        let store_with_ns = DatastoreWithNamespace {
+            store: datastore.name().to_owned(),
+            ns: snapshot.backup_ns().clone(),
+        };
+
         let snapshot_path = snapshot.full_path();

         let locked_dir =
             lock_dir_noblock_shared(&snapshot_path, "snapshot", "locked by another operation")?;

-        let datastore_name = datastore.name().to_string();
-
         let manifest = match snapshot.load_manifest() {
             Ok((manifest, _)) => manifest,
             Err(err) => {
                 bail!(
-                    "manifest load error on datastore '{}' snapshot '{}' - {}",
-                    datastore_name,
-                    snapshot,
+                    "manifest load error on {}, snapshot '{}' - {}",
+                    store_with_ns,
+                    snapshot.dir(),
                     err
                 );
             }
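The reader now builds the DatastoreWithNamespace up front and reuses it in the bail! message instead of keeping a separate datastore_name string. A reduced sketch of just that error path, assuming anyhow's bail! as used in the crate; the function name and signature are made up for illustration:

    use anyhow::{bail, Error};

    // Hypothetical reduction of SnapshotReader::new_do()'s error path; only
    // the message layout matches the hunk above.
    fn check_manifest(
        manifest: Result<String, Error>,
        store_with_ns: &str, // stands in for the DatastoreWithNamespace value
        snapshot: &str,      // stands in for snapshot.dir()
    ) -> Result<String, Error> {
        match manifest {
            Ok(manifest) => Ok(manifest),
            Err(err) => bail!(
                "manifest load error on {}, snapshot '{}' - {}",
                store_with_ns,
                snapshot,
                err
            ),
        }
    }

    fn main() {
        let res = check_manifest(Err(Error::msg("checksum mismatch")), "datastore 'tank'", "vm/100");
        eprintln!("{}", res.unwrap_err());
    }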
@@ -33,12 +33,13 @@ use pxar::EntryKind;

 use pbs_api_types::{
     Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
-    DataStoreStatus, GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode,
-    RRDTimeFrame, SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
-    BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA,
-    DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA,
-    PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE,
-    PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus, GroupListItem, Operation,
+    PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
+    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
+    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
+    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
+    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA,
+    VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -1257,6 +1258,11 @@ pub fn download_file(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let store = required_string_param(&param, "store")?;
     let backup_ns = optional_ns_param(&param)?;
+
+    let store_with_ns = DatastoreWithNamespace {
+        store: store.to_owned(),
+        ns: backup_ns.clone(),
+    };
     let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
     let datastore = check_privs_and_load_store(
         &store,
@@ -1268,15 +1274,15 @@ pub fn download_file(
         &backup_dir.group,
     )?;

-    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
-
     let file_name = required_string_param(&param, "file-name")?.to_owned();

     println!(
         "Download {} from {} ({}/{})",
-        file_name, store, backup_dir, file_name
+        file_name, store_with_ns, backup_dir, file_name
     );

+    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
+
     let mut path = datastore.base_path();
     path.push(backup_dir.relative_path());
     path.push(&file_name);
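The println! moves above the datastore.backup_dir(...) call because that call takes the api-type value by value; the hunks below take the alternative route and clone instead. A small sketch of both strategies, with a stand-in Dir type and a hypothetical into_store_dir() standing in for datastore.backup_dir():

    // Stand-in for the api-type BackupDir; into_store_dir is a hypothetical
    // substitute for datastore.backup_dir(...), which consumes the api value.
    #[derive(Clone)]
    struct Dir(String);

    fn into_store_dir(dir: Dir) -> Dir {
        dir
    }

    fn main() {
        // download_file: log first, then let the value be moved.
        let dir = Dir("vm/100".into());
        println!("Download from {}", dir.0);
        let _store_dir = into_store_dir(dir); // `dir` is moved; unusable afterwards

        // download_file_decoded / upload_backup_log: clone, so the api value
        // stays available for log lines further down.
        let dir_api = Dir("vm/100".into());
        let _store_dir = into_store_dir(dir_api.clone());
        println!("still available for logging: {}", dir_api.0);
    }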
@@ -1338,7 +1344,11 @@ pub fn download_file_decoded(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let store = required_string_param(&param, "store")?;
     let backup_ns = optional_ns_param(&param)?;
-    let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
+    let store_with_ns = DatastoreWithNamespace {
+        store: store.to_owned(),
+        ns: backup_ns.clone(),
+    };
+    let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
     let datastore = check_privs_and_load_store(
         &store,
         &backup_ns,
@@ -1346,12 +1356,11 @@ pub fn download_file_decoded(
         PRIV_DATASTORE_READ,
         PRIV_DATASTORE_BACKUP,
         Some(Operation::Read),
-        &backup_dir.group,
+        &backup_dir_api.group,
     )?;

-    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
-
     let file_name = required_string_param(&param, "file-name")?.to_owned();
+    let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

     let (manifest, files) = read_backup_index(&backup_dir)?;
     for file in files {
@@ -1362,7 +1371,7 @@ pub fn download_file_decoded(

     println!(
         "Download {} from {} ({}/{})",
-        file_name, store, backup_dir, file_name
+        file_name, store_with_ns, backup_dir_api, file_name
     );

     let mut path = datastore.base_path();
@@ -1465,7 +1474,11 @@ pub fn upload_backup_log(
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;
     let store = required_string_param(&param, "store")?;
     let backup_ns = optional_ns_param(&param)?;
-    let backup_dir: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;
+    let store_with_ns = DatastoreWithNamespace {
+        store: store.to_owned(),
+        ns: backup_ns.clone(),
+    };
+    let backup_dir_api: pbs_api_types::BackupDir = Deserialize::deserialize(&param)?;

     let datastore = check_privs_and_load_store(
         &store,
@@ -1474,9 +1487,9 @@ pub fn upload_backup_log(
         0,
         PRIV_DATASTORE_BACKUP,
         Some(Operation::Write),
-        &backup_dir.group,
+        &backup_dir_api.group,
     )?;
-    let backup_dir = datastore.backup_dir(backup_ns, backup_dir)?;
+    let backup_dir = datastore.backup_dir(backup_ns, backup_dir_api.clone())?;

     let file_name = CLIENT_LOG_BLOB_NAME;

@@ -1487,7 +1500,7 @@ pub fn upload_backup_log(
         bail!("backup already contains a log.");
     }

-    println!("Upload backup log to {store}/{backup_dir}/{file_name}");
+    println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");

     let data = req_body
         .map_err(Error::from)
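The new message relies on inline format-args capture (stable since Rust 1.58): plain identifiers can be named directly inside the format string. A runnable sketch with placeholder values:

    fn main() {
        // Placeholder values; the real ones are a DatastoreWithNamespace, an
        // api-type BackupDir and a blob file name.
        let store_with_ns = "datastore 'tank', namespace 'prod'";
        let backup_dir_api = "vm/100/2022-05-12T08:00:00Z";
        let file_name = "client.log.blob";
        println!("Upload backup log to {store_with_ns} {backup_dir_api}/{file_name}");
    }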
@@ -618,7 +618,7 @@ impl BackupEnvironment {
         if !path.exists() {
             bail!(
                 "base snapshot {} was removed during backup, cannot finish as chunks might be missing",
-                base.backup_dir
+                base.backup_dir.dir(),
             );
         }
     }
@@ -341,7 +341,7 @@ pub fn verify_backup_dir(
                 verify_worker.worker,
                 "SKIPPED: verify {}:{} - could not acquire snapshot lock: {}",
                 verify_worker.datastore.name(),
-                backup_dir,
+                backup_dir.dir(),
                 err,
             );
             Ok(true)
@@ -364,7 +364,7 @@ pub fn verify_backup_dir_with_lock(
                 verify_worker.worker,
                 "verify {}:{} - manifest load error: {}",
                 verify_worker.datastore.name(),
-                backup_dir,
+                backup_dir.dir(),
                 err,
             );
             return Ok(false);
@@ -377,7 +377,7 @@ pub fn verify_backup_dir_with_lock(
                 verify_worker.worker,
                 "SKIPPED: verify {}:{} (recently verified)",
                 verify_worker.datastore.name(),
-                backup_dir,
+                backup_dir.dir(),
             );
             return Ok(true);
         }
@@ -387,7 +387,7 @@ pub fn verify_backup_dir_with_lock(
         verify_worker.worker,
         "verify {}:{}",
         verify_worker.datastore.name(),
-        backup_dir
+        backup_dir.dir()
     );

     let mut error_count = 0;
@@ -411,7 +411,7 @@ pub fn verify_backup_dir_with_lock(
                     verify_worker.worker,
                     "verify {}:{}/{} failed: {}",
                     verify_worker.datastore.name(),
-                    backup_dir,
+                    backup_dir.dir(),
                     info.filename,
                     err,
                 );
@@ -384,8 +384,6 @@ async fn pull_snapshot(
     let mut tmp_manifest_name = manifest_name.clone();
     tmp_manifest_name.set_extension("tmp");

-    let dir: &pbs_api_types::BackupDir = snapshot.as_ref();
-
     let download_res = download_manifest(&reader, &tmp_manifest_name).await;
     let mut tmp_manifest_file = match download_res {
         Ok(manifest_file) => manifest_file,
@@ -395,7 +393,8 @@ async fn pull_snapshot(
             StatusCode::NOT_FOUND => {
                 task_log!(
                     worker,
-                    "skipping snapshot {dir} - vanished since start of sync",
+                    "skipping snapshot {} - vanished since start of sync",
+                    snapshot.dir(),
                 );
                 return Ok(());
             }
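Note the direction of this change: the old string captured the now-removed dir binding inline, and since inline capture only works for plain identifiers, not expressions like snapshot.dir(), the format string goes back to a positional {} with an explicit argument. A minimal illustration:

    fn main() {
        let name = "vm/100";
        println!("skipping snapshot {name}"); // identifier: inline capture works
        // println!("{name.to_uppercase()}"); // expression: does not compile
        println!("skipping snapshot {}", name.to_uppercase()); // positional arg
    }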
@@ -518,30 +517,28 @@ async fn pull_snapshot_from(
     snapshot: &pbs_datastore::BackupDir,
     downloaded_chunks: Arc<Mutex<HashSet<[u8; 32]>>>,
 ) -> Result<(), Error> {
-    let dir: &pbs_api_types::BackupDir = snapshot.as_ref();
-
     let (_path, is_new, _snap_lock) = snapshot
         .datastore()
-        .create_locked_backup_dir(snapshot.backup_ns(), dir)?;
+        .create_locked_backup_dir(snapshot.backup_ns(), snapshot.as_ref())?;

     if is_new {
-        task_log!(worker, "sync snapshot {}", dir);
+        task_log!(worker, "sync snapshot {}", snapshot.dir());

         if let Err(err) = pull_snapshot(worker, reader, snapshot, downloaded_chunks).await {
-            if let Err(cleanup_err) =
-                snapshot
-                    .datastore()
-                    .remove_backup_dir(snapshot.backup_ns(), dir, true)
-            {
+            if let Err(cleanup_err) = snapshot.datastore().remove_backup_dir(
+                snapshot.backup_ns(),
+                snapshot.as_ref(),
+                true,
+            ) {
                 task_log!(worker, "cleanup error - {}", cleanup_err);
             }
             return Err(err);
         }
-        task_log!(worker, "sync snapshot {} done", dir);
+        task_log!(worker, "sync snapshot {} done", snapshot.dir());
     } else {
-        task_log!(worker, "re-sync snapshot {}", dir);
+        task_log!(worker, "re-sync snapshot {}", snapshot.dir());
         pull_snapshot(worker, reader, snapshot, downloaded_chunks).await?;
-        task_log!(worker, "re-sync snapshot {} done", dir);
+        task_log!(worker, "re-sync snapshot {} done", snapshot.dir());
     }

     Ok(())
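snapshot.as_ref() replaces the dropped dir binding; this works because the datastore-backed BackupDir can hand out its embedded api-type value through AsRef. A sketch with stand-in types (the real impl lives in pbs-datastore, and remove_backup_dir is a hypothetical free function here):

    // Stand-in types; names are illustrative only.
    struct ApiDir(String);

    struct StoreDir {
        api: ApiDir,
    }

    impl AsRef<ApiDir> for StoreDir {
        fn as_ref(&self) -> &ApiDir {
            &self.api
        }
    }

    fn remove_backup_dir(dir: &ApiDir) {
        println!("removing {}", dir.0);
    }

    fn main() {
        let snapshot = StoreDir { api: ApiDir("vm/100/2022-05-12T08:00:00Z".into()) };
        // Replaces the dropped `let dir: &pbs_api_types::BackupDir = snapshot.as_ref();`
        remove_backup_dir(snapshot.as_ref());
    }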
@@ -716,22 +713,22 @@ async fn pull_group(
         let group = params.store.backup_group(target_ns.clone(), group.clone());
         let local_list = group.list_backups()?;
         for info in local_list {
-            let snapshot: &pbs_api_types::BackupDir = info.backup_dir.as_ref();
-            if remote_snapshots.contains(&snapshot.time) {
+            let snapshot = info.backup_dir;
+            if remote_snapshots.contains(&snapshot.backup_time()) {
                 continue;
             }
-            if info.backup_dir.is_protected() {
+            if snapshot.is_protected() {
                 task_log!(
                     worker,
                     "don't delete vanished snapshot {} (protected)",
-                    snapshot
+                    snapshot.dir()
                 );
                 continue;
             }
-            task_log!(worker, "delete vanished snapshot {}", snapshot);
+            task_log!(worker, "delete vanished snapshot {}", snapshot.dir());
             params
                 .store
-                .remove_backup_dir(&target_ns, snapshot, false)?;
+                .remove_backup_dir(&target_ns, snapshot.as_ref(), false)?;
         }
     }