tape/verify: use print_ns_and_snapshot

in those few places where we actually want to use/print the full,
NS-included path.
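
For illustration, a minimal sketch of what the helper is used for here, under stated assumptions: the namespace is rendered with the on-disk `ns/<component>` layout, and the snapshot part is simplified to a plain string (the real `pbs_api_types::print_ns_and_snapshot` takes a `&BackupNamespace` and the API `BackupDir` type instead). The previous `to_string()` calls in the diffs below used `BackupDir`'s `Display` impl, which never included the namespace.

    // Simplified stand-in for pbs_api_types::BackupNamespace -- assumption,
    // for illustration only.
    struct BackupNamespace(Vec<String>);

    impl BackupNamespace {
        fn is_root(&self) -> bool {
            self.0.is_empty()
        }

        // assumed layout: each namespace component is prefixed with "ns/"
        fn display_as_path(&self) -> String {
            self.0
                .iter()
                .map(|c| format!("ns/{}", c))
                .collect::<Vec<_>>()
                .join("/")
        }
    }

    // sketch of the helper: prepend the namespace path unless the snapshot
    // lives in the root namespace
    fn print_ns_and_snapshot(ns: &BackupNamespace, snapshot: &str) -> String {
        if ns.is_root() {
            snapshot.to_string()
        } else {
            format!("{}/{}", ns.display_as_path(), snapshot)
        }
    }

    fn main() {
        let snap = "vm/100/2022-05-16T08:40:06Z";
        let root = BackupNamespace(Vec::new());
        let nested = BackupNamespace(vec!["prod".into(), "db".into()]);

        // root namespace: same as the old Display-based output
        assert_eq!(print_ns_and_snapshot(&root, snap), snap);
        // nested namespace: the full, NS-included path
        assert_eq!(
            print_ns_and_snapshot(&nested, snap),
            "ns/prod/ns/db/vm/100/2022-05-16T08:40:06Z"
        );
    }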

Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
Fabian Grünbichler, 2022-05-16 10:40:06 +02:00 (committed by Thomas Lamprecht)
parent f2fe00f1e2
commit 5ae393af15
3 changed files with 46 additions and 25 deletions

src/api2/admin/datastore.rs

@@ -32,14 +32,14 @@ use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;
 
 use pbs_api_types::{
-    Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode, DataStoreListItem,
-    DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus, GroupListItem, Operation,
-    PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem, SnapshotVerifyState,
-    BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA, MAX_NAMESPACE_DEPTH,
-    NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_MODIFY,
-    PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY, UPID_SCHEMA,
-    VERIFICATION_OUTDATED_AFTER_SCHEMA,
+    print_ns_and_snapshot, Authid, BackupContent, BackupNamespace, BackupType, Counts, CryptMode,
+    DataStoreListItem, DataStoreStatus, DatastoreWithNamespace, GarbageCollectionStatus,
+    GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame, SnapshotListItem,
+    SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
+    MAX_NAMESPACE_DEPTH, NS_MAX_DEPTH_SCHEMA, PRIV_DATASTORE_AUDIT, PRIV_DATASTORE_BACKUP,
+    PRIV_DATASTORE_MODIFY, PRIV_DATASTORE_PRUNE, PRIV_DATASTORE_READ, PRIV_DATASTORE_VERIFY,
+    UPID_SCHEMA, VERIFICATION_OUTDATED_AFTER_SCHEMA,
 };
 use pbs_client::pxar::{create_tar, create_zip};
 use pbs_config::CachedUserInfo;
@@ -831,7 +831,10 @@ pub fn verify(
                 worker.upid().clone(),
                 Some(&move |manifest| verify_filter(ignore_verified, outdated_after, manifest)),
             )? {
-                res.push(backup_dir.to_string());
+                res.push(print_ns_and_snapshot(
+                    backup_dir.backup_ns(),
+                    backup_dir.as_ref(),
+                ));
             }
             res
         } else if let Some(backup_group) = backup_group {
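
Note: this list feeds the verify task's result, so an entry for a snapshot in a non-root namespace now reads e.g. `ns/prod/vm/100/2022-05-16T08:40:06Z` instead of the namespace-less `vm/100/2022-05-16T08:40:06Z` (example values; the rendering assumed is the one sketched above).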

src/api2/tape/backup.rs

@@ -10,9 +10,9 @@ use proxmox_schema::api;
 use proxmox_sys::{task_log, task_warn, WorkerTaskContext};
 
 use pbs_api_types::{
-    Authid, GroupFilter, MediaPoolConfig, Operation, TapeBackupJobConfig, TapeBackupJobSetup,
-    TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, PRIV_DATASTORE_READ, PRIV_TAPE_AUDIT,
-    PRIV_TAPE_WRITE, UPID_SCHEMA,
+    print_ns_and_snapshot, Authid, GroupFilter, MediaPoolConfig, Operation, TapeBackupJobConfig,
+    TapeBackupJobSetup, TapeBackupJobStatus, Userid, JOB_ID_SCHEMA, PRIV_DATASTORE_READ,
+    PRIV_TAPE_AUDIT, PRIV_TAPE_WRITE, UPID_SCHEMA,
 };
 
 use pbs_config::CachedUserInfo;
@@ -484,18 +484,19 @@ fn backup_worker(
         if latest_only {
             progress.group_snapshots = 1;
             if let Some(info) = snapshot_list.pop() {
-                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
-                    task_log!(worker, "skip snapshot {}", info.backup_dir);
+                let rel_path =
+                    print_ns_and_snapshot(info.backup_dir.backup_ns(), info.backup_dir.as_ref());
+                if pool_writer.contains_snapshot(datastore_name, &rel_path) {
+                    task_log!(worker, "skip snapshot {}", rel_path);
                     continue;
                 }
 
                 need_catalog = true;
 
-                let snapshot_name = info.backup_dir.to_string();
                 if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                     errors = true;
                 } else {
-                    summary.snapshot_list.push(snapshot_name);
+                    summary.snapshot_list.push(rel_path);
                 }
                 progress.done_snapshots = 1;
                 task_log!(worker, "percentage done: {}", progress);
@@ -503,18 +504,20 @@ fn backup_worker(
         } else {
             progress.group_snapshots = snapshot_list.len() as u64;
             for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
-                if pool_writer.contains_snapshot(datastore_name, &info.backup_dir.to_string()) {
-                    task_log!(worker, "skip snapshot {}", info.backup_dir);
+                let rel_path =
+                    print_ns_and_snapshot(info.backup_dir.backup_ns(), info.backup_dir.as_ref());
+
+                if pool_writer.contains_snapshot(datastore_name, &rel_path) {
+                    task_log!(worker, "skip snapshot {}", rel_path);
                     continue;
                 }
 
                 need_catalog = true;
 
-                let snapshot_name = info.backup_dir.to_string();
                 if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                     errors = true;
                 } else {
-                    summary.snapshot_list.push(snapshot_name);
+                    summary.snapshot_list.push(rel_path);
                 }
                 progress.done_snapshots = snapshot_number as u64 + 1;
                 task_log!(worker, "percentage done: {}", progress);
@@ -582,13 +585,19 @@ pub fn backup_snapshot(
     datastore: Arc<DataStore>,
     snapshot: BackupDir,
 ) -> Result<bool, Error> {
-    task_log!(worker, "backup snapshot {}", snapshot);
+    let snapshot_path = snapshot.relative_path();
+    task_log!(worker, "backup snapshot {:?}", snapshot_path);
 
     let snapshot_reader = match snapshot.locked_reader() {
         Ok(reader) => reader,
         Err(err) => {
             // ignore missing snapshots and continue
-            task_warn!(worker, "failed opening snapshot '{}': {}", snapshot, err);
+            task_warn!(
+                worker,
+                "failed opening snapshot {:?}: {}",
+                snapshot_path,
+                err
+            );
             return Ok(false);
         }
     };
@@ -650,7 +659,12 @@ pub fn backup_snapshot(
         }
     }
 
-    task_log!(worker, "end backup {}:{}", datastore.name(), snapshot);
+    task_log!(
+        worker,
+        "end backup {}:{:?}",
+        datastore.name(),
+        snapshot_path
+    );
 
     Ok(true)
 }
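
These two hunks switch the format specifier from `{}` to `{:?}` because the log lines now print the snapshot's relative path rather than the `BackupDir` itself. Assuming `relative_path()` returns a `std::path::PathBuf` (an assumption based on the usage here), `{:?}` is required since `Path` implements `Debug` but not `Display`, and the path therefore appears quoted in the task log:

    use std::path::PathBuf;

    fn main() {
        // assumed: snapshot.relative_path() yields a std::path::PathBuf
        let snapshot_path = PathBuf::from("ns/prod/vm/100/2022-05-16T08:40:06Z");
        // Debug output prints the path quoted:
        println!("backup snapshot {:?}", snapshot_path);
        // prints: backup snapshot "ns/prod/vm/100/2022-05-16T08:40:06Z"
    }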

src/backup/verify.rs

@@ -9,7 +9,8 @@ use anyhow::{bail, format_err, Error};
 use proxmox_sys::{task_log, WorkerTaskContext};
 
 use pbs_api_types::{
-    Authid, BackupNamespace, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID,
+    print_ns_and_snapshot, Authid, BackupNamespace, BackupType, CryptMode, SnapshotVerifyState,
+    VerifyState, UPID,
 };
 use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
 use pbs_datastore::index::IndexFile;
@@ -477,7 +478,10 @@ pub fn verify_backup_group(
     BackupInfo::sort_list(&mut list, false); // newest first
     for (pos, info) in list.into_iter().enumerate() {
         if !verify_backup_dir(verify_worker, &info.backup_dir, upid.clone(), filter)? {
-            errors.push(info.backup_dir.to_string());
+            errors.push(print_ns_and_snapshot(
+                info.backup_dir.backup_ns(),
+                info.backup_dir.as_ref(),
+            ));
         }
         progress.done_snapshots = pos as u64 + 1;
         task_log!(verify_worker.worker, "percentage done: {}", progress);