tape: improve backup task log

Dietmar Maurer 2021-03-11 08:43:13 +01:00
parent d1d74c4367
commit 2c10410b0d
2 changed files with 7 additions and 6 deletions

@@ -417,9 +417,9 @@ fn backup_worker(
             progress.group_snapshots = 1;
             if let Some(info) = snapshot_list.pop() {
                 if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
-                task_log!(worker, "backup snapshot {}", info.backup_dir);
                 if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                     errors = true;
                 }
@@ -434,9 +434,9 @@ fn backup_worker(
             progress.group_snapshots = snapshot_list.len() as u64;
             for (snapshot_number, info) in snapshot_list.into_iter().enumerate() {
                 if pool_writer.contains_snapshot(&info.backup_dir.to_string()) {
+                    task_log!(worker, "skip snapshot {}", info.backup_dir);
                     continue;
                 }
-                task_log!(worker, "backup snapshot {}", info.backup_dir);
                 if !backup_snapshot(worker, &mut pool_writer, datastore.clone(), info.backup_dir)? {
                     errors = true;
                 }
@@ -497,7 +497,7 @@ pub fn backup_snapshot(
     snapshot: BackupDir,
 ) -> Result<bool, Error> {

-    task_log!(worker, "start backup {}:{}", datastore.name(), snapshot);
+    task_log!(worker, "backup snapshot {}", snapshot);

     let snapshot_reader = match SnapshotReader::new(datastore.clone(), snapshot.clone()) {
         Ok(reader) => reader,
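
The net effect of these three hunks: snapshots that are already on the media set now get an explicit "skip snapshot" line, and the single "backup snapshot" line is emitted from backup_snapshot() itself instead of at both call sites in backup_worker(). Below is a minimal standalone sketch of that flow, not the real code: println! stands in for task_log!, a plain string list stands in for PoolWriter::contains_snapshot(), and the snapshot names are made up.

// Sketch only: println! stands in for task_log!(worker, ...) and the
// `on_media` list stands in for PoolWriter::contains_snapshot().
fn backup_snapshot(snapshot: &str) -> bool {
    // After this commit the per-snapshot log line lives here.
    println!("backup snapshot {}", snapshot);
    // ... chunk archives and the snapshot archive would be written to tape here ...
    true
}

fn main() {
    let on_media = ["vm/100/2021-03-10T10:00:00Z"];
    let snapshot_list = [
        "vm/100/2021-03-10T10:00:00Z", // already on the media set -> skipped
        "vm/100/2021-03-11T10:00:00Z", // not yet on tape -> backed up
    ];

    let mut errors = false;
    for &snapshot in snapshot_list.iter() {
        if on_media.contains(&snapshot) {
            println!("skip snapshot {}", snapshot); // new log line in the skip branch
            continue;
        }
        if !backup_snapshot(snapshot) {
            errors = true;
        }
    }
    println!("errors: {}", errors);
}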

@@ -374,7 +374,8 @@ impl PoolWriter {

         let elapsed = start_time.elapsed()?.as_secs_f64();
         worker.log(format!(
-            "wrote {:.2} MB ({:.2} MB/s)",
+            "wrote {} chunks ({:.2} MiB at {:.2} MiB/s)",
+            saved_chunks.len(),
             bytes_written as f64 / (1024.0*1024.0),
             (bytes_written as f64)/(1024.0*1024.0*elapsed),
         ));
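
The reworked throughput line adds the number of chunks written and labels the units as binary (MiB): bytes are divided by 1024*1024 for MiB, and additionally by the elapsed seconds for MiB/s. A tiny standalone check of that arithmetic, using made-up sample values (512 chunks, 1.5 GiB in 12.5 s):

fn main() {
    // Made-up sample values, only to exercise the format string.
    let saved_chunks: usize = 512;
    let bytes_written: u64 = 1_610_612_736; // 1.5 GiB
    let elapsed: f64 = 12.5;                // seconds

    // Same arithmetic as the new log line above.
    println!(
        "wrote {} chunks ({:.2} MiB at {:.2} MiB/s)",
        saved_chunks,
        bytes_written as f64 / (1024.0 * 1024.0),
        (bytes_written as f64) / (1024.0 * 1024.0 * elapsed),
    );
    // -> wrote 512 chunks (1536.00 MiB at 122.88 MiB/s)
}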
@@ -398,7 +399,7 @@ impl PoolWriter {

 /// write up to <max_size> of chunks
 fn write_chunk_archive<'a>(
-    worker: &WorkerTask,
+    _worker: &WorkerTask,
     writer: Box<dyn 'a + TapeWrite>,
     datastore: &DataStore,
     chunk_iter: &mut std::iter::Peekable<SnapshotChunkIterator>,
@@ -444,7 +445,7 @@ fn write_chunk_archive<'a>(
         }

         if writer.bytes_written() > max_size {
-            worker.log("Chunk Archive max size reached, closing archive".to_string());
+            //worker.log("Chunk Archive max size reached, closing archive".to_string());
             break;
         }
     }
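
With the "max size reached" log commented out, worker is evidently no longer used inside write_chunk_archive(), so the parameter is renamed to _worker: a leading underscore is the standard Rust way to keep a parameter in the signature while silencing the unused_variables warning. A trivial, made-up illustration of that convention (names are hypothetical, not from this codebase):

// An unused `worker` parameter would trigger an `unused_variables` warning;
// the `_` prefix keeps the signature unchanged for callers but mutes the lint.
fn write_archive(_worker: &str, max_size: usize) -> bool {
    // `_worker` is intentionally unused, mirroring the patched
    // write_chunk_archive() after its log call was commented out.
    max_size > 0
}

fn main() {
    println!("{}", write_archive("worker-task-0", 4096));
}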