From 31cf625af5e251dc71851bbbda45ce432f0ba77f Mon Sep 17 00:00:00 2001
From: Dietmar Maurer
Date: Mon, 11 Jan 2021 13:22:31 +0100
Subject: [PATCH] tape: improve backup logs

---
 src/api2/tape/backup.rs |  2 +-
 src/tape/pool_writer.rs | 23 ++++++++++++++++++-----
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/src/api2/tape/backup.rs b/src/api2/tape/backup.rs
index c7b508c7..3764f895 100644
--- a/src/api2/tape/backup.rs
+++ b/src/api2/tape/backup.rs
@@ -209,7 +209,7 @@ pub fn backup_snapshot(
 
     let uuid = pool_writer.load_writable_media(worker)?;
 
-    let (leom, _bytes) = pool_writer.append_chunk_archive(&datastore, &mut chunk_iter)?;
+    let (leom, _bytes) = pool_writer.append_chunk_archive(worker, &datastore, &mut chunk_iter)?;
 
     if leom {
         pool_writer.set_media_status_full(&uuid)?;
diff --git a/src/tape/pool_writer.rs b/src/tape/pool_writer.rs
index 479e9f6f..c132a8df 100644
--- a/src/tape/pool_writer.rs
+++ b/src/tape/pool_writer.rs
@@ -1,5 +1,6 @@
 use std::collections::HashSet;
 use std::path::Path;
+use std::time::SystemTime;
 
 use anyhow::{bail, Error};
 
@@ -294,6 +295,7 @@ impl PoolWriter {
     /// (4GB). Written chunks are registered in the media catalog.
     pub fn append_chunk_archive(
         &mut self,
+        worker: &WorkerTask,
         datastore: &DataStore,
         chunk_iter: &mut std::iter::Peekable<SnapshotChunkIterator>,
     ) -> Result<(bool, usize), Error> {
@@ -314,7 +316,10 @@ impl PoolWriter {
         }
 
         let writer = status.drive.write_file()?;
+        let start_time = SystemTime::now();
+
         let (saved_chunks, content_uuid, leom, bytes_written) = write_chunk_archive(
+            worker,
             writer,
             datastore,
             chunk_iter,
@@ -325,6 +330,13 @@ impl PoolWriter {
 
         status.bytes_written += bytes_written;
 
+        let elapsed = start_time.elapsed()?.as_secs_f64();
+        worker.log(format!(
+            "wrote {:.2} MB ({} MB/s)",
+            bytes_written as f64 / (1024.0*1024.0),
+            (bytes_written as f64)/(1024.0*1024.0*elapsed),
+        ));
+
         let request_sync = if status.bytes_written >= COMMIT_BLOCK_SIZE { true } else { false };
 
         // register chunks in media_catalog
@@ -344,6 +356,7 @@ impl PoolWriter {
 
 /// write up to <max_size> of chunks
 fn write_chunk_archive<'a>(
+    worker: &WorkerTask,
     writer: Box<dyn 'a + TapeWrite>,
     datastore: &DataStore,
     chunk_iter: &mut std::iter::Peekable<SnapshotChunkIterator>,
@@ -374,10 +387,10 @@ fn write_chunk_archive<'a>(
         }
 
         let blob = datastore.load_chunk(&digest)?;
-        println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(&digest), blob.raw_size());
+        //println!("CHUNK {} size {}", proxmox::tools::digest_to_hex(&digest), blob.raw_size());
 
         match writer.try_write_chunk(&digest, &blob) {
-            Ok(true) => { 
+            Ok(true) => {
                 chunk_index.insert(digest);
                 chunk_list.push(digest);
             }
@@ -389,7 +402,7 @@ fn write_chunk_archive<'a>(
         }
 
         if writer.bytes_written() > max_size {
-            println!("Chunk Archive max size reached, closing archive");
+            worker.log(format!("Chunk Archive max size reached, closing archive"));
             break;
         }
     }
@@ -422,7 +435,7 @@ fn update_media_set_label(
         None => {
             worker.log(format!("wrinting new media set label"));
             drive.write_media_set_label(new_set)?;
-            media_catalog = MediaCatalog::overwrite(status_path, media_id, true)?;
+            media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?;
         }
         Some(media_set_label) => {
             if new_set.uuid == media_set_label.uuid {
@@ -438,7 +451,7 @@ fn update_media_set_label(
             );
 
             drive.write_media_set_label(new_set)?;
-            media_catalog = MediaCatalog::overwrite(status_path, media_id, true)?;
+            media_catalog = MediaCatalog::overwrite(status_path, media_id, false)?;
         }
     }
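
Note (editor): for readers outside the proxmox-backup tree, the new log line in
append_chunk_archive() is just a SystemTime measurement wrapped around the
archive write. Below is a minimal, self-contained sketch of that arithmetic,
not code from the patch: println! stands in for worker.log() (WorkerTask is
proxmox-backup specific), log_write_throughput() is a made-up helper name, and
the 64 MiB / 250 ms figures are example data.

    use std::time::SystemTime;

    // Mirrors the MB / MB/s arithmetic of the log line added in
    // PoolWriter::append_chunk_archive(); println! stands in for worker.log().
    fn log_write_throughput(
        bytes_written: usize,
        start_time: SystemTime,
    ) -> Result<(), std::time::SystemTimeError> {
        // SystemTime::elapsed() fails if the clock was set back; the patch
        // propagates that error with `?`, and so does this sketch.
        let elapsed = start_time.elapsed()?.as_secs_f64();
        println!(
            "wrote {:.2} MB ({} MB/s)",
            bytes_written as f64 / (1024.0 * 1024.0),
            (bytes_written as f64) / (1024.0 * 1024.0 * elapsed),
        );
        Ok(())
    }

    fn main() -> Result<(), std::time::SystemTimeError> {
        let start_time = SystemTime::now();
        // stand-in for write_chunk_archive() doing the actual tape I/O
        std::thread::sleep(std::time::Duration::from_millis(250));
        log_write_throughput(64 * 1024 * 1024, start_time)
    }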