client/backup_writer: introduce UploadStats struct

Return a dedicated struct instead of a big anonymous tuple, so that the
returned values are properly named.

Signed-off-by: Dominik Csapak <d.csapak@proxmox.com>
commit 3b60b5098f
parent 4abb3edd9f
Author:    Dominik Csapak <d.csapak@proxmox.com>
Date:      2021-03-24 17:17:18 +01:00
Committer: Thomas Lamprecht
1 changed file with 51 additions and 44 deletions
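
The change applies a common Rust refactoring: a function that returned a wide
anonymous tuple now returns a named struct, so call sites can say
stats.size_reused instead of the opaque tuple index .3. A minimal,
self-contained sketch of the pattern (the UploadStats fields mirror the diff
below; collect_stats and its values are made-up stand-ins):

use std::time::Duration;

struct UploadStats {
    chunk_count: usize,
    chunk_reused: usize,
    size: usize,
    size_reused: usize,
    duration: Duration,
    csum: [u8; 32],
}

// Before: fn collect_stats() -> (usize, usize, usize, usize, Duration, [u8; 32])
// After:  fn collect_stats() -> UploadStats
fn collect_stats() -> UploadStats {
    UploadStats {
        chunk_count: 10,
        chunk_reused: 4,
        size: 4 * 1024 * 1024,
        size_reused: 1024 * 1024,
        duration: Duration::from_secs(2),
        csum: [0u8; 32],
    }
}

fn main() {
    let stats = collect_stats();
    // Named field access documents itself; the tuple version read
    // stats.2 - stats.3 here.
    println!("had to upload {} bytes", stats.size - stats.size_reused);
}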

@@ -47,6 +47,15 @@ pub struct UploadOptions {
     pub fixed_size: Option<u64>,
 }
 
+struct UploadStats {
+    chunk_count: usize,
+    chunk_reused: usize,
+    size: usize,
+    size_reused: usize,
+    duration: std::time::Duration,
+    csum: [u8; 32],
+}
+
 type UploadQueueSender = mpsc::Sender<(MergedChunkInfo, Option<h2::client::ResponseFuture>)>;
 type UploadResultReceiver = oneshot::Receiver<Result<(), Error>>;
@@ -302,8 +311,7 @@ impl BackupWriter {
             .as_u64()
             .unwrap();
 
-        let (chunk_count, chunk_reused, size, size_reused, duration, csum) =
-            Self::upload_chunk_info_stream(
+        let upload_stats = Self::upload_chunk_info_stream(
             self.h2.clone(),
             wid,
             stream,
@@ -319,8 +327,8 @@
         )
         .await?;
 
-        let uploaded = size - size_reused;
-        let vsize_h: HumanByte = size.into();
+        let uploaded = upload_stats.size - upload_stats.size_reused;
+        let vsize_h: HumanByte = upload_stats.size.into();
 
         let archive = if self.verbose {
             archive_name.to_string()
         } else {
@@ -328,55 +336,55 @@
         };
 
         if archive_name != CATALOG_NAME {
             let speed: HumanByte =
-                ((uploaded * 1_000_000) / (duration.as_micros() as usize)).into();
+                ((uploaded * 1_000_000) / (upload_stats.duration.as_micros() as usize)).into();
             let uploaded: HumanByte = uploaded.into();
 
             println!(
                 "{}: had to upload {} of {} in {:.2}s, average speed {}/s).",
                 archive,
                 uploaded,
                 vsize_h,
-                duration.as_secs_f64(),
+                upload_stats.duration.as_secs_f64(),
                 speed
             );
         } else {
             println!("Uploaded backup catalog ({})", vsize_h);
         }
 
-        if size_reused > 0 && size > 1024 * 1024 {
-            let reused_percent = size_reused as f64 * 100. / size as f64;
-            let reused: HumanByte = size_reused.into();
+        if upload_stats.size_reused > 0 && upload_stats.size > 1024 * 1024 {
+            let reused_percent = upload_stats.size_reused as f64 * 100. / upload_stats.size as f64;
+            let reused: HumanByte = upload_stats.size_reused.into();
             println!(
                 "{}: backup was done incrementally, reused {} ({:.1}%)",
                 archive, reused, reused_percent
             );
         }
 
-        if self.verbose && chunk_count > 0 {
+        if self.verbose && upload_stats.chunk_count > 0 {
             println!(
                 "{}: Reused {} from {} chunks.",
-                archive, chunk_reused, chunk_count
+                archive, upload_stats.chunk_reused, upload_stats.chunk_count
             );
             println!(
                 "{}: Average chunk size was {}.",
                 archive,
-                HumanByte::from(size / chunk_count)
+                HumanByte::from(upload_stats.size / upload_stats.chunk_count)
             );
             println!(
                 "{}: Average time per request: {} microseconds.",
                 archive,
-                (duration.as_micros()) / (chunk_count as u128)
+                (upload_stats.duration.as_micros()) / (upload_stats.chunk_count as u128)
             );
         }
 
         let param = json!({
             "wid": wid ,
-            "chunk-count": chunk_count,
-            "size": size,
-            "csum": proxmox::tools::digest_to_hex(&csum),
+            "chunk-count": upload_stats.chunk_count,
+            "size": upload_stats.size,
+            "csum": proxmox::tools::digest_to_hex(&upload_stats.csum),
         });
 
         let _value = self.h2.post(&close_path, Some(param)).await?;
 
         Ok(BackupStats {
-            size: size as u64,
-            csum,
+            size: upload_stats.size as u64,
+            csum: upload_stats.csum,
         })
     }
@@ -617,8 +625,7 @@ impl BackupWriter {
         crypt_config: Option<Arc<CryptConfig>>,
         compress: bool,
         verbose: bool,
-    ) -> impl Future<Output = Result<(usize, usize, usize, usize, std::time::Duration, [u8; 32]), Error>>
-    {
+    ) -> impl Future<Output = Result<UploadStats, Error>> {
         let total_chunks = Arc::new(AtomicUsize::new(0));
         let total_chunks2 = total_chunks.clone();
         let known_chunk_count = Arc::new(AtomicUsize::new(0));
@@ -743,22 +750,22 @@
             .then(move |result| async move { upload_result.await?.and(result) }.boxed())
             .and_then(move |_| {
                 let duration = start_time.elapsed();
-                let total_chunks = total_chunks2.load(Ordering::SeqCst);
-                let known_chunk_count = known_chunk_count2.load(Ordering::SeqCst);
-                let stream_len = stream_len2.load(Ordering::SeqCst);
-                let reused_len = reused_len2.load(Ordering::SeqCst);
+                let chunk_count = total_chunks2.load(Ordering::SeqCst);
+                let chunk_reused = known_chunk_count2.load(Ordering::SeqCst);
+                let size = stream_len2.load(Ordering::SeqCst);
+                let size_reused = reused_len2.load(Ordering::SeqCst);
 
                 let mut guard = index_csum_2.lock().unwrap();
                 let csum = guard.take().unwrap().finish();
 
-                futures::future::ok((
-                    total_chunks,
-                    known_chunk_count,
-                    stream_len,
-                    reused_len,
+                futures::future::ok(UploadStats {
+                    chunk_count,
+                    chunk_reused,
+                    size,
+                    size_reused,
                     duration,
                     csum,
-                ))
+                })
             })
     }
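
One detail worth noting in the last hunk: the local bindings are renamed
(total_chunks to chunk_count, stream_len to size, and so on) to match the
struct's field names, which lets the constructor use Rust's field init
shorthand. A tiny sketch of that shorthand (Stats and build are illustrative,
not from the codebase):

struct Stats {
    chunk_count: usize,
    size: usize,
}

fn build() -> Stats {
    let chunk_count = 3;
    let size = 1024;
    // Field init shorthand: the binding names match the field names, so
    // Stats { chunk_count, size } expands to
    // Stats { chunk_count: chunk_count, size: size }.
    Stats { chunk_count, size }
}

fn main() {
    let s = build();
    assert_eq!(s.chunk_count, 3);
    assert_eq!(s.size, 1024);
}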