src/client/backup_writer.rs: prefix upload stats with archive name

This commit is contained in:
Dietmar Maurer 2020-02-24 13:24:46 +01:00
parent e02c3d461f
commit 6da73c823f

View File

@@ -237,7 +237,7 @@ impl BackupWriter {
         let wid = self.h2.post(&index_path, Some(param)).await?.as_u64().unwrap();
-        let (chunk_count, size, _speed, csum) =
+        let (chunk_count, size, duration, speed, csum) =
             Self::upload_chunk_info_stream(
                 self.h2.clone(),
                 wid,
@@ -249,6 +249,12 @@ impl BackupWriter {
             )
             .await?;
+        println!("{}: Uploaded {} chunks in {} seconds ({} MB/s).", archive_name, chunk_count, duration.as_secs(), speed);
+        if chunk_count > 0 {
+            println!("{}: Average chunk size was {} bytes.", archive_name, size/chunk_count);
+            println!("{}: Time per request: {} microseconds.", archive_name, (duration.as_micros())/(chunk_count as u128));
+        }
         let param = json!({
             "wid": wid ,
             "chunk-count": chunk_count,
@@ -399,7 +405,7 @@ impl BackupWriter {
         }
         if self.verbose {
-            println!("known chunks list length: {}", known_chunks.lock().unwrap().len());
+            println!("{}: known chunks list length is {}", archive_name, known_chunks.lock().unwrap().len());
         }
         Ok(())
@@ -413,7 +419,7 @@ impl BackupWriter {
         known_chunks: Arc<Mutex<HashSet<[u8;32]>>>,
         crypt_config: Option<Arc<CryptConfig>>,
         verbose: bool,
-    ) -> impl Future<Output = Result<(usize, usize, usize, [u8; 32]), Error>> {
+    ) -> impl Future<Output = Result<(usize, usize, std::time::Duration, usize, [u8; 32]), Error>> {
         let repeat = Arc::new(AtomicUsize::new(0));
         let repeat2 = repeat.clone();
@@ -529,16 +535,11 @@ impl BackupWriter {
             let repeat = repeat2.load(Ordering::SeqCst);
             let stream_len = stream_len2.load(Ordering::SeqCst);
             let speed = ((stream_len*1_000_000)/(1024*1024))/(start_time.elapsed().as_micros() as usize);
-            println!("Uploaded {} chunks in {} seconds ({} MB/s).", repeat, start_time.elapsed().as_secs(), speed);
-            if repeat > 0 {
-                println!("Average chunk size was {} bytes.", stream_len/repeat);
-                println!("Time per request: {} microseconds.", (start_time.elapsed().as_micros())/(repeat as u128));
-            }
             let mut guard = index_csum_2.lock().unwrap();
             let csum = guard.take().unwrap().finish();
-            futures::future::ok((repeat, stream_len, speed, csum))
+            futures::future::ok((repeat, stream_len, start_time.elapsed(), speed, csum))
         })
     }