diff --git a/src/backup/datastore.rs b/src/backup/datastore.rs
index a4c437e1..c8a33043 100644
--- a/src/backup/datastore.rs
+++ b/src/backup/datastore.rs
@@ -759,3 +759,50 @@ impl DataStore {
         self.verify_new
     }
 }
+
+#[derive(Debug, Clone, Default)]
+/// Tracker for progress of operations iterating over `Datastore` contents.
+pub struct StoreProgress {
+    /// Completed groups
+    pub done_groups: u64,
+    /// Total groups
+    pub total_groups: u64,
+    /// Completed snapshots within current group
+    pub done_snapshots: u64,
+    /// Total snapshots in current group
+    pub group_snapshots: u64,
+}
+
+impl StoreProgress {
+    pub fn new(total_groups: u64) -> Self {
+        StoreProgress {
+            total_groups,
+            .. Default::default()
+        }
+    }
+
+    /// Calculates an interpolated relative progress based on current counters.
+    pub fn percentage(&self) -> f64 {
+        let per_groups = (self.done_groups as f64) / (self.total_groups as f64);
+        if self.group_snapshots == 0 {
+            per_groups
+        } else {
+            let per_snapshots = (self.done_snapshots as f64) / (self.group_snapshots as f64);
+            per_groups + (1.0 / self.total_groups as f64) * per_snapshots
+        }
+    }
+}
+
+impl std::fmt::Display for StoreProgress {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            "{:.2}% ({} of {} groups, {} of {} group snapshots)",
+            self.percentage() * 100.0,
+            self.done_groups,
+            self.total_groups,
+            self.done_snapshots,
+            self.group_snapshots,
+        )
+    }
+}
diff --git a/src/client/pull.rs b/src/client/pull.rs
index 0c9afe0a..2555a14c 100644
--- a/src/client/pull.rs
+++ b/src/client/pull.rs
@@ -395,7 +395,7 @@ pub async fn pull_group(
     tgt_store: Arc<DataStore>,
     group: &BackupGroup,
     delete: bool,
-    progress: Option<(usize, usize)>, // (groups_done, group_count)
+    progress: &mut StoreProgress,
 ) -> Result<(), Error> {
 
     let path = format!("api2/json/admin/datastore/{}/snapshots", src_repo.store());
@@ -418,18 +418,10 @@ pub async fn pull_group(
 
     let mut remote_snapshots = std::collections::HashSet::new();
 
-    let (per_start, per_group) = if let Some((groups_done, group_count)) = progress {
-        let per_start = (groups_done as f64)/(group_count as f64);
-        let per_group = 1.0/(group_count as f64);
-        (per_start, per_group)
-    } else {
-        (0.0, 1.0)
-    };
-
     // start with 16384 chunks (up to 65GB)
     let downloaded_chunks = Arc::new(Mutex::new(HashSet::with_capacity(1024*64)));
 
-    let snapshot_count = list.len();
+    progress.group_snapshots = list.len() as u64;
 
     for (pos, item) in list.into_iter().enumerate() {
         let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
@@ -469,9 +461,8 @@
 
         let result = pull_snapshot_from(worker, reader, tgt_store.clone(), &snapshot, downloaded_chunks.clone()).await;
 
-        let percentage = (pos as f64)/(snapshot_count as f64);
-        let percentage = per_start + percentage*per_group;
-        worker.log(format!("percentage done: {:.2}%", percentage*100.0));
+        progress.done_snapshots = pos as u64 + 1;
+        worker.log(format!("percentage done: {}", progress.clone()));
 
         result?; // stop on error
     }
@@ -523,9 +514,13 @@ pub async fn pull_store(
         new_groups.insert(BackupGroup::new(&item.backup_type, &item.backup_id));
     }
 
-    let group_count = list.len();
+    let mut progress = StoreProgress::new(list.len() as u64);
+
+    for (done, item) in list.into_iter().enumerate() {
+        progress.done_groups = done as u64;
+        progress.done_snapshots = 0;
+        progress.group_snapshots = 0;
 
-    for (groups_done, item) in list.into_iter().enumerate() {
         let group = BackupGroup::new(&item.backup_type, &item.backup_id);
 
         let (owner, _lock_guard) = match tgt_store.create_locked_backup_group(&group, &auth_id) {
@@ -551,7 +546,7 @@ pub async fn pull_store(
             tgt_store.clone(),
             &group,
             delete,
-            Some((groups_done, group_count)),
+            &mut progress,
         ).await {
            worker.log(format!(
                "sync group {}/{} failed - {}",
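
Note (not part of the patch): the interpolation in StoreProgress::percentage() weights the current group's snapshot progress by one group's share of the total. Below is a minimal sketch of the expected behaviour, assuming the StoreProgress type from the datastore.rs hunk above is in scope; the fn main wrapper and the concrete counter values are illustrative only, not taken from the patch.

// Hypothetical usage sketch, assuming `StoreProgress` (see hunk above) is in scope.
// With 2 of 4 groups done and 5 of 10 snapshots done in the current group:
// per_groups = 2/4 = 0.5, per-group share = 1/4, per_snapshots = 5/10 = 0.5
// => 0.5 + 0.25 * 0.5 = 0.625
fn main() {
    let mut progress = StoreProgress::new(4);
    progress.done_groups = 2;
    progress.group_snapshots = 10;
    progress.done_snapshots = 5;

    assert!((progress.percentage() - 0.625).abs() < 1e-9);
    // Display prints: "62.50% (2 of 4 groups, 5 of 10 group snapshots)"
    println!("percentage done: {}", progress);
}

The same counters drive the per-snapshot log lines in pull_group(), which now print the interpolated value through the Display impl instead of the previous manually scaled percentage.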