From 249dde8b633e53aaeeec47e56b7941870034239f Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date: Fri, 15 Apr 2022 12:20:51 +0200
Subject: [PATCH] backup: switch over to streaming Iterator improving memory
 usage

Avoid collecting the whole group list in memory only to iterate and
filter over it again.

Note that the change could result in an indentation change, so best
viewed with the `-w` flag.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 src/api2/admin/datastore.rs | 110 ++++++++++++++++--------------------
 src/backup/verify.rs        |   3 +-
 src/server/prune_job.rs     |   3 +-
 src/server/pull.rs          |   3 +-
 4 files changed, 55 insertions(+), 64 deletions(-)

diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index c436ec0b..5fe75324 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -166,67 +166,58 @@ pub fn list_groups(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
     let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;
 
-    let group_info =
-        datastore
-            .list_backup_groups()?
-            .into_iter()
-            .fold(Vec::new(), |mut group_info, group| {
-                let owner = match datastore.get_owner(&group) {
-                    Ok(auth_id) => auth_id,
-                    Err(err) => {
-                        eprintln!(
-                            "Failed to get owner of group '{}/{}' - {}",
-                            &store, group, err
-                        );
-                        return group_info;
-                    }
-                };
-                if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
-                    return group_info;
+    datastore
+        .iter_backup_groups()?
+        .try_fold(Vec::new(), |mut group_info, group| {
+            let group = group?;
+            let owner = match datastore.get_owner(&group) {
+                Ok(auth_id) => auth_id,
+                Err(err) => {
+                    let id = &store;
+                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
+                    return Ok(group_info);
                 }
+            };
+            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
+                return Ok(group_info);
+            }
 
-                let snapshots = match group.list_backups(&datastore.base_path()) {
-                    Ok(snapshots) => snapshots,
-                    Err(_) => {
-                        return group_info;
+            let snapshots = match group.list_backups(&datastore.base_path()) {
+                Ok(snapshots) => snapshots,
+                Err(_) => return Ok(group_info),
+            };
+
+            let backup_count: u64 = snapshots.len() as u64;
+            if backup_count == 0 {
+                return Ok(group_info);
+            }
+
+            let last_backup = snapshots
+                .iter()
+                .fold(&snapshots[0], |a, b| {
+                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
+                        a
+                    } else {
+                        b
                     }
-                };
+                })
+                .to_owned();
 
-                let backup_count: u64 = snapshots.len() as u64;
-                if backup_count == 0 {
-                    return group_info;
-                }
+            let note_path = get_group_note_path(&datastore, &group);
+            let comment = file_read_firstline(&note_path).ok();
 
-                let last_backup = snapshots
-                    .iter()
-                    .fold(&snapshots[0], |last, curr| {
-                        if curr.is_finished()
-                            && curr.backup_dir.backup_time() > last.backup_dir.backup_time()
-                        {
-                            curr
-                        } else {
-                            last
-                        }
-                    })
-                    .to_owned();
-
-                let note_path = get_group_note_path(&datastore, &group);
-                let comment = file_read_firstline(&note_path).ok();
-
-                group_info.push(GroupListItem {
-                    backup_type: group.backup_type().to_string(),
-                    backup_id: group.backup_id().to_string(),
-                    last_backup: last_backup.backup_dir.backup_time(),
-                    owner: Some(owner),
-                    backup_count,
-                    files: last_backup.files,
-                    comment,
-                });
-
-                group_info
+            group_info.push(GroupListItem {
+                backup_type: group.backup_type().to_string(),
+                backup_id: group.backup_id().to_string(),
+                last_backup: last_backup.backup_dir.backup_time(),
+                owner: Some(owner),
+                backup_count,
+                files: last_backup.files,
+                comment,
             });
 
-    Ok(group_info)
+            Ok(group_info)
+        })
 }
 
 #[api(
@@ -417,16 +408,16 @@ pub fn list_snapshots(
 
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
 
+    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
+    // backup group and provide an error free (Err -> None) accessor
     let groups = match (backup_type, backup_id) {
         (Some(backup_type), Some(backup_id)) => vec![BackupGroup::new(backup_type, backup_id)],
         (Some(backup_type), None) => datastore
-            .list_backup_groups()?
-            .into_iter()
+            .iter_backup_groups_ok()?
             .filter(|group| group.backup_type() == backup_type)
             .collect(),
         (None, Some(backup_id)) => datastore
-            .list_backup_groups()?
-            .into_iter()
+            .iter_backup_groups_ok()?
             .filter(|group| group.backup_id() == backup_id)
             .collect(),
         _ => datastore.list_backup_groups()?,
@@ -537,8 +528,7 @@ pub fn list_snapshots(
 
 fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
     store
-        .list_backup_groups()?
-        .into_iter()
+        .iter_backup_groups_ok()?
         .filter(|group| {
             let owner = match store.get_owner(group) {
                 Ok(owner) => owner,
diff --git a/src/backup/verify.rs b/src/backup/verify.rs
index 6a10653e..e9067536 100644
--- a/src/backup/verify.rs
+++ b/src/backup/verify.rs
@@ -537,9 +537,8 @@ pub fn verify_all_backups(
         }
     };
 
-    let mut list = match verify_worker.datastore.list_backup_groups() {
+    let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
        Ok(list) => list
-            .into_iter()
             .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
             .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
diff --git a/src/server/prune_job.rs b/src/server/prune_job.rs
index 94603a2b..1a8b253a 100644
--- a/src/server/prune_job.rs
+++ b/src/server/prune_job.rs
@@ -42,7 +42,8 @@ pub fn prune_datastore(
     let privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
     let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;
 
-    for group in datastore.list_backup_groups()? {
+    for group in datastore.iter_backup_groups()? {
+        let group = group?;
         let list = group.list_backups(&datastore.base_path())?;
 
         if !has_privs && !datastore.owns_backup(&group, &auth_id)? {
diff --git a/src/server/pull.rs b/src/server/pull.rs
index 6437ee21..34755016 100644
--- a/src/server/pull.rs
+++ b/src/server/pull.rs
@@ -797,7 +797,8 @@ pub async fn pull_store(
 
     if params.remove_vanished {
         let result: Result<(), Error> = proxmox_lang::try_block!({
-            for local_group in params.store.list_backup_groups()? {
+            for local_group in params.store.iter_backup_groups()? {
+                let local_group = local_group?;
                 if new_groups.contains(&local_group) {
                     continue;
                 }
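
For readers skimming the diff, the shape of the change is: list_backup_groups() hands back a fully
collected Vec of groups, while the streaming iter_backup_groups() / iter_backup_groups_ok()
accessors yield one group at a time, so call sites switch from fold over a Vec to try_fold over the
iterator and pick up a `let group = group?;` to handle per-item errors. The sketch below only
illustrates that pattern with made-up stand-in types (MockStore, Group, plain String errors); it is
not the pbs-datastore implementation.

// Illustrative sketch only: MockStore, Group, and the String error type are
// simplified stand-ins, not the real pbs-datastore DataStore/BackupGroup API.

#[derive(Debug, Clone)]
struct Group {
    backup_type: String,
    backup_id: String,
}

struct MockStore {
    groups: Vec<Group>,
}

impl MockStore {
    /// Old pattern: materialize every group up front, then let the caller filter the Vec.
    fn list_backup_groups(&self) -> Result<Vec<Group>, String> {
        Ok(self.groups.clone())
    }

    /// New pattern: yield groups lazily; each item can fail on its own.
    fn iter_backup_groups(
        &self,
    ) -> Result<impl Iterator<Item = Result<Group, String>> + '_, String> {
        Ok(self.groups.iter().cloned().map(Ok))
    }

    /// Convenience variant that drops per-item errors (mirrors iter_backup_groups_ok).
    fn iter_backup_groups_ok(&self) -> Result<impl Iterator<Item = Group> + '_, String> {
        Ok(self.iter_backup_groups()?.filter_map(Result::ok))
    }
}

fn main() -> Result<(), String> {
    let store = MockStore {
        groups: vec![
            Group { backup_type: "vm".into(), backup_id: "100".into() },
            Group { backup_type: "host".into(), backup_id: "backup1".into() },
        ],
    };

    // Old style: the whole Vec exists in memory before filtering even starts.
    let all = store.list_backup_groups()?;
    println!("collected {} groups up front", all.len());

    // New style: filter while streaming; try_fold lets a per-item error abort early,
    // which is why the call sites gain a `let group = group?;` line.
    let vm_ids = store
        .iter_backup_groups()?
        .try_fold(Vec::new(), |mut acc, group| {
            let group = group?;
            if group.backup_type == "vm" {
                acc.push(group.backup_id);
            }
            Ok::<_, String>(acc)
        })?;
    println!("vm groups seen while streaming: {:?}", vm_ids);

    // The _ok variant keeps a plain iterator chain for callers that only want readable groups.
    let readable = store.iter_backup_groups_ok()?.count();
    println!("{readable} groups readable");

    Ok(())
}

The practical difference is where errors and memory live: the old form pays for the whole group
list before any filtering happens, the new form keeps only the accumulator, and callers either
propagate per-item failures (try_fold plus `?`, as in prune_datastore and pull_store) or skip them
via the _ok variant (as in list_snapshots, get_snapshots_count, and verify_all_backups).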