backup: switch over to streaming Iterator improving memory usage
Avoid collecting the whole group list in memory only to iterate and filter over it again.

Note that the change could result in an indentation change, so it is best viewed with the `-w` flag.

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
parent 7b125de3e1
commit 249dde8b63
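In effect, every call site moves from materializing the complete group list and then iterating over it again to consuming a streaming iterator that yields one fallible group at a time. A minimal sketch of the two shapes, using made-up stand-ins (scan_groups and plain String groups) rather than the real DataStore API:

    use std::io;

    // Stand-in for a directory scan that yields one group at a time; in the
    // real code this role is played by DataStore::iter_backup_groups().
    fn scan_groups() -> impl Iterator<Item = Result<String, io::Error>> {
        ["vm/100", "ct/101", "host/backup-server"]
            .into_iter()
            .map(|s| Ok(s.to_string()))
    }

    // Before: collect the complete list first, then iterate and filter again.
    fn eager() -> Result<Vec<String>, io::Error> {
        let all: Vec<String> = scan_groups().collect::<Result<_, _>>()?; // whole list in memory
        Ok(all.into_iter().filter(|g| g.starts_with("vm/")).collect())
    }

    // After: stream the groups once, folding matches into the result and
    // propagating the first error, as the new try_fold-based code does.
    fn streaming() -> Result<Vec<String>, io::Error> {
        scan_groups().try_fold(Vec::new(), |mut acc, group| {
            let group = group?;
            if group.starts_with("vm/") {
                acc.push(group);
            }
            Ok(acc)
        })
    }

    fn main() -> Result<(), io::Error> {
        assert_eq!(eager()?, streaming()?); // both yield ["vm/100"]
        Ok(())
    }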
@@ -166,67 +166,58 @@ pub fn list_groups(
     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;
     let list_all = (user_privs & PRIV_DATASTORE_AUDIT) != 0;

-    let group_info =
-        datastore
-            .list_backup_groups()?
-            .into_iter()
-            .fold(Vec::new(), |mut group_info, group| {
-                let owner = match datastore.get_owner(&group) {
-                    Ok(auth_id) => auth_id,
-                    Err(err) => {
-                        eprintln!(
-                            "Failed to get owner of group '{}/{}' - {}",
-                            &store, group, err
-                        );
-                        return group_info;
-                    }
-                };
-                if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
-                    return group_info;
-                }
-
-                let snapshots = match group.list_backups(&datastore.base_path()) {
-                    Ok(snapshots) => snapshots,
-                    Err(_) => {
-                        return group_info;
-                    }
-                };
-
-                let backup_count: u64 = snapshots.len() as u64;
-                if backup_count == 0 {
-                    return group_info;
-                }
-
-                let last_backup = snapshots
-                    .iter()
-                    .fold(&snapshots[0], |last, curr| {
-                        if curr.is_finished()
-                            && curr.backup_dir.backup_time() > last.backup_dir.backup_time()
-                        {
-                            curr
-                        } else {
-                            last
-                        }
-                    })
-                    .to_owned();
-
-                let note_path = get_group_note_path(&datastore, &group);
-                let comment = file_read_firstline(&note_path).ok();
-
-                group_info.push(GroupListItem {
-                    backup_type: group.backup_type().to_string(),
-                    backup_id: group.backup_id().to_string(),
-                    last_backup: last_backup.backup_dir.backup_time(),
-                    owner: Some(owner),
-                    backup_count,
-                    files: last_backup.files,
-                    comment,
-                });
-
-                group_info
-            });
-
-    Ok(group_info)
+    datastore
+        .iter_backup_groups()?
+        .try_fold(Vec::new(), |mut group_info, group| {
+            let group = group?;
+            let owner = match datastore.get_owner(&group) {
+                Ok(auth_id) => auth_id,
+                Err(err) => {
+                    let id = &store;
+                    eprintln!("Failed to get owner of group '{}/{}' - {}", id, group, err);
+                    return Ok(group_info);
+                }
+            };
+            if !list_all && check_backup_owner(&owner, &auth_id).is_err() {
+                return Ok(group_info);
+            }
+
+            let snapshots = match group.list_backups(&datastore.base_path()) {
+                Ok(snapshots) => snapshots,
+                Err(_) => return Ok(group_info),
+            };
+
+            let backup_count: u64 = snapshots.len() as u64;
+            if backup_count == 0 {
+                return Ok(group_info);
+            }
+
+            let last_backup = snapshots
+                .iter()
+                .fold(&snapshots[0], |a, b| {
+                    if a.is_finished() && a.backup_dir.backup_time() > b.backup_dir.backup_time() {
+                        a
+                    } else {
+                        b
+                    }
+                })
+                .to_owned();
+
+            let note_path = get_group_note_path(&datastore, &group);
+            let comment = file_read_firstline(&note_path).ok();
+
+            group_info.push(GroupListItem {
+                backup_type: group.backup_type().to_string(),
+                backup_id: group.backup_id().to_string(),
+                last_backup: last_backup.backup_dir.backup_time(),
+                owner: Some(owner),
+                backup_count,
+                files: last_backup.files,
+                comment,
+            });
+
+            Ok(group_info)
+        })
 }

 #[api(
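The new list_groups body folds over a lazy, fallible iterator instead of a pre-built Vec. The standard library's std::fs::read_dir has the same shape (an iterator of io::Result items), so the pattern can be sketched in a self-contained way; this is an illustration of the shape, not the real iter_backup_groups() implementation:

    use std::{fs, io, path::Path};

    // read_dir yields io::Result<DirEntry> items one by one instead of
    // returning a fully materialized Vec; the real iter_backup_groups()
    // walks the datastore's own directory layout instead.
    fn first_level_dirs(base: &Path) -> io::Result<Vec<String>> {
        fs::read_dir(base)?.try_fold(Vec::new(), |mut acc, entry| {
            let entry = entry?; // an unreadable entry aborts the whole scan
            if entry.file_type()?.is_dir() {
                acc.push(entry.file_name().to_string_lossy().into_owned());
            }
            Ok(acc)
        })
    }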
@@ -417,16 +408,16 @@ pub fn list_snapshots(

     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

+    // FIXME: filter also owner before collecting, for doing that nicely the owner should move into
+    // backup group and provide an error free (Err -> None) accessor
     let groups = match (backup_type, backup_id) {
         (Some(backup_type), Some(backup_id)) => vec![BackupGroup::new(backup_type, backup_id)],
         (Some(backup_type), None) => datastore
-            .list_backup_groups()?
-            .into_iter()
+            .iter_backup_groups_ok()?
             .filter(|group| group.backup_type() == backup_type)
             .collect(),
         (None, Some(backup_id)) => datastore
-            .list_backup_groups()?
-            .into_iter()
+            .iter_backup_groups_ok()?
             .filter(|group| group.backup_id() == backup_id)
             .collect(),
         _ => datastore.list_backup_groups()?,
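Several call sites switch to iter_backup_groups_ok(), whose implementation is not part of this diff; judging from its use it yields BackupGroup values directly rather than Results, dropping entries that fail to load. A generic sketch of that behaviour, with the made-up helper ok_only standing in for it:

    // Keep only the entries that loaded successfully; the exact signature of
    // the real iter_backup_groups_ok() is an assumption here.
    fn ok_only<T, E>(iter: impl Iterator<Item = Result<T, E>>) -> impl Iterator<Item = T> {
        iter.filter_map(Result::ok) // silently drop entries that failed to load
    }

    fn main() {
        let groups: Vec<_> = ok_only([Ok("vm/100"), Err("io error"), Ok("ct/101")].into_iter())
            .filter(|group| group.starts_with("vm/"))
            .collect();
        assert_eq!(groups, ["vm/100"]);
    }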
@@ -537,8 +528,7 @@ pub fn list_snapshots(

 fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Result<Counts, Error> {
     store
-        .list_backup_groups()?
-        .into_iter()
+        .iter_backup_groups_ok()?
         .filter(|group| {
             let owner = match store.get_owner(group) {
                 Ok(owner) => owner,
@@ -537,9 +537,8 @@ pub fn verify_all_backups(
        }
    };

-    let mut list = match verify_worker.datastore.list_backup_groups() {
+    let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
        Ok(list) => list
-            .into_iter()
            .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
            .filter(filter_by_owner)
            .collect::<Vec<BackupGroup>>(),
@@ -42,7 +42,8 @@ pub fn prune_datastore(
     let privs = user_info.lookup_privs(&auth_id, &["datastore", store]);
     let has_privs = privs & PRIV_DATASTORE_MODIFY != 0;

-    for group in datastore.list_backup_groups()? {
+    for group in datastore.iter_backup_groups()? {
+        let group = group?;
         let list = group.list_backups(&datastore.base_path())?;

         if !has_privs && !datastore.owns_backup(&group, &auth_id)? {
@@ -797,7 +797,8 @@ pub async fn pull_store(

     if params.remove_vanished {
         let result: Result<(), Error> = proxmox_lang::try_block!({
-            for local_group in params.store.list_backup_groups()? {
+            for local_group in params.store.iter_backup_groups()? {
+                let local_group = local_group?;
                 if new_groups.contains(&local_group) {
                     continue;
                 }
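The prune and pull call sites keep their for loops; the only change is that each item now arrives wrapped in a Result and is unwrapped with ? at the top of the loop body, so a broken entry aborts the surrounding operation. A toy sketch of that shape, with placeholder types and the hypothetical process_all function:

    use std::io;

    // Loop over a fallible iterator in the style of the new prune/pull code.
    fn process_all(groups: impl Iterator<Item = Result<String, io::Error>>) -> Result<(), io::Error> {
        for group in groups {
            // One unreadable entry aborts the whole function, matching the
            // `let group = group?;` added at the top of those loops.
            let group = group?;
            println!("pruning {group}");
        }
        Ok(())
    }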