diff --git a/src/api2/admin/datastore.rs b/src/api2/admin/datastore.rs
index d2da051c..94ed1182 100644
--- a/src/api2/admin/datastore.rs
+++ b/src/api2/admin/datastore.rs
@@ -71,6 +71,25 @@ fn read_backup_index(store: &DataStore, backup_dir: &BackupDir) -> Result<Vec<BackupContent>, Error> {
+fn get_all_snapshot_files(
+    store: &DataStore,
+    info: &BackupInfo,
+) -> Result<Vec<BackupContent>, Error> {
+    let mut files = read_backup_index(&store, &info.backup_dir)?;
+
+    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
+        acc.insert(item.filename.clone());
+        acc
+    });
+
+    for file in &info.files {
+        if file_set.contains(file) { continue; }
+        files.push(BackupContent { filename: file.to_string(), size: None, encrypted: None });
+    }
+
+    Ok(files)
+}
+
 fn group_backups(backup_list: Vec<BackupInfo>) -> HashMap<String, Vec<BackupInfo>> {
 
     let mut group_hash = HashMap::new();
@@ -204,21 +223,9 @@ pub fn list_snapshot_files(
 
     let allowed = (user_privs & (PRIV_DATASTORE_AUDIT | PRIV_DATASTORE_READ)) != 0;
     if !allowed { check_backup_owner(&datastore, snapshot.group(), &username)?; }
 
-    let mut files = read_backup_index(&datastore, &snapshot)?;
-
     let info = BackupInfo::new(&datastore.base_path(), snapshot)?;
 
-    let file_set = files.iter().fold(HashSet::new(), |mut acc, item| {
-        acc.insert(item.filename.clone());
-        acc
-    });
-
-    for file in info.files {
-        if file_set.contains(&file) { continue; }
-        files.push(BackupContent { filename: file, size: None, encrypted: None });
-    }
-
-    Ok(files)
+    get_all_snapshot_files(&datastore, &info)
 }
 
 #[api(
@@ -339,25 +346,28 @@ pub fn list_snapshots (
             if owner != username { continue; }
         }
 
-        let mut result_item = SnapshotListItem {
+        let mut size = None;
+
+        let files = match get_all_snapshot_files(&datastore, &info) {
+            Ok(files) => {
+                size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());
+                files
+            },
+            Err(err) => {
+                eprintln!("error during snapshot file listing: '{}'", err);
+                info.files.iter().map(|x| BackupContent { filename: x.to_string(), size: None, encrypted: None }).collect()
+            },
+        };
+
+        let result_item = SnapshotListItem {
             backup_type: group.backup_type().to_string(),
             backup_id: group.backup_id().to_string(),
             backup_time: info.backup_dir.backup_time().timestamp(),
-            files: info.files,
-            size: None,
+            files,
+            size,
             owner: Some(owner),
         };
 
-        if let Ok(index) = read_backup_index(&datastore, &info.backup_dir) {
-            let mut backup_size = 0;
-            for item in index.iter() {
-                if let Some(item_size) = item.size {
-                    backup_size += item_size;
-                }
-            }
-            result_item.size = Some(backup_size);
-        }
-
         snapshots.push(result_item);
     }
 
diff --git a/src/api2/types.rs b/src/api2/types.rs
index e55d61bf..4c86b1ce 100644
--- a/src/api2/types.rs
+++ b/src/api2/types.rs
@@ -428,7 +428,7 @@ pub struct SnapshotListItem {
     pub backup_id: String,
     pub backup_time: i64,
     /// List of contained archive files.
-    pub files: Vec<String>,
+    pub files: Vec<BackupContent>,
     /// Overall snapshot size (sum of all archive sizes).
     #[serde(skip_serializing_if="Option::is_none")]
     pub size: Option<u64>,
diff --git a/src/bin/proxmox-backup-client.rs b/src/bin/proxmox-backup-client.rs
index fe09830f..e3951c52 100644
--- a/src/bin/proxmox-backup-client.rs
+++ b/src/bin/proxmox-backup-client.rs
@@ -445,7 +445,11 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
 
     let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
         let item: SnapshotListItem = serde_json::from_value(record.to_owned())?;
-        Ok(tools::format::render_backup_file_list(&item.files))
+        let mut filenames = Vec::new();
+        for file in &item.files {
+            filenames.push(file.filename.to_string());
+        }
+        Ok(tools::format::render_backup_file_list(&filenames[..]))
     };
 
     let options = default_table_format_options()
diff --git a/www/DataStoreContent.js b/www/DataStoreContent.js
index 9b4bf248..601f3ab9 100644
--- a/www/DataStoreContent.js
+++ b/www/DataStoreContent.js
@@ -196,6 +196,11 @@ Ext.define('PBS.DataStoreContent', {
             header: gettext("Files"),
             sortable: false,
             dataIndex: 'files',
+            renderer: function(files) {
+                return files.map((file) => {
+                    return file.filename;
+                }).join(', ');
+            },
             flex: 2
         },
     ],
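
For illustration only: after this change each entry in a snapshot's `files` array is an
object with `filename`, optional `size`, and optional `encrypted` fields rather than a bare
filename string. Below is a minimal standalone sketch of an API consumer, assuming serde and
serde_json; `FileEntry` is a hypothetical stand-in for `BackupContent`, and the JSON literal
is a made-up example response, not real proxmox-backup output.

    // Sketch of consuming the new `files` shape (assumes serde + serde_json).
    use serde::Deserialize;

    #[derive(Deserialize)]
    struct FileEntry {
        filename: String,
        size: Option<u64>,
        encrypted: Option<bool>,
    }

    fn main() -> Result<(), serde_json::Error> {
        // Hypothetical example payload; fields mirror the API types above.
        let json = r#"[
            {"filename": "catalog.pcat1.didx", "size": 1024, "encrypted": false},
            {"filename": "index.json.blob"}
        ]"#;

        let files: Vec<FileEntry> = serde_json::from_str(json)?;

        // Same aggregation list_snapshots now performs: missing sizes count as 0.
        let total: u64 = files.iter().map(|f| f.size.unwrap_or(0)).sum();
        // Same projection the CLI renderer and the GUI column renderer perform.
        let names: Vec<&str> = files.iter().map(|f| f.filename.as_str()).collect();
        let encrypted = files.iter().filter(|f| f.encrypted.unwrap_or(false)).count();

        println!("{} ({} bytes, {} encrypted)", names.join(", "), total, encrypted);
        Ok(())
    }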