api-types: introduce BackupType enum and Group/Dir api types
BackupType is now a real enum. BackupType and the new BackupGroup/BackupDir are all API types and implement Display and FromStr. The variant ordering is the same as in pbs-datastore. The backup type/id/time fields are also flattened into these structs in the various API result types (GroupListItem, SnapshotListItem, ...) instead of being copied around manually.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
committed by Thomas Lamprecht
parent 33eb23d57e
commit 988d575dbb
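To make the diff below easier to follow: the BackupType from the message is used as a real Rust enum with the variants Vm, Ct and Host, and it round-trips through the lowercase strings "vm", "ct" and "host" via FromStr/Display (that is what the `.parse()?` calls and the `format!` uses in the hunks rely on). The following is only a rough sketch of such a type, not the exact definition from pbs-api-types; the derive list, error type and variant order are assumptions.

// Sketch only; the real definition lives in pbs-api-types and is registered as an API type.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum BackupType {
    Vm,   // "vm": virtual machine backups
    Ct,   // "ct": container backups
    Host, // "host": host backups
}

impl std::fmt::Display for BackupType {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str(match self {
            BackupType::Vm => "vm",
            BackupType::Ct => "ct",
            BackupType::Host => "host",
        })
    }
}

impl std::str::FromStr for BackupType {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "vm" => BackupType::Vm,
            "ct" => BackupType::Ct,
            "host" => BackupType::Host,
            _ => anyhow::bail!("invalid backup type {:?}", s),
        })
    }
}

With something like this in place, handlers can take `backup_type: BackupType` directly and the #[api] schema entries can say `type: BackupType` instead of `schema: BACKUP_TYPE_SCHEMA`, as the hunks below do.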
@@ -30,7 +30,7 @@ use pxar::accessor::aio::Accessor;
 use pxar::EntryKind;

 use pbs_api_types::{
-    Authid, BackupContent, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
+    Authid, BackupContent, BackupType, Counts, CryptMode, DataStoreListItem, DataStoreStatus,
     GarbageCollectionStatus, GroupListItem, Operation, PruneOptions, RRDMode, RRDTimeFrame,
     SnapshotListItem, SnapshotVerifyState, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
     BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, DATASTORE_SCHEMA, IGNORE_VERIFIED_BACKUPS_SCHEMA,
@@ -207,8 +207,7 @@ pub fn list_groups(
         let comment = file_read_firstline(&note_path).ok();

         group_info.push(GroupListItem {
-            backup_type: group.backup_type().to_string(),
-            backup_id: group.backup_id().to_string(),
+            backup: group.into(),
             last_backup: last_backup.backup_dir.backup_time(),
             owner: Some(owner),
             backup_count,
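The `backup: group.into()` line above is what the commit message means by flattening: GroupListItem no longer carries separate backup_type/backup_id strings but embeds an api-side group value, converted from the datastore group via From/Into. A sketch of the shape this implies (the field names ty/id come from later hunks such as `item.backup.ty`; the serde attributes and remaining fields are assumptions):

// Sketch: api-side group value embedded into list items (GroupListItem, etc.).
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct BackupGroup {
    #[serde(rename = "backup-type")]
    pub ty: BackupType,
    #[serde(rename = "backup-id")]
    pub id: String,
}

#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct GroupListItem {
    #[serde(flatten)]
    pub backup: BackupGroup, // replaces the old backup_type/backup_id string fields
    pub last_backup: i64,
    pub backup_count: u64,
    pub owner: Option<String>, // the real type presumably uses Authid here
    // ... plus files, comment, etc. in the real type
}

Keeping the on-the-wire names ("backup-type", "backup-id") stable while flattening on the Rust side means existing API clients should not notice the change, assuming the serde renames match the old field names.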
@@ -223,15 +222,9 @@ pub fn list_groups(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
         },
     },
     access: {
@@ -244,7 +237,7 @@ pub fn list_groups(
 /// Delete backup group including all snapshots.
 pub fn delete_group(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     _info: &ApiMethod,
     rpcenv: &mut dyn RpcEnvironment,
@@ -266,18 +259,10 @@ pub fn delete_group(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     returns: pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOT_FILES_RETURN_TYPE,
@@ -291,7 +276,7 @@ pub fn delete_group(
 /// List snapshot files.
 pub fn list_snapshot_files(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     _info: &ApiMethod,
@@ -319,18 +304,10 @@ pub fn list_snapshot_files(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -343,7 +320,7 @@ pub fn list_snapshot_files(
 /// Delete backup snapshot.
 pub fn delete_snapshot(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     _info: &ApiMethod,
@@ -370,12 +347,10 @@ pub fn delete_snapshot(
     streaming: true,
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
             "backup-type": {
                 optional: true,
-                schema: BACKUP_TYPE_SCHEMA,
+                type: BackupType,
             },
             "backup-id": {
                 optional: true,
@@ -394,7 +369,7 @@ pub fn delete_snapshot(
 /// List backup snapshots.
 pub fn list_snapshots(
     store: String,
-    backup_type: Option<String>,
+    backup_type: Option<BackupType>,
     backup_id: Option<String>,
     _param: Value,
     _info: &ApiMethod,
@@ -424,9 +399,10 @@ pub fn list_snapshots(
     };

     let info_to_snapshot_list_item = |group: &BackupGroup, owner, info: BackupInfo| {
-        let backup_type = group.backup_type().to_string();
-        let backup_id = group.backup_id().to_string();
-        let backup_time = info.backup_dir.backup_time();
+        let backup = pbs_api_types::BackupDir {
+            group: group.into(),
+            time: info.backup_dir.backup_time(),
+        };
         let protected = info.backup_dir.is_protected(datastore.base_path());

         match get_all_snapshot_files(&datastore, &info) {
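Snapshots get the same treatment: instead of three loose backup_type/backup_id/backup_time locals that later get copied into SnapshotListItem, the closure now builds one pbs_api_types::BackupDir (group plus timestamp) up front. This is also why code further down can say `item.backup.time`, `item.backup.ty()` and `item.backup.id()`. Sketch of the implied type, again with assumed attribute details:

// Sketch: api-side snapshot ("dir") value = group + backup time.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct BackupDir {
    #[serde(flatten)]
    pub group: BackupGroup, // "backup-type" + "backup-id"
    #[serde(rename = "backup-time")]
    pub time: i64,
}

impl BackupDir {
    // Convenience accessors matching the `item.backup.ty()` / `item.backup.id()`
    // call sites in the pull code below; assumed to simply forward to the group.
    pub fn ty(&self) -> BackupType {
        self.group.ty
    }
    pub fn id(&self) -> &str {
        &self.group.id
    }
}

SnapshotListItem then embeds this as its flattened `backup` field, which is what the two SnapshotListItem hunks right after this one show.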
@@ -458,9 +434,7 @@ pub fn list_snapshots(
             let size = Some(files.iter().map(|x| x.size.unwrap_or(0)).sum());

             SnapshotListItem {
-                backup_type,
-                backup_id,
-                backup_time,
+                backup,
                 comment,
                 verification,
                 fingerprint,
@@ -483,9 +457,7 @@ pub fn list_snapshots(
                 .collect();

             SnapshotListItem {
-                backup_type,
-                backup_id,
-                backup_time,
+                backup,
                 comment: None,
                 verification: None,
                 fingerprint: None,
@@ -550,10 +522,9 @@ fn get_snapshots_count(store: &DataStore, filter_owner: Option<&Authid>) -> Resu
         // only include groups with snapshots, counting/displaying emtpy groups can confuse
         if snapshot_count > 0 {
             let type_count = match group.backup_type() {
-                "ct" => counts.ct.get_or_insert(Default::default()),
-                "vm" => counts.vm.get_or_insert(Default::default()),
-                "host" => counts.host.get_or_insert(Default::default()),
-                _ => counts.other.get_or_insert(Default::default()),
+                BackupType::Ct => counts.ct.get_or_insert(Default::default()),
+                BackupType::Vm => counts.vm.get_or_insert(Default::default()),
+                BackupType::Host => counts.host.get_or_insert(Default::default()),
             };

             type_count.groups += 1;
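One concrete payoff of the enum shows up in the hunk above: the old string match needed the `_ => counts.other` fallback because any string could arrive here, while the new match over BackupType is exhaustive with exactly three arms, and adding a variant later becomes a compile error rather than a silently miscounted group. A self-contained sketch of the same counting logic (Counts and TypeCounts here are simplified stand-ins, not the real API types):

// Sketch: exhaustive per-type counting with stand-in count types.
#[derive(Default)]
struct TypeCounts {
    groups: u64,
    snapshots: u64,
}

#[derive(Default)]
struct Counts {
    ct: Option<TypeCounts>,
    vm: Option<TypeCounts>,
    host: Option<TypeCounts>,
}

fn count_group(counts: &mut Counts, ty: BackupType, snapshots_in_group: u64) {
    let type_count = match ty {
        BackupType::Ct => counts.ct.get_or_insert(Default::default()),
        BackupType::Vm => counts.vm.get_or_insert(Default::default()),
        BackupType::Host => counts.host.get_or_insert(Default::default()),
    };
    type_count.groups += 1;
    type_count.snapshots += snapshots_in_group;
}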
@@ -630,7 +601,7 @@ pub fn status(
             schema: DATASTORE_SCHEMA,
         },
         "backup-type": {
-            schema: BACKUP_TYPE_SCHEMA,
+            type: BackupType,
             optional: true,
         },
         "backup-id": {
@@ -664,7 +635,7 @@ pub fn status(
 /// or all backups in the datastore.
 pub fn verify(
     store: String,
-    backup_type: Option<String>,
+    backup_type: Option<BackupType>,
     backup_id: Option<String>,
     backup_time: Option<i64>,
     ignore_verified: Option<bool>,
@@ -771,12 +742,8 @@ pub fn verify(
 #[api(
     input: {
         properties: {
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-type": { type: BackupType },
             "dry-run": {
                 optional: true,
                 type: bool,
@@ -800,7 +767,7 @@ pub fn verify(
 /// Prune a group on the datastore
 pub fn prune(
     backup_id: String,
-    backup_type: String,
+    backup_type: BackupType,
     dry_run: bool,
     prune_options: PruneOptions,
     store: String,
@@ -809,13 +776,13 @@ pub fn prune(
 ) -> Result<Value, Error> {
     let auth_id: Authid = rpcenv.get_auth_id().unwrap().parse()?;

-    let group = BackupGroup::new(&backup_type, &backup_id);
+    let group = BackupGroup::new(backup_type, &backup_id);

     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

     check_priv_or_backup_owner(&datastore, &group, &auth_id, PRIV_DATASTORE_MODIFY)?;

-    let worker_id = format!("{}:{}/{}", store, &backup_type, &backup_id);
+    let worker_id = format!("{}:{}/{}", store, backup_type, &backup_id);

     let mut prune_result = Vec::new();

@@ -1111,7 +1078,7 @@ pub fn download_file(

     let file_name = required_string_param(&param, "file-name")?.to_owned();

-    let backup_type = required_string_param(&param, "backup-type")?;
+    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
     let backup_id = required_string_param(&param, "backup-id")?;
     let backup_time = required_integer_param(&param, "backup-time")?;

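Handlers like download_file that pull their arguments out of the raw `param` value follow the same pattern: the string is parsed into the enum right where it is read, so an invalid type fails early with a parse error instead of travelling through the code as an arbitrary string. A tiny illustrative round trip (the helper name is made up; only parse() and Display are the point):

// Sketch: parse-early pattern used by the param-based handlers above.
fn parse_backup_type(raw: &str) -> Result<BackupType, anyhow::Error> {
    let ty: BackupType = raw.parse()?; // only "vm", "ct" or "host" are accepted
    debug_assert_eq!(ty.to_string(), raw); // Display returns the wire form again
    Ok(ty)
}

The same Display impl is what lets prune() above keep its `format!("{}:{}/{}", store, backup_type, backup_id)` worker id unchanged apart from dropping the `&`.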
@@ -1194,7 +1161,7 @@ pub fn download_file_decoded(

     let file_name = required_string_param(&param, "file-name")?.to_owned();

-    let backup_type = required_string_param(&param, "backup-type")?;
+    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
     let backup_id = required_string_param(&param, "backup-id")?;
     let backup_time = required_integer_param(&param, "backup-time")?;

@@ -1320,7 +1287,7 @@ pub fn upload_backup_log(

     let file_name = CLIENT_LOG_BLOB_NAME;

-    let backup_type = required_string_param(&param, "backup-type")?;
+    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
     let backup_id = required_string_param(&param, "backup-id")?;
     let backup_time = required_integer_param(&param, "backup-time")?;

@@ -1369,18 +1336,10 @@ pub fn upload_backup_log(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             "filepath": {
                 description: "Base64 encoded path.",
                 type: String,
@@ -1394,7 +1353,7 @@ pub fn upload_backup_log(
 /// Get the entries of the given path of the catalog
 pub fn catalog(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     filepath: String,
@@ -1481,7 +1440,7 @@ pub fn pxar_file_download(

     let filepath = required_string_param(&param, "filepath")?.to_owned();

-    let backup_type = required_string_param(&param, "backup-type")?;
+    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
     let backup_id = required_string_param(&param, "backup-id")?;
     let backup_time = required_integer_param(&param, "backup-time")?;

@@ -1659,15 +1618,9 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
         },
     },
     access: {
@@ -1677,7 +1630,7 @@ pub fn get_active_operations(store: String, _param: Value) -> Result<Value, Erro
 /// Get "notes" for a backup group
 pub fn get_group_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<String, Error> {
@@ -1695,15 +1648,9 @@ pub fn get_group_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
             notes: {
                 description: "A multiline text.",
             },
@@ -1718,7 +1665,7 @@ pub fn get_group_notes(
 /// Set "notes" for a backup group
 pub fn set_group_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     notes: String,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1739,18 +1686,10 @@ pub fn set_group_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -1760,7 +1699,7 @@ pub fn set_group_notes(
 /// Get "notes" for a specific backup
 pub fn get_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1787,18 +1726,10 @@ pub fn get_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             notes: {
                 description: "A multiline text.",
             },
@@ -1813,7 +1744,7 @@ pub fn get_notes(
 /// Set "notes" for a specific backup
 pub fn set_notes(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     notes: String,
@@ -1843,18 +1774,10 @@ pub fn set_notes(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
         },
     },
     access: {
@@ -1864,7 +1787,7 @@ pub fn set_notes(
 /// Query protection for a specific backup
 pub fn get_protection(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     rpcenv: &mut dyn RpcEnvironment,
@@ -1887,18 +1810,10 @@ pub fn get_protection(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
-            "backup-time": {
-                schema: BACKUP_TIME_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
+            "backup-time": { schema: BACKUP_TIME_SCHEMA },
             protected: {
                 description: "Enable/disable protection.",
             },
@@ -1913,7 +1828,7 @@ pub fn get_protection(
 /// En- or disable protection for a specific backup
 pub fn set_protection(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     backup_time: i64,
     protected: bool,
@@ -1937,15 +1852,9 @@ pub fn set_protection(
 #[api(
     input: {
         properties: {
-            store: {
-                schema: DATASTORE_SCHEMA,
-            },
-            "backup-type": {
-                schema: BACKUP_TYPE_SCHEMA,
-            },
-            "backup-id": {
-                schema: BACKUP_ID_SCHEMA,
-            },
+            store: { schema: DATASTORE_SCHEMA },
+            "backup-type": { type: BackupType },
+            "backup-id": { schema: BACKUP_ID_SCHEMA },
             "new-owner": {
                 type: Authid,
             },
@@ -1959,7 +1868,7 @@ pub fn set_protection(
 /// Change owner of a backup group
 pub fn set_backup_owner(
     store: String,
-    backup_type: String,
+    backup_type: BackupType,
     backup_id: String,
     new_owner: Authid,
     rpcenv: &mut dyn RpcEnvironment,
@@ -16,7 +16,7 @@ use proxmox_schema::*;
 use proxmox_sys::sortable;

 use pbs_api_types::{
-    Authid, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
+    Authid, BackupType, Operation, SnapshotVerifyState, VerifyState, BACKUP_ARCHIVE_NAME_SCHEMA,
     BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA,
     DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
 };
@@ -82,7 +82,7 @@ fn upgrade_to_backup_protocol(

     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Write))?;

-    let backup_type = required_string_param(&param, "backup-type")?;
+    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
     let backup_id = required_string_param(&param, "backup-id")?;
     let backup_time = required_integer_param(&param, "backup-time")?;

@@ -109,7 +109,7 @@ fn upgrade_to_backup_protocol(

     let backup_group = BackupGroup::new(backup_type, backup_id);

-    let worker_type = if backup_type == "host" && backup_id == "benchmark" {
+    let worker_type = if backup_type == BackupType::Host && backup_id == "benchmark" {
         if !benchmark {
             bail!("unable to run benchmark without --benchmark flags");
         }
@@ -16,9 +16,9 @@ use proxmox_schema::{BooleanSchema, ObjectSchema};
 use proxmox_sys::sortable;

 use pbs_api_types::{
-    Authid, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA,
-    BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA, PRIV_DATASTORE_BACKUP,
-    PRIV_DATASTORE_READ,
+    Authid, BackupType, Operation, BACKUP_ARCHIVE_NAME_SCHEMA, BACKUP_ID_SCHEMA,
+    BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, CHUNK_DIGEST_SCHEMA, DATASTORE_SCHEMA,
+    PRIV_DATASTORE_BACKUP, PRIV_DATASTORE_READ,
 };
 use pbs_config::CachedUserInfo;
 use pbs_datastore::backup_info::BackupDir;
@@ -90,7 +90,7 @@ fn upgrade_to_backup_reader_protocol(

     let datastore = DataStore::lookup_datastore(&store, Some(Operation::Read))?;

-    let backup_type = required_string_param(&param, "backup-type")?;
+    let backup_type: BackupType = required_string_param(&param, "backup-type")?.parse()?;
     let backup_id = required_string_param(&param, "backup-id")?;
     let backup_time = required_integer_param(&param, "backup-time")?;

@@ -441,7 +441,7 @@ pub fn list_content(
     for (store, snapshot) in media_catalog_snapshot_list(status_path, &media_id)? {
         let backup_dir: BackupDir = snapshot.parse()?;

-        if let Some(ref backup_type) = filter.backup_type {
+        if let Some(backup_type) = filter.backup_type {
             if backup_dir.group().backup_type() != backup_type {
                 continue;
             }
@@ -8,7 +8,7 @@ use anyhow::{bail, format_err, Error};

 use proxmox_sys::{task_log, WorkerTaskContext};

-use pbs_api_types::{Authid, CryptMode, SnapshotVerifyState, VerifyState, UPID};
+use pbs_api_types::{Authid, BackupType, CryptMode, SnapshotVerifyState, VerifyState, UPID};
 use pbs_datastore::backup_info::{BackupDir, BackupGroup, BackupInfo};
 use pbs_datastore::index::IndexFile;
 use pbs_datastore::manifest::{archive_type, ArchiveType, BackupManifest, FileInfo};
@@ -539,7 +539,9 @@ pub fn verify_all_backups(

     let mut list = match verify_worker.datastore.iter_backup_groups_ok() {
         Ok(list) => list
-            .filter(|group| !(group.backup_type() == "host" && group.backup_id() == "benchmark"))
+            .filter(|group| {
+                !(group.backup_type() == BackupType::Host && group.backup_id() == "benchmark")
+            })
             .filter(filter_by_owner)
             .collect::<Vec<BackupGroup>>(),
         Err(err) => {
@@ -523,7 +523,7 @@ pub fn complete_remote_datastore_group(_arg: &str, param: &HashMap<String, Strin
         .await
     }) {
         for item in data {
-            list.push(format!("{}/{}", item.backup_type, item.backup_id));
+            list.push(format!("{}/{}", item.backup.ty, item.backup.id));
         }
     }
 }
@@ -66,7 +66,7 @@ pub fn complete_datastore_group_filter(_arg: &str, param: &HashMap<String, Strin
         list.extend(
             groups
                 .iter()
-                .map(|group| format!("group:{}/{}", group.backup_type, group.backup_id)),
+                .map(|group| format!("group:{}/{}", group.backup.ty, group.backup.id)),
         );
     }
 }
@@ -577,7 +577,7 @@ pub async fn pull_group(
     let mut result = client.get(&path, Some(args)).await?;
     let mut list: Vec<SnapshotListItem> = serde_json::from_value(result["data"].take())?;

-    list.sort_unstable_by(|a, b| a.backup_time.cmp(&b.backup_time));
+    list.sort_unstable_by(|a, b| a.backup.time.cmp(&b.backup.time));

     client.login().await?; // make sure auth is complete

@@ -599,7 +599,7 @@ pub async fn pull_group(
     };

     for (pos, item) in list.into_iter().enumerate() {
-        let snapshot = BackupDir::new(item.backup_type, item.backup_id, item.backup_time)?;
+        let snapshot = BackupDir::new(item.backup.ty(), item.backup.id(), item.backup.time)?;

         // in-progress backups can't be synced
         if item.size.is_none() {
@@ -712,9 +712,9 @@ pub async fn pull_store(

     let total_count = list.len();
     list.sort_unstable_by(|a, b| {
-        let type_order = a.backup_type.cmp(&b.backup_type);
+        let type_order = a.backup.ty.cmp(&b.backup.ty);
         if type_order == std::cmp::Ordering::Equal {
-            a.backup_id.cmp(&b.backup_id)
+            a.backup.id.cmp(&b.backup.id)
         } else {
             type_order
         }
@@ -726,7 +726,7 @@ pub async fn pull_store(

     let list: Vec<BackupGroup> = list
         .into_iter()
-        .map(|item| BackupGroup::new(item.backup_type, item.backup_id))
+        .map(|item| BackupGroup::new(item.backup.ty, item.backup.id))
         .collect();

     let list = if let Some(ref group_filter) = &params.group_filter {