api-types: add namespace to BackupGroup

Make call sites simpler by adding a helper that accepts either a group or a
directory

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
This commit is contained in:
Wolfgang Bumiller
2022-04-21 15:04:59 +02:00
committed by Thomas Lamprecht
parent 42103c467d
commit 8c74349b08
22 changed files with 431 additions and 319 deletions

View File

@ -14,7 +14,7 @@ use proxmox_router::{
};
use proxmox_schema::{api, ApiType, ReturnType};
use pbs_api_types::BackupType;
use pbs_api_types::{BackupNamespace, BackupType};
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupRepository, BackupWriter};
use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
@ -242,9 +242,13 @@ async fn test_upload_speed(
client,
crypt_config.clone(),
repo.store(),
BackupType::Host,
"benchmark",
backup_time,
&(
BackupNamespace::root(),
BackupType::Host,
"benchmark".to_string(),
backup_time,
)
.into(),
false,
true,
)

View File

@ -14,9 +14,9 @@ use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::json::required_string_param;
use crate::{
api_datastore_latest_snapshot, complete_backup_snapshot, complete_group_or_snapshot,
complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
extract_repository_from_value, format_key_source, record_repository, BackupDir, BackupGroup,
complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name,
complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group,
extract_repository_from_value, format_key_source, record_repository, BackupDir,
BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
};
@ -68,16 +68,8 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
let client = connect(&repo)?;
let client = BackupReader::start(
client,
crypt_config.clone(),
repo.store(),
snapshot.group.ty,
&snapshot.group.id,
snapshot.time,
true,
)
.await?;
let client =
BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@ -153,13 +145,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
let path = required_string_param(&param, "snapshot")?;
let archive_name = required_string_param(&param, "archive-name")?;
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group: BackupGroup = path.parse()?;
api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot: BackupDir = path.parse()?;
(snapshot.group.ty, snapshot.group.id, snapshot.time)
};
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let crypto = crypto_parameters(&param)?;
@ -186,9 +172,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
client,
crypt_config.clone(),
repo.store(),
backup_type,
&backup_id,
backup_time,
&backup_dir,
true,
)
.await?;

View File

@ -7,6 +7,7 @@ use std::task::Context;
use anyhow::{bail, format_err, Error};
use futures::stream::{StreamExt, TryStreamExt};
use serde::Deserialize;
use serde_json::{json, Value};
use tokio::sync::mpsc;
use tokio_stream::wrappers::ReceiverStream;
@ -22,10 +23,10 @@ use proxmox_time::{epoch_i64, strftime_local};
use pxar::accessor::{MaybeReady, ReadAt, ReadAtOperation};
use pbs_api_types::{
Authid, BackupDir, BackupGroup, BackupType, CryptMode, Fingerprint, GroupListItem, HumanByte,
PruneListItem, PruneOptions, RateLimitConfig, SnapshotListItem, StorageStatus,
BACKUP_ID_SCHEMA, BACKUP_TIME_SCHEMA, BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA,
TRAFFIC_CONTROL_RATE_SCHEMA,
Authid, BackupDir, BackupGroup, BackupNamespace, BackupPart, BackupType, CryptMode,
Fingerprint, GroupListItem, HumanByte, PruneListItem, PruneOptions, RateLimitConfig,
SnapshotListItem, StorageStatus, BACKUP_ID_SCHEMA, BACKUP_NAMESPACE_SCHEMA, BACKUP_TIME_SCHEMA,
BACKUP_TYPE_SCHEMA, TRAFFIC_CONTROL_BURST_SCHEMA, TRAFFIC_CONTROL_RATE_SCHEMA,
};
use pbs_client::catalog_shell::Shell;
use pbs_client::tools::{
@ -148,7 +149,7 @@ pub async fn api_datastore_latest_snapshot(
client: &HttpClient,
store: &str,
group: BackupGroup,
) -> Result<(BackupType, String, i64), Error> {
) -> Result<BackupDir, Error> {
let list = api_datastore_list_snapshots(client, store, Some(group.clone())).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
@ -158,7 +159,20 @@ pub async fn api_datastore_latest_snapshot(
list.sort_unstable_by(|a, b| b.backup.time.cmp(&a.backup.time));
Ok((group.ty, group.id, list[0].backup.time))
Ok((group, list[0].backup.time).into())
}
/// Resolve a user-supplied snapshot path to a concrete [`BackupDir`].
///
/// The path may name either a specific snapshot directory or a backup group:
/// - a snapshot directory is returned as-is,
/// - a group is resolved to its most recent snapshot via
///   [`api_datastore_latest_snapshot`].
///
/// # Errors
///
/// Fails if `path` does not parse as a [`BackupPart`], or if the latest
/// snapshot lookup for a group fails (e.g. the group has no snapshots).
pub async fn dir_or_last_from_group(
    client: &HttpClient,
    repo: &BackupRepository,
    path: &str,
) -> Result<BackupDir, Error> {
    match path.parse::<BackupPart>()? {
        BackupPart::Dir(dir) => Ok(dir),
        BackupPart::Group(group) => {
            // `client` is already a reference; passing `&client` would create a
            // needless `&&HttpClient` borrow (Clippy: needless_borrow).
            api_datastore_latest_snapshot(client, repo.store(), group).await
        }
    }
}
async fn backup_directory<P: AsRef<Path>>(
@ -251,13 +265,12 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
record_repository(&repo);
let render_group_path = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
let group = BackupGroup::new(item.backup.ty, item.backup.id);
Ok(group.to_string())
let item = GroupListItem::deserialize(record)?;
Ok(item.backup.to_string())
};
let render_last_backup = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
let item = GroupListItem::deserialize(record)?;
let snapshot = BackupDir {
group: item.backup,
time: item.last_backup,
@ -266,7 +279,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
};
let render_files = |_v: &Value, record: &Value| -> Result<String, Error> {
let item: GroupListItem = serde_json::from_value(record.to_owned())?;
let item = GroupListItem::deserialize(record)?;
Ok(pbs_tools::format::render_backup_file_list(&item.files))
};
@ -560,6 +573,10 @@ fn spawn_catalog_upload(
optional: true,
default: false,
},
"backup-ns": {
schema: BACKUP_NAMESPACE_SCHEMA,
optional: true,
},
"backup-type": {
schema: BACKUP_TYPE_SCHEMA,
optional: true,
@ -653,6 +670,14 @@ async fn create_backup(
.as_str()
.unwrap_or(proxmox_sys::nodename());
let backup_namespace: BackupNamespace = match param.get("backup-ns") {
Some(ns) => ns
.as_str()
.ok_or_else(|| format_err!("bad namespace {:?}", ns))?
.parse()?,
None => BackupNamespace::root(),
};
let backup_type: BackupType = param["backup-type"].as_str().unwrap_or("host").parse()?;
let include_dev = param["include-dev"].as_array();
@ -775,12 +800,13 @@ async fn create_backup(
let client = connect_rate_limited(&repo, rate_limit)?;
record_repository(&repo);
println!(
"Starting backup: {}/{}/{}",
let snapshot = BackupDir::from((
backup_namespace,
backup_type,
backup_id,
pbs_datastore::BackupDir::backup_time_to_string(backup_time)?
);
backup_id.to_owned(),
backup_time,
));
println!("Starting backup: {snapshot}");
println!("Client name: {}", proxmox_sys::nodename());
@ -827,9 +853,7 @@ async fn create_backup(
client,
crypt_config.clone(),
repo.store(),
backup_type,
backup_id,
backup_time,
&snapshot,
verbose,
false,
)
@ -873,7 +897,6 @@ async fn create_backup(
None
};
let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
let mut manifest = BackupManifest::new(snapshot);
let mut catalog = None;
@ -1182,13 +1205,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
let path = json::required_string_param(&param, "snapshot")?;
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group: BackupGroup = path.parse()?;
api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot: BackupDir = path.parse()?;
(snapshot.group.ty, snapshot.group.id, snapshot.time)
};
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let target = json::required_string_param(&param, "target")?;
let target = if target == "-" { None } else { Some(target) };
@ -1211,9 +1228,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
client,
crypt_config.clone(),
repo.store(),
backup_type,
&backup_id,
backup_time,
&backup_dir,
true,
)
.await?;

View File

@ -18,7 +18,6 @@ use proxmox_schema::*;
use proxmox_sys::fd::Fd;
use proxmox_sys::sortable;
use pbs_api_types::{BackupDir, BackupGroup};
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_config::key_config::load_and_decrypt_key;
@ -29,8 +28,8 @@ use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::json::required_string_param;
use crate::{
api_datastore_latest_snapshot, complete_group_or_snapshot, complete_img_archive_name,
complete_pxar_archive_name, complete_repository, connect, extract_repository_from_value,
complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name,
complete_repository, connect, dir_or_last_from_group, extract_repository_from_value,
record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
};
@ -199,13 +198,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
record_repository(&repo);
let path = required_string_param(&param, "snapshot")?;
let (backup_type, backup_id, backup_time) = if path.matches('/').count() == 1 {
let group: BackupGroup = path.parse()?;
api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot: BackupDir = path.parse()?;
(snapshot.group.ty, snapshot.group.id, snapshot.time)
};
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let crypt_config = match keyfile {
@ -236,9 +229,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
client,
crypt_config.clone(),
repo.store(),
backup_type,
&backup_id,
backup_time,
&backup_dir,
true,
)
.await?;