split the namespace out of BackupGroup/Dir api types

We decided to go this route because it is most likely the safer choice for
the API: this way, namespace support has to be added explicitly to each of
the various API endpoints.

For example, 'pull' should have two namespaces: local and remote, and the
GroupFilter (which would otherwise contain exactly *one* namespace
parameter) needs to be applied to both sides (to decide what to pull from
the remote, and what to *remove* locally as cleanup).
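
A rough sketch of what this means for the pull side (hedged illustration
only; the `PullParameters` shape and field names below are assumptions, not
the actual implementation):

    use pbs_api_types::{BackupNamespace, GroupFilter};

    // Illustration: with the namespace split out of BackupGroup, a pull job
    // carries one namespace per side instead of one hidden inside each group.
    struct PullParameters {
        remote_ns: BackupNamespace, // namespace to read from on the remote
        ns: BackupNamespace,        // local target namespace (also the cleanup scope)
        group_filter: Option<Vec<GroupFilter>>, // filters plain groups, applied on both sides
    }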

The *datastore* types still contain the namespace and have a
`.backup_ns()` getter.
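
A minimal sketch of the resulting split (the `datastore.backup_dir(...)`
constructor is an assumption for illustration; only `.backup_ns()` is taken
from the description above):

    use std::sync::Arc;

    use anyhow::Error;
    use pbs_api_types::{BackupNamespace, BackupType};
    use pbs_datastore::DataStore;

    fn example(datastore: &Arc<DataStore>, ns: BackupNamespace, backup_time: i64) -> Result<(), Error> {
        // The api type is now namespace-free: just type, id and time ...
        let api_dir: pbs_api_types::BackupDir =
            (BackupType::Host, "benchmark".to_string(), backup_time).into();
        // ... while the datastore type still carries the namespace:
        let dir = datastore.backup_dir(ns, api_dir)?; // assumed constructor
        println!("namespace: {}", dir.backup_ns());
        Ok(())
    }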

Note that the datastore's `Display` implementations are no
longer safe to use as a deserializable string.
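
A short sketch of that caveat (assuming the rendered form is just the
`type/id/time` path; the helper name is made up for illustration):

    use anyhow::Error;

    fn display_is_lossy(dir: &pbs_datastore::BackupDir) -> Result<pbs_api_types::BackupDir, Error> {
        // The rendered string has no namespace component, so parsing it back
        // yields a plain api BackupDir and the namespace is silently dropped.
        let rendered = dir.to_string(); // e.g. "host/benchmark/2022-05-09T13:39:29Z"
        Ok(rendered.parse()?)
    }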

Additionally, some datastore-based methods have now been exposed via the
BackupGroup/BackupDir types to avoid a "round trip" in code.

Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Wolfgang Bumiller
2022-05-09 15:39:29 +02:00
committed by Thomas Lamprecht
parent 1baf9030ad
commit 133d718fe4
25 changed files with 800 additions and 509 deletions


@@ -242,13 +242,8 @@ async fn test_upload_speed(
client,
crypt_config.clone(),
repo.store(),
&(
BackupNamespace::root(),
BackupType::Host,
"benchmark".to_string(),
backup_time,
)
.into(),
&BackupNamespace::root(),
&(BackupType::Host, "benchmark".to_string(), backup_time).into(),
false,
true,
)


@@ -8,6 +8,7 @@ use serde_json::Value;
use proxmox_router::cli::*;
use proxmox_schema::api;
use pbs_api_types::BackupNamespace;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_tools::crypt_config::CryptConfig;
@@ -16,9 +17,9 @@ use pbs_tools::json::required_string_param;
use crate::{
complete_backup_snapshot, complete_group_or_snapshot, complete_pxar_archive_name,
complete_repository, connect, crypto_parameters, decrypt_key, dir_or_last_from_group,
extract_repository_from_value, format_key_source, record_repository, BackupDir,
BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
extract_repository_from_value, format_key_source, optional_ns_param, record_repository,
BackupDir, BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader,
IndexFile, Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
};
#[api(
@@ -28,6 +29,10 @@ use crate::{
schema: REPO_URL_SCHEMA,
optional: true,
},
ns: {
type: BackupNamespace,
optional: true,
},
snapshot: {
type: String,
description: "Snapshot path.",
@@ -48,6 +53,7 @@ use crate::{
async fn dump_catalog(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let backup_ns = optional_ns_param(&param)?;
let path = required_string_param(&param, "snapshot")?;
let snapshot: BackupDir = path.parse()?;
@@ -68,8 +74,15 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
let client = connect(&repo)?;
let client =
BackupReader::start(client, crypt_config.clone(), repo.store(), &snapshot, true).await?;
let client = BackupReader::start(
client,
crypt_config.clone(),
repo.store(),
&backup_ns,
&snapshot,
true,
)
.await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -114,6 +127,10 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
#[api(
input: {
properties: {
ns: {
type: BackupNamespace,
optional: true,
},
"snapshot": {
type: String,
description: "Group/Snapshot path.",
@@ -142,10 +159,11 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
async fn catalog_shell(param: Value) -> Result<(), Error> {
let repo = extract_repository_from_value(&param)?;
let client = connect(&repo)?;
let backup_ns = optional_ns_param(&param)?;
let path = required_string_param(&param, "snapshot")?;
let archive_name = required_string_param(&param, "archive-name")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
let crypto = crypto_parameters(&param)?;
@@ -172,6 +190,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
client,
crypt_config.clone(),
repo.store(),
&backup_ns,
&backup_dir,
true,
)


@@ -127,24 +127,21 @@ fn record_repository(repo: &BackupRepository) {
);
}
enum List {
Any,
Group(BackupGroup),
Namespace(BackupNamespace),
}
async fn api_datastore_list_snapshots(
client: &HttpClient,
store: &str,
list: List,
ns: &BackupNamespace,
group: Option<&BackupGroup>,
) -> Result<Value, Error> {
let path = format!("api2/json/admin/datastore/{}/snapshots", store);
let args = match list {
List::Group(group) => serde_json::to_value(group)?,
List::Namespace(ns) => json!({ "backup-ns": ns }),
List::Any => json!({}),
let mut args = match group {
Some(group) => serde_json::to_value(group)?,
None => json!({}),
};
if !ns.is_root() {
args["backup-ns"] = serde_json::to_value(ns)?;
}
let mut result = client.get(&path, Some(args)).await?;
@@ -154,9 +151,10 @@ async fn api_datastore_list_snapshots(
pub async fn api_datastore_latest_snapshot(
client: &HttpClient,
store: &str,
ns: &BackupNamespace,
group: BackupGroup,
) -> Result<BackupDir, Error> {
let list = api_datastore_list_snapshots(client, store, List::Group(group.clone())).await?;
let list = api_datastore_list_snapshots(client, store, ns, Some(&group)).await?;
let mut list: Vec<SnapshotListItem> = serde_json::from_value(list)?;
if list.is_empty() {
@@ -171,12 +169,13 @@ pub async fn api_datastore_latest_snapshot(
pub async fn dir_or_last_from_group(
client: &HttpClient,
repo: &BackupRepository,
ns: &BackupNamespace,
path: &str,
) -> Result<BackupDir, Error> {
match path.parse::<BackupPart>()? {
BackupPart::Dir(dir) => Ok(dir),
BackupPart::Group(group) => {
api_datastore_latest_snapshot(&client, repo.store(), group).await
api_datastore_latest_snapshot(&client, repo.store(), ns, group).await
}
}
}
@@ -242,6 +241,14 @@ async fn backup_image<P: AsRef<Path>>(
Ok(stats)
}
pub fn optional_ns_param(param: &Value) -> Result<BackupNamespace, Error> {
Ok(match param.get("ns") {
Some(Value::String(ns)) => ns.parse()?,
Some(_) => bail!("invalid namespace parameter"),
None => BackupNamespace::root(),
})
}
#[api(
input: {
properties: {
@@ -270,10 +277,7 @@ async fn list_backup_groups(param: Value) -> Result<Value, Error> {
let path = format!("api2/json/admin/datastore/{}/groups", repo.store());
let backup_ns: BackupNamespace = match &param["ns"] {
Value::String(s) => s.parse()?,
_ => BackupNamespace::root(),
};
let backup_ns = optional_ns_param(&param)?;
let mut result = client
.get(&path, Some(json!({ "backup-ns": backup_ns })))
.await?;
@@ -692,7 +696,7 @@ async fn create_backup(
.as_str()
.unwrap_or(proxmox_sys::nodename());
let backup_namespace: BackupNamespace = match param.get("backup-ns") {
let backup_ns: BackupNamespace = match param.get("backup-ns") {
Some(ns) => ns
.as_str()
.ok_or_else(|| format_err!("bad namespace {:?}", ns))?
@@ -822,13 +826,12 @@ async fn create_backup(
let client = connect_rate_limited(&repo, rate_limit)?;
record_repository(&repo);
let snapshot = BackupDir::from((
backup_namespace,
backup_type,
backup_id.to_owned(),
backup_time,
));
println!("Starting backup: {snapshot}");
let snapshot = BackupDir::from((backup_type, backup_id.to_owned(), backup_time));
if backup_ns.is_root() {
println!("Starting backup: {snapshot}");
} else {
println!("Starting backup: [{backup_ns}]:{snapshot}");
}
println!("Client name: {}", proxmox_sys::nodename());
@@ -875,6 +878,7 @@ async fn create_backup(
client,
crypt_config.clone(),
repo.store(),
&backup_ns,
&snapshot,
verbose,
false,
@@ -1151,55 +1155,59 @@ fn parse_archive_type(name: &str) -> (String, ArchiveType) {
}
#[api(
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
snapshot: {
type: String,
description: "Group/Snapshot path.",
},
"archive-name": {
description: "Backup archive name.",
type: String,
},
target: {
type: String,
description: r###"Target directory path. Use '-' to write to standard output.
input: {
properties: {
repository: {
schema: REPO_URL_SCHEMA,
optional: true,
},
ns: {
type: BackupNamespace,
optional: true,
},
snapshot: {
type: String,
description: "Group/Snapshot path.",
},
"archive-name": {
description: "Backup archive name.",
type: String,
},
target: {
type: String,
description: r###"Target directory path. Use '-' to write to standard output.
We do not extract '.pxar' archives when writing to standard output.
"###
},
rate: {
schema: TRAFFIC_CONTROL_RATE_SCHEMA,
optional: true,
},
burst: {
schema: TRAFFIC_CONTROL_BURST_SCHEMA,
optional: true,
},
"allow-existing-dirs": {
type: Boolean,
description: "Do not fail if directories already exists.",
optional: true,
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
"keyfd": {
schema: KEYFD_SCHEMA,
optional: true,
},
"crypt-mode": {
type: CryptMode,
optional: true,
},
}
}
},
rate: {
schema: TRAFFIC_CONTROL_RATE_SCHEMA,
optional: true,
},
burst: {
schema: TRAFFIC_CONTROL_BURST_SCHEMA,
optional: true,
},
"allow-existing-dirs": {
type: Boolean,
description: "Do not fail if directories already exists.",
optional: true,
},
keyfile: {
schema: KEYFILE_SCHEMA,
optional: true,
},
"keyfd": {
schema: KEYFD_SCHEMA,
optional: true,
},
"crypt-mode": {
type: CryptMode,
optional: true,
},
}
}
)]
/// Restore backup repository.
async fn restore(param: Value) -> Result<Value, Error> {
@@ -1225,9 +1233,14 @@ async fn restore(param: Value) -> Result<Value, Error> {
let client = connect_rate_limited(&repo, rate_limit)?;
record_repository(&repo);
let ns = match param.get("ns") {
Some(Value::String(ns)) => ns.parse()?,
Some(_) => bail!("invalid namespace parameter"),
None => BackupNamespace::root(),
};
let path = json::required_string_param(&param, "snapshot")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let backup_dir = dir_or_last_from_group(&client, &repo, &ns, &path).await?;
let target = json::required_string_param(&param, "target")?;
let target = if target == "-" { None } else { Some(target) };
@@ -1250,6 +1263,7 @@ async fn restore(param: Value) -> Result<Value, Error> {
client,
crypt_config.clone(),
repo.store(),
&ns,
&backup_dir,
true,
)


@@ -18,6 +18,7 @@ use proxmox_schema::*;
use proxmox_sys::fd::Fd;
use proxmox_sys::sortable;
use pbs_api_types::BackupNamespace;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_config::key_config::load_and_decrypt_key;
@@ -30,7 +31,7 @@ use pbs_tools::json::required_string_param;
use crate::{
complete_group_or_snapshot, complete_img_archive_name, complete_pxar_archive_name,
complete_repository, connect, dir_or_last_from_group, extract_repository_from_value,
record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
optional_ns_param, record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
};
#[sortable]
@@ -39,6 +40,7 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
&ObjectSchema::new(
"Mount pxar archive.",
&sorted!([
("ns", true, &BackupNamespace::API_SCHEMA,),
(
"snapshot",
false,
@@ -197,8 +199,9 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
record_repository(&repo);
let backup_ns = optional_ns_param(&param)?;
let path = required_string_param(&param, "snapshot")?;
let backup_dir = dir_or_last_from_group(&client, &repo, &path).await?;
let backup_dir = dir_or_last_from_group(&client, &repo, &backup_ns, &path).await?;
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
let crypt_config = match keyfile {
@@ -229,6 +232,7 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
client,
crypt_config.clone(),
repo.store(),
&backup_ns,
&backup_dir,
true,
)


@@ -1,6 +1,6 @@
use std::sync::Arc;
use anyhow::{bail, Error};
use anyhow::Error;
use serde_json::{json, Value};
use proxmox_router::cli::*;
@@ -17,7 +17,7 @@ use pbs_tools::json::required_string_param;
use crate::{
api_datastore_list_snapshots, complete_backup_group, complete_backup_snapshot,
complete_repository, connect, crypto_parameters, extract_repository_from_value,
record_repository, BackupDir, List, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
optional_ns_param, record_repository, BackupDir, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
};
#[api(
@@ -56,17 +56,10 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
.map(|group| group.parse())
.transpose()?;
let backup_ns: Option<BackupNamespace> =
param["ns"].as_str().map(|ns| ns.parse()).transpose()?;
let backup_ns = optional_ns_param(&param)?;
let list = match (group, backup_ns) {
(Some(group), None) => List::Group(group),
(None, Some(ns)) => List::Namespace(ns),
(None, None) => List::Any,
(Some(_), Some(_)) => bail!("'ns' and 'group' parameters are mutually exclusive"),
};
let mut data = api_datastore_list_snapshots(&client, repo.store(), list).await?;
let mut data =
api_datastore_list_snapshots(&client, repo.store(), &backup_ns, group.as_ref()).await?;
record_repository(&repo);