backup client: rustfmt

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
Thomas Lamprecht 2022-04-14 14:06:15 +02:00
parent 00ae34dfda
commit f9a5beaa15
7 changed files with 348 additions and 267 deletions
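
This is a pure rustfmt pass over the backup client sources; no functional changes. The bulk of the diff below is import merging and sorting, plus re-wrapping of long argument lists, builder chains, and single-line if bodies. As a minimal before/after sketch of the import rewrite, taken from the first hunk (that the project uses stock rustfmt defaults is an assumption here):

    // before
    use anyhow::{Error};
    use proxmox_router::{
        cli::{
            OUTPUT_FORMAT,
            ColumnConfig,
        },
        ApiMethod,
        RpcEnvironment,
    };

    // after rustfmt: braces around single imports dropped,
    // nested groups collapsed and sorted
    use anyhow::Error;
    use proxmox_router::{
        cli::{ColumnConfig, OUTPUT_FORMAT},
        ApiMethod, RpcEnvironment,
    };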


@@ -1,34 +1,27 @@
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::{Error};
use serde_json::Value;
use anyhow::Error;
use serde::Serialize;
use serde_json::Value;
use proxmox_schema::{api, ApiType, ReturnType};
use proxmox_router::{
cli::{
OUTPUT_FORMAT,
ColumnConfig,
get_output_format,
format_and_print_result_full,
default_table_format_options,
default_table_format_options, format_and_print_result_full, get_output_format,
ColumnConfig, OUTPUT_FORMAT,
},
ApiMethod,
RpcEnvironment,
ApiMethod, RpcEnvironment,
};
use proxmox_schema::{api, ApiType, ReturnType};
use pbs_tools::crypt_config::CryptConfig;
use pbs_config::key_config::{KeyDerivationConfig, load_and_decrypt_key};
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupRepository, BackupWriter};
use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
use pbs_datastore::data_blob::{DataBlob, DataChunkBuilder};
use pbs_tools::crypt_config::CryptConfig;
use crate::{
KEYFILE_SCHEMA, REPO_URL_SCHEMA,
extract_repository_from_value,
record_repository,
connect,
connect, extract_repository_from_value, record_repository, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
};
#[api()]
@@ -36,7 +29,7 @@ use crate::{
/// Speed test result
struct Speed {
/// The measured speed in Bytes/second
#[serde(skip_serializing_if="Option::is_none")]
#[serde(skip_serializing_if = "Option::is_none")]
speed: Option<f64>,
/// Top result we want to compare with
top: f64,
@@ -81,7 +74,7 @@ struct BenchmarkResult {
verify: Speed,
}
static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
static BENCHMARK_RESULT_2020_TOP: BenchmarkResult = BenchmarkResult {
tls: Speed {
speed: None,
top: 1_000_000.0 * 1235.0, // TLS to localhost, AMD Ryzen 7 2700X
@@ -137,7 +130,6 @@ pub async fn benchmark(
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<(), Error> {
let repo = extract_repository_from_value(&param).ok();
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
@@ -170,11 +162,7 @@ pub async fn benchmark(
}
// print comparison table
fn render_result(
output_format: &str,
benchmark_result: &BenchmarkResult,
) -> Result<(), Error> {
fn render_result(output_format: &str, benchmark_result: &BenchmarkResult) -> Result<(), Error> {
let mut data = serde_json::to_value(benchmark_result)?;
let return_type = ReturnType::new(false, &BenchmarkResult::API_SCHEMA);
@@ -183,31 +171,52 @@ fn render_result(
None => Ok(String::from("not tested")),
Some(speed) => {
let top = value["top"].as_f64().unwrap();
Ok(format!("{:.2} MB/s ({:.0}%)", speed/1_000_000.0, (speed*100.0)/top))
Ok(format!(
"{:.2} MB/s ({:.0}%)",
speed / 1_000_000.0,
(speed * 100.0) / top
))
}
}
};
let options = default_table_format_options()
.column(ColumnConfig::new("tls")
.column(
ColumnConfig::new("tls")
.header("TLS (maximal backup upload speed)")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("sha256")
.right_align(false)
.renderer(render_speed),
)
.column(
ColumnConfig::new("sha256")
.header("SHA256 checksum computation speed")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("compress")
.right_align(false)
.renderer(render_speed),
)
.column(
ColumnConfig::new("compress")
.header("ZStd level 1 compression speed")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("decompress")
.right_align(false)
.renderer(render_speed),
)
.column(
ColumnConfig::new("decompress")
.header("ZStd level 1 decompression speed")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("verify")
.right_align(false)
.renderer(render_speed),
)
.column(
ColumnConfig::new("verify")
.header("Chunk verification speed")
.right_align(false).renderer(render_speed))
.column(ColumnConfig::new("aes256_gcm")
.right_align(false)
.renderer(render_speed),
)
.column(
ColumnConfig::new("aes256_gcm")
.header("AES256 GCM encryption speed")
.right_align(false).renderer(render_speed));
.right_align(false)
.renderer(render_speed),
);
format_and_print_result_full(&mut data, &return_type, output_format, &options);
@@ -220,13 +229,14 @@ async fn test_upload_speed(
crypt_config: Option<Arc<CryptConfig>>,
verbose: bool,
) -> Result<(), Error> {
let backup_time = proxmox_time::epoch_i64();
let client = connect(&repo)?;
record_repository(&repo);
if verbose { eprintln!("Connecting to backup server"); }
if verbose {
eprintln!("Connecting to backup server");
}
let client = BackupWriter::start(
client,
crypt_config.clone(),
@@ -235,13 +245,16 @@ async fn test_upload_speed(
"benchmark",
backup_time,
false,
true
).await?;
true,
)
.await?;
if verbose { eprintln!("Start TLS speed test"); }
if verbose {
eprintln!("Start TLS speed test");
}
let speed = client.upload_speedtest(verbose).await?;
eprintln!("TLS speed: {:.2} MB/s", speed/1_000_000.0);
eprintln!("TLS speed: {:.2} MB/s", speed / 1_000_000.0);
benchmark_result.tls.speed = Some(speed);
@@ -249,11 +262,7 @@ async fn test_upload_speed(
}
// test hash/crypt/compress speed
fn test_crypt_speed(
benchmark_result: &mut BenchmarkResult,
_verbose: bool,
) -> Result<(), Error> {
fn test_crypt_speed(benchmark_result: &mut BenchmarkResult, _verbose: bool) -> Result<(), Error> {
let pw = b"test";
let kdf = KeyDerivationConfig::Scrypt {
@@ -269,44 +278,46 @@ fn test_crypt_speed(
//let random_data = proxmox_sys::linux::random_data(1024*1024)?;
let mut random_data = vec![];
// generate pseudo random byte sequence
for i in 0..256*1024 {
for j in 0..4 {
let byte = ((i >> (j<<3))&0xff) as u8;
random_data.push(byte);
}
// generate pseudo random byte sequence
for i in 0..256 * 1024 {
for j in 0..4 {
let byte = ((i >> (j << 3)) & 0xff) as u8;
random_data.push(byte);
}
}
assert_eq!(random_data.len(), 1024*1024);
assert_eq!(random_data.len(), 1024 * 1024);
let start_time = std::time::Instant::now();
let mut bytes = 0;
loop {
loop {
openssl::sha::sha256(&random_data);
bytes += random_data.len();
if start_time.elapsed().as_micros() > 1_000_000 { break; }
if start_time.elapsed().as_micros() > 1_000_000 {
break;
}
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
benchmark_result.sha256.speed = Some(speed);
eprintln!("SHA256 speed: {:.2} MB/s", speed/1_000_000.0);
eprintln!("SHA256 speed: {:.2} MB/s", speed / 1_000_000.0);
let start_time = std::time::Instant::now();
let mut bytes = 0;
loop {
loop {
let mut reader = &random_data[..];
zstd::stream::encode_all(&mut reader, 1)?;
bytes += random_data.len();
if start_time.elapsed().as_micros() > 3_000_000 { break; }
if start_time.elapsed().as_micros() > 3_000_000 {
break;
}
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
benchmark_result.compress.speed = Some(speed);
eprintln!("Compression speed: {:.2} MB/s", speed/1_000_000.0);
eprintln!("Compression speed: {:.2} MB/s", speed / 1_000_000.0);
let start_time = std::time::Instant::now();
@@ -316,49 +327,51 @@ fn test_crypt_speed(
};
let mut bytes = 0;
loop {
loop {
let mut reader = &compressed_data[..];
let data = zstd::stream::decode_all(&mut reader)?;
bytes += data.len();
if start_time.elapsed().as_micros() > 1_000_000 { break; }
if start_time.elapsed().as_micros() > 1_000_000 {
break;
}
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
benchmark_result.decompress.speed = Some(speed);
eprintln!("Decompress speed: {:.2} MB/s", speed/1_000_000.0);
eprintln!("Decompress speed: {:.2} MB/s", speed / 1_000_000.0);
let start_time = std::time::Instant::now();
let mut bytes = 0;
loop {
loop {
let mut out = Vec::new();
DataBlob::encrypt_benchmark(&crypt_config, &random_data, &mut out)?;
bytes += random_data.len();
if start_time.elapsed().as_micros() > 1_000_000 { break; }
if start_time.elapsed().as_micros() > 1_000_000 {
break;
}
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
benchmark_result.aes256_gcm.speed = Some(speed);
eprintln!("AES256/GCM speed: {:.2} MB/s", speed/1_000_000.0);
eprintln!("AES256/GCM speed: {:.2} MB/s", speed / 1_000_000.0);
let start_time = std::time::Instant::now();
let (chunk, digest) = DataChunkBuilder::new(&random_data)
.compress(true)
.build()?;
let (chunk, digest) = DataChunkBuilder::new(&random_data).compress(true).build()?;
let mut bytes = 0;
loop {
loop {
chunk.verify_unencrypted(random_data.len(), &digest)?;
bytes += random_data.len();
if start_time.elapsed().as_micros() > 1_000_000 { break; }
if start_time.elapsed().as_micros() > 1_000_000 {
break;
}
}
let speed = (bytes as f64)/start_time.elapsed().as_secs_f64();
let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
benchmark_result.verify.speed = Some(speed);
eprintln!("Verify speed: {:.2} MB/s", speed/1_000_000.0);
eprintln!("Verify speed: {:.2} MB/s", speed / 1_000_000.0);
Ok(())
}
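
All of the speed tests above share one timing idiom: repeat a single pass over the 1 MiB pseudo-random buffer until a wall-clock budget is exceeded, then divide the accumulated byte count by the elapsed time. A condensed sketch of that pattern (the helper name measure_speed is hypothetical, not part of this codebase):

    fn measure_speed(budget_micros: u128, mut work: impl FnMut() -> usize) -> f64 {
        let start_time = std::time::Instant::now();
        let mut bytes = 0;
        loop {
            // one pass over the test buffer; returns bytes processed
            bytes += work();
            if start_time.elapsed().as_micros() > budget_micros {
                break;
            }
        }
        // Bytes/second, as reported by the eprintln! calls above
        bytes as f64 / start_time.elapsed().as_secs_f64()
    }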


@@ -1,4 +1,4 @@
use anyhow::{Error};
use anyhow::Error;
use proxmox_router::cli::*;
use proxmox_schema::format::*;
@@ -6,12 +6,10 @@ use proxmox_schema::format::*;
use pbs_client::catalog_shell::catalog_shell_cli;
fn main() -> Result<(), Error> {
match catalog_shell_cli() {
CommandLineInterface::Nested(map) => {
let usage = generate_nested_usage("", &map, DocumentationFormat::ReST);
println!("{}", usage);
}
_ => unreachable!(),
}


@@ -1,41 +1,24 @@
use std::os::unix::fs::OpenOptionsExt;
use std::io::{Seek, SeekFrom};
use std::os::unix::fs::OpenOptionsExt;
use std::sync::Arc;
use anyhow::{bail, format_err, Error};
use serde_json::Value;
use proxmox_schema::api;
use proxmox_router::cli::*;
use proxmox_schema::api;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_tools::json::required_string_param;
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::json::required_string_param;
use crate::{
REPO_URL_SCHEMA,
KEYFD_SCHEMA,
extract_repository_from_value,
format_key_source,
record_repository,
decrypt_key,
api_datastore_latest_snapshot,
complete_repository,
complete_backup_snapshot,
complete_group_or_snapshot,
complete_pxar_archive_name,
connect,
crypto_parameters,
BackupDir,
BackupGroup,
BufferedDynamicReader,
BufferedDynamicReadAt,
CatalogReader,
CATALOG_NAME,
DynamicIndexReader,
IndexFile,
Shell,
api_datastore_latest_snapshot, complete_backup_snapshot, complete_group_or_snapshot,
complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
extract_repository_from_value, format_key_source, record_repository, BackupDir, BackupGroup,
BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
};
#[api(
@@ -63,7 +46,6 @@ use crate::{
)]
/// Dump catalog.
async fn dump_catalog(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let path = required_string_param(&param, "snapshot")?;
@@ -94,18 +76,26 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
snapshot.group().backup_id(),
snapshot.backup_time(),
true,
).await?;
)
.await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
let index = client
.download_dynamic_index(&manifest, CATALOG_NAME)
.await?;
let most_used = index.find_most_used_chunks(8);
let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config,
file_info.chunk_crypt_mode(),
most_used,
);
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
@@ -168,7 +158,11 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot: BackupDir = path.parse()?;
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
(
snapshot.group().backup_type().to_owned(),
snapshot.group().backup_id().to_owned(),
snapshot.backup_time(),
)
};
let crypto = crypto_parameters(&param)?;
@@ -200,7 +194,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
&backup_id,
backup_time,
true,
).await?;
)
.await?;
let mut tmpfile = std::fs::OpenOptions::new()
.write(true)
@@ -211,15 +206,21 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let index = client
.download_dynamic_index(&manifest, &server_archive_name)
.await?;
let most_used = index.find_most_used_chunks(8);
let file_info = manifest.lookup_file_info(&server_archive_name)?;
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config.clone(),
file_info.chunk_crypt_mode(),
most_used,
);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();
let reader: pbs_client::pxar::fuse::Reader =
Arc::new(BufferedDynamicReadAt::new(reader));
let reader: pbs_client::pxar::fuse::Reader = Arc::new(BufferedDynamicReadAt::new(reader));
let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
client.download(CATALOG_NAME, &mut tmpfile).await?;
@@ -233,7 +234,12 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
let most_used = index.find_most_used_chunks(8);
let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config,
file_info.chunk_crypt_mode(),
most_used,
);
let mut reader = BufferedDynamicReader::new(index, chunk_reader);
let mut catalogfile = std::fs::OpenOptions::new()
.write(true)
@@ -246,11 +252,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
catalogfile.seek(SeekFrom::Start(0))?;
let catalog_reader = CatalogReader::new(catalogfile);
let state = Shell::new(
catalog_reader,
&server_archive_name,
decoder,
).await?;
let state = Shell::new(catalog_reader, &server_archive_name, decoder).await?;
println!("Starting interactive shell");
state.shell().await?;


@@ -5,29 +5,28 @@ use anyhow::{bail, format_err, Error};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use proxmox_sys::linux::tty;
use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox_router::cli::{
complete_file_name, format_and_print_result_full, get_output_format,
CliCommand, CliCommandMap, ColumnConfig,
OUTPUT_FORMAT,
complete_file_name, format_and_print_result_full, get_output_format, CliCommand, CliCommandMap,
ColumnConfig, OUTPUT_FORMAT,
};
use proxmox_schema::{api, ApiType, ReturnType};
use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
use proxmox_sys::linux::tty;
use pbs_api_types::{PASSWORD_HINT_SCHEMA, Kdf, KeyInfo};
use pbs_config::key_config::{KeyConfig, rsa_decrypt_key_config};
use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
use pbs_api_types::{Kdf, KeyInfo, PASSWORD_HINT_SCHEMA};
use pbs_client::tools::key_source::{
find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
place_default_encryption_key, place_default_master_pubkey,
};
use pbs_config::key_config::{rsa_decrypt_key_config, KeyConfig};
use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
#[api]
#[derive(Deserialize, Serialize)]
/// RSA public key information
pub struct RsaPubKeyInfo {
/// Path to key (if stored in a file)
#[serde(skip_serializing_if="Option::is_none")]
#[serde(skip_serializing_if = "Option::is_none")]
pub path: Option<String>,
/// RSA exponent
pub exponent: String,
@@ -37,7 +36,7 @@ pub struct RsaPubKeyInfo {
pub length: usize,
}
#[cfg(not(target_arch="wasm32"))]
#[cfg(not(target_arch = "wasm32"))]
impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo {
type Error = anyhow::Error;
@@ -55,7 +54,6 @@ impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubKeyInfo
}
}
#[api(
input: {
properties: {
@@ -391,7 +389,12 @@ fn create_master_key() -> Result<(), Error> {
let filename_priv = "master-private.pem";
println!("Writing private master key to {}", filename_priv);
replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new(), true)?;
replace_file(
filename_priv,
priv_key.as_slice(),
CreateOptions::new(),
true,
)?;
Ok(())
}


@@ -13,32 +13,25 @@ use nix::unistd::{fork, ForkResult};
use serde_json::Value;
use tokio::signal::unix::{signal, SignalKind};
use proxmox_sys::sortable;
use proxmox_sys::fd::Fd;
use proxmox_router::{ApiHandler, ApiMethod, RpcEnvironment, cli::*};
use proxmox_router::{cli::*, ApiHandler, ApiMethod, RpcEnvironment};
use proxmox_schema::*;
use proxmox_sys::fd::Fd;
use proxmox_sys::sortable;
use pbs_tools::crypt_config::CryptConfig;
use pbs_config::key_config::load_and_decrypt_key;
use pbs_datastore::{BackupDir, BackupGroup, };
use pbs_datastore::index::IndexFile;
use pbs_datastore::dynamic_index::BufferedDynamicReader;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_client::{BackupReader, RemoteChunkReader};
use pbs_config::key_config::load_and_decrypt_key;
use pbs_datastore::cached_chunk_reader::CachedChunkReader;
use pbs_datastore::dynamic_index::BufferedDynamicReader;
use pbs_datastore::index::IndexFile;
use pbs_datastore::{BackupDir, BackupGroup};
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::json::required_string_param;
use crate::{
REPO_URL_SCHEMA,
extract_repository_from_value,
complete_pxar_archive_name,
complete_img_archive_name,
complete_group_or_snapshot,
complete_repository,
record_repository,
connect,
api_datastore_latest_snapshot,
BufferedDynamicReadAt,
api_datastore_latest_snapshot, complete_group_or_snapshot, complete_img_archive_name,
complete_pxar_archive_name, complete_repository, connect, extract_repository_from_value,
record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
};
#[sortable]
@@ -47,14 +40,36 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
&ObjectSchema::new(
"Mount pxar archive.",
&sorted!([
("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
("target", false, &StringSchema::new("Target directory path.").schema()),
(
"snapshot",
false,
&StringSchema::new("Group/Snapshot path.").schema()
),
(
"archive-name",
false,
&StringSchema::new("Backup archive name.").schema()
),
(
"target",
false,
&StringSchema::new("Target directory path.").schema()
),
("repository", true, &REPO_URL_SCHEMA),
("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
(
"keyfile",
true,
&StringSchema::new("Path to encryption key.").schema()
),
(
"verbose",
true,
&BooleanSchema::new("Verbose output and stay in foreground.")
.default(false)
.schema()
),
]),
)
),
);
#[sortable]
@@ -64,13 +79,31 @@ const API_METHOD_MAP: ApiMethod = ApiMethod::new(
"Map a drive image from a VM backup to a local loopback device. Use 'unmap' to undo.
WARNING: Only do this with *trusted* backups!",
&sorted!([
("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
(
"snapshot",
false,
&StringSchema::new("Group/Snapshot path.").schema()
),
(
"archive-name",
false,
&StringSchema::new("Backup archive name.").schema()
),
("repository", true, &REPO_URL_SCHEMA),
("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
(
"keyfile",
true,
&StringSchema::new("Path to encryption key.").schema()
),
(
"verbose",
true,
&BooleanSchema::new("Verbose output and stay in foreground.")
.default(false)
.schema()
),
]),
)
),
);
#[sortable]
@@ -78,17 +111,19 @@ const API_METHOD_UNMAP: ApiMethod = ApiMethod::new(
&ApiHandler::Sync(&unmap),
&ObjectSchema::new(
"Unmap a loop device mapped with 'map' and release all resources.",
&sorted!([
("name", true, &StringSchema::new(
concat!("Archive name, path to loopdev (/dev/loopX) or loop device number. ",
"Omit to list all current mappings and force cleaning up leftover instances.")
).schema()),
]),
)
&sorted!([(
"name",
true,
&StringSchema::new(concat!(
"Archive name, path to loopdev (/dev/loopX) or loop device number. ",
"Omit to list all current mappings and force cleaning up leftover instances."
))
.schema()
),]),
),
);
pub fn mount_cmd_def() -> CliCommand {
CliCommand::new(&API_METHOD_MOUNT)
.arg_param(&["snapshot", "archive-name", "target"])
.completion_cb("repository", complete_repository)
@@ -98,7 +133,6 @@ pub fn mount_cmd_def() -> CliCommand {
}
pub fn map_cmd_def() -> CliCommand {
CliCommand::new(&API_METHOD_MAP)
.arg_param(&["snapshot", "archive-name"])
.completion_cb("repository", complete_repository)
@@ -107,21 +141,20 @@ pub fn map_cmd_def() -> CliCommand {
}
pub fn unmap_cmd_def() -> CliCommand {
CliCommand::new(&API_METHOD_UNMAP)
.arg_param(&["name"])
.completion_cb("name", complete_mapping_names)
}
fn complete_mapping_names<S: BuildHasher>(_arg: &str, _param: &HashMap<String, String, S>)
-> Vec<String>
{
fn complete_mapping_names<S: BuildHasher>(
_arg: &str,
_param: &HashMap<String, String, S>,
) -> Vec<String> {
match pbs_fuse_loop::find_all_mappings() {
Ok(mappings) => mappings
.filter_map(|(name, _)| {
proxmox_sys::systemd::unescape_unit(&name).ok()
}).collect(),
Err(_) => Vec::new()
.filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(&name).ok())
.collect(),
Err(_) => Vec::new(),
}
}
@@ -130,7 +163,6 @@ fn mount(
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let verbose = param["verbose"].as_bool().unwrap_or(false);
if verbose {
// This will stay in foreground with debug output enabled as None is
@@ -172,7 +204,11 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
api_datastore_latest_snapshot(&client, repo.store(), group).await?
} else {
let snapshot: BackupDir = path.parse()?;
(snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
(
snapshot.group().backup_type().to_owned(),
snapshot.group().backup_id().to_owned(),
snapshot.backup_time(),
)
};
let keyfile = param["keyfile"].as_str().map(PathBuf::from);
@@ -208,7 +244,8 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
&backup_id,
backup_time,
true,
).await?;
)
.await?;
let (manifest, _) = client.download_manifest().await?;
manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -223,7 +260,8 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
"/dev/null",
nix::fcntl::OFlag::O_RDWR,
nix::sys::stat::Mode::empty(),
).unwrap();
)
.unwrap();
nix::unistd::dup2(nullfd, 0).unwrap();
nix::unistd::dup2(nullfd, 1).unwrap();
nix::unistd::dup2(nullfd, 2).unwrap();
@@ -245,16 +283,23 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
let mut interrupt_int = signal(SignalKind::interrupt())?;
let mut interrupt_term = signal(SignalKind::terminate())?;
let mut interrupt = futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed());
let mut interrupt =
futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed());
if server_archive_name.ends_with(".didx") {
let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
let index = client
.download_dynamic_index(&manifest, &server_archive_name)
.await?;
let most_used = index.find_most_used_chunks(8);
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config,
file_info.chunk_crypt_mode(),
most_used,
);
let reader = BufferedDynamicReader::new(index, chunk_reader);
let archive_size = reader.archive_size();
let reader: pbs_client::pxar::fuse::Reader =
Arc::new(BufferedDynamicReadAt::new(reader));
let reader: pbs_client::pxar::fuse::Reader = Arc::new(BufferedDynamicReadAt::new(reader));
let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
let session = pbs_client::pxar::fuse::Session::mount(
@@ -274,15 +319,23 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
}
}
} else if server_archive_name.ends_with(".fidx") {
let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
let index = client
.download_fixed_index(&manifest, &server_archive_name)
.await?;
let size = index.index_bytes();
let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), HashMap::new());
let chunk_reader = RemoteChunkReader::new(
client.clone(),
crypt_config,
file_info.chunk_crypt_mode(),
HashMap::new(),
);
let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();
let name = &format!("{}:{}/{}", repo, path, archive_name);
let name_escaped = proxmox_sys::systemd::escape_unit(name, false);
let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
let mut session =
pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
let loopdev = session.loopdev_path.clone();
let (st_send, st_recv) = futures::channel::mpsc::channel(1);
@@ -335,7 +388,6 @@ fn unmap(
_info: &ApiMethod,
_rpcenv: &mut dyn RpcEnvironment,
) -> Result<Value, Error> {
let mut name = match param["name"].as_str() {
Some(name) => name.to_owned(),
None => {
@@ -343,14 +395,18 @@ fn unmap(
let mut any = false;
for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? {
let name = proxmox_sys::systemd::unescape_unit(&backing)?;
println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name);
println!(
"{}:\t{}",
loopdev.unwrap_or_else(|| "(unmapped)".to_string()),
name
);
any = true;
}
if !any {
println!("Nothing mapped.");
}
return Ok(Value::Null);
},
}
};
// allow loop device number alone


@@ -3,30 +3,21 @@ use std::sync::Arc;
use anyhow::Error;
use serde_json::{json, Value};
use proxmox_sys::fs::file_get_contents;
use proxmox_router::cli::*;
use proxmox_schema::api;
use proxmox_sys::fs::file_get_contents;
use pbs_tools::crypt_config::CryptConfig;
use pbs_config::key_config::decrypt_key;
use pbs_api_types::{SnapshotListItem, CryptMode};
use pbs_api_types::{CryptMode, SnapshotListItem};
use pbs_client::tools::key_source::get_encryption_key_password;
use pbs_datastore::{DataBlob, BackupGroup};
use pbs_config::key_config::decrypt_key;
use pbs_datastore::{BackupGroup, DataBlob};
use pbs_tools::crypt_config::CryptConfig;
use pbs_tools::json::required_string_param;
use crate::{
REPO_URL_SCHEMA,
KEYFILE_SCHEMA,
KEYFD_SCHEMA,
BackupDir,
api_datastore_list_snapshots,
complete_backup_snapshot,
complete_backup_group,
complete_repository,
connect,
crypto_parameters,
extract_repository_from_value,
record_repository,
api_datastore_list_snapshots, complete_backup_group, complete_backup_snapshot,
complete_repository, connect, crypto_parameters, extract_repository_from_value,
record_repository, BackupDir, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
};
#[api(
@@ -50,7 +41,6 @@ use crate::{
)]
/// List backup snapshots.
async fn list_snapshots(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let output_format = get_output_format(&param);
@@ -86,10 +76,13 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
.sortby("backup-type", false)
.sortby("backup-id", false)
.sortby("backup-time", false)
.column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
.column(
ColumnConfig::new("backup-id")
.renderer(render_snapshot_path)
.header("snapshot"),
)
.column(ColumnConfig::new("size").renderer(pbs_tools::format::render_bytes_human_readable))
.column(ColumnConfig::new("files").renderer(render_files))
;
.column(ColumnConfig::new("files").renderer(render_files));
let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE;
@@ -118,7 +111,6 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
)]
/// List snapshot files.
async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let path = required_string_param(&param, "snapshot")?;
@@ -130,11 +122,16 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
let path = format!("api2/json/admin/datastore/{}/files", repo.store());
let mut result = client.get(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
}))).await?;
let mut result = client
.get(
&path,
Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
})),
)
.await?;
record_repository(&repo);
@@ -165,7 +162,6 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
)]
/// Forget (remove) backup snapshots.
async fn forget_snapshots(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let path = required_string_param(&param, "snapshot")?;
@@ -175,11 +171,16 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
let result = client.delete(&path, Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
}))).await?;
let result = client
.delete(
&path,
Some(json!({
"backup-type": snapshot.group().backup_type(),
"backup-id": snapshot.group().backup_id(),
"backup-time": snapshot.backup_time(),
})),
)
.await?;
record_repository(&repo);
@@ -218,7 +219,6 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
)]
/// Upload backup log file.
async fn upload_log(param: Value) -> Result<Value, Error> {
let logfile = required_string_param(&param, "logfile")?;
let repo = extract_repository_from_value(&param)?;
@@ -243,12 +243,17 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
// fixme: howto sign log?
let blob = match crypto.mode {
CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
CryptMode::Encrypt => {
DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?
}
};
let raw_data = blob.into_inner();
let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());
let path = format!(
"api2/json/admin/datastore/{}/upload-backup-log",
repo.store()
);
let args = json!({
"backup-type": snapshot.group().backup_type(),
@@ -258,7 +263,9 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
let body = hyper::Body::from(raw_data);
client.upload("application/octet-stream", body, &path, Some(args)).await
client
.upload("application/octet-stream", body, &path, Some(args))
.await
}
#[api(
@@ -495,21 +502,21 @@ pub fn snapshot_mgtm_cli() -> CliCommandMap {
CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
.arg_param(&["group"])
.completion_cb("group", complete_backup_group)
.completion_cb("repository", complete_repository)
.completion_cb("repository", complete_repository),
)
.insert(
"files",
CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
.arg_param(&["snapshot"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_backup_snapshot)
.completion_cb("snapshot", complete_backup_snapshot),
)
.insert(
"forget",
CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
.arg_param(&["snapshot"])
.completion_cb("repository", complete_repository)
.completion_cb("snapshot", complete_backup_snapshot)
.completion_cb("snapshot", complete_backup_snapshot),
)
.insert(
"upload-log",
@@ -518,6 +525,6 @@ pub fn snapshot_mgtm_cli() -> CliCommandMap {
.completion_cb("snapshot", complete_backup_snapshot)
.completion_cb("logfile", complete_file_name)
.completion_cb("keyfile", complete_file_name)
.completion_cb("repository", complete_repository)
.completion_cb("repository", complete_repository),
)
}


@@ -1,21 +1,16 @@
use anyhow::{Error};
use anyhow::Error;
use serde_json::{json, Value};
use proxmox_schema::api;
use proxmox_router::cli::*;
use proxmox_schema::api;
use pbs_client::display_task_log;
use pbs_api_types::percent_encoding::percent_encode_component;
use pbs_client::display_task_log;
use pbs_tools::json::required_string_param;
use pbs_api_types::UPID;
use crate::{
REPO_URL_SCHEMA,
extract_repository_from_value,
complete_repository,
connect,
};
use crate::{complete_repository, connect, extract_repository_from_value, REPO_URL_SCHEMA};
#[api(
input: {
@@ -46,7 +41,6 @@ use crate::{
)]
/// List running server tasks for this repo user
async fn task_list(param: Value) -> Result<Value, Error> {
let output_format = get_output_format(&param);
let repo = extract_repository_from_value(&param)?;
@@ -63,15 +57,25 @@ async fn task_list(param: Value) -> Result<Value, Error> {
"store": repo.store(),
});
let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
let mut result = client
.get("api2/json/nodes/localhost/tasks", Some(args))
.await?;
let mut data = result["data"].take();
let return_type = &pbs_api_types::NODE_TASKS_LIST_TASKS_RETURN_TYPE;
use pbs_tools::format::{render_epoch, render_task_status};
let options = default_table_format_options()
.column(ColumnConfig::new("starttime").right_align(false).renderer(render_epoch))
.column(ColumnConfig::new("endtime").right_align(false).renderer(render_epoch))
.column(
ColumnConfig::new("starttime")
.right_align(false)
.renderer(render_epoch),
)
.column(
ColumnConfig::new("endtime")
.right_align(false)
.renderer(render_epoch),
)
.column(ColumnConfig::new("upid"))
.column(ColumnConfig::new("status").renderer(render_task_status));
@@ -95,9 +99,8 @@ async fn task_list(param: Value) -> Result<Value, Error> {
)]
/// Display the task log.
async fn task_log(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let upid = required_string_param(&param, "upid")?;
let upid = required_string_param(&param, "upid")?;
let client = connect(&repo)?;
@@ -121,28 +124,27 @@ async fn task_log(param: Value) -> Result<Value, Error> {
)]
/// Try to stop a specific task.
async fn task_stop(param: Value) -> Result<Value, Error> {
let repo = extract_repository_from_value(&param)?;
let upid_str = required_string_param(&param, "upid")?;
let upid_str = required_string_param(&param, "upid")?;
let client = connect(&repo)?;
let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
let path = format!(
"api2/json/nodes/localhost/tasks/{}",
percent_encode_component(upid_str)
);
let _ = client.delete(&path, None).await?;
Ok(Value::Null)
}
pub fn task_mgmt_cli() -> CliCommandMap {
let task_list_cmd_def =
CliCommand::new(&API_METHOD_TASK_LIST).completion_cb("repository", complete_repository);
let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
.completion_cb("repository", complete_repository);
let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG).arg_param(&["upid"]);
let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
.arg_param(&["upid"]);
let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
.arg_param(&["upid"]);
let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP).arg_param(&["upid"]);
CliCommandMap::new()
.insert("log", task_log_cmd_def)