commit f9a5beaa15 (parent 00ae34dfda)
Author: Thomas Lamprecht <t.lamprecht@proxmox.com>
Date:   2022-04-14 14:06:15 +02:00

backup client: rustfmt

Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>

7 changed files with 348 additions and 267 deletions


@@ -1,34 +1,27 @@
 use std::path::PathBuf;
 use std::sync::Arc;
 
-use anyhow::{Error};
-use serde_json::Value;
+use anyhow::Error;
 use serde::Serialize;
+use serde_json::Value;
 
-use proxmox_schema::{api, ApiType, ReturnType};
 use proxmox_router::{
     cli::{
-        OUTPUT_FORMAT,
-        ColumnConfig,
-        get_output_format,
-        format_and_print_result_full,
-        default_table_format_options,
+        default_table_format_options, format_and_print_result_full, get_output_format,
+        ColumnConfig, OUTPUT_FORMAT,
     },
-    ApiMethod,
-    RpcEnvironment,
+    ApiMethod, RpcEnvironment,
 };
+use proxmox_schema::{api, ApiType, ReturnType};
 
-use pbs_tools::crypt_config::CryptConfig;
-use pbs_config::key_config::{KeyDerivationConfig, load_and_decrypt_key};
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupRepository, BackupWriter};
+use pbs_config::key_config::{load_and_decrypt_key, KeyDerivationConfig};
 use pbs_datastore::data_blob::{DataBlob, DataChunkBuilder};
+use pbs_tools::crypt_config::CryptConfig;
 
 use crate::{
-    KEYFILE_SCHEMA, REPO_URL_SCHEMA,
-    extract_repository_from_value,
-    record_repository,
-    connect,
+    connect, extract_repository_from_value, record_repository, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
 };
 
 #[api()]
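Note on the hunk above: with its default settings rustfmt packs the items of a use-tree up to the 100-column line width and sorts them, placing snake_case names (modules, functions) before CamelCase types, with SCREAMING_SNAKE_CASE constants last — visible in the reordered use crate::{...} block. Top-level use statements are likewise sorted within each blank-line-separated group. A minimal sketch of the same rule with std-only imports (illustrative, not part of this commit):

    // Hand-written:  use std::collections::{HashMap, hash_map::Entry, BTreeMap};
    // After rustfmt: use std::collections::{hash_map::Entry, BTreeMap, HashMap};
    use std::collections::{hash_map::Entry, BTreeMap, HashMap};

    fn main() {
        let mut map: HashMap<&str, u32> = HashMap::new();
        if let Entry::Vacant(entry) = map.entry("tls") {
            entry.insert(0);
        }
        // BTreeMap keeps the printed output in a stable key order.
        let ordered: BTreeMap<_, _> = map.into_iter().collect();
        println!("{ordered:?}");
    }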
@@ -137,7 +130,6 @@ pub async fn benchmark(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<(), Error> {
-
     let repo = extract_repository_from_value(&param).ok();
 
     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
@@ -170,11 +162,7 @@ pub async fn benchmark(
 }
 
 // print comparison table
-fn render_result(
-    output_format: &str,
-    benchmark_result: &BenchmarkResult,
-) -> Result<(), Error> {
-
+fn render_result(output_format: &str, benchmark_result: &BenchmarkResult) -> Result<(), Error> {
     let mut data = serde_json::to_value(benchmark_result)?;
 
     let return_type = ReturnType::new(false, &BenchmarkResult::API_SCHEMA);
@@ -183,31 +171,52 @@ fn render_result(
             None => Ok(String::from("not tested")),
             Some(speed) => {
                 let top = value["top"].as_f64().unwrap();
-                Ok(format!("{:.2} MB/s ({:.0}%)", speed/1_000_000.0, (speed*100.0)/top))
+                Ok(format!(
+                    "{:.2} MB/s ({:.0}%)",
+                    speed / 1_000_000.0,
+                    (speed * 100.0) / top
+                ))
             }
         }
     };
 
     let options = default_table_format_options()
-        .column(ColumnConfig::new("tls")
-            .header("TLS (maximal backup upload speed)")
-            .right_align(false).renderer(render_speed))
-        .column(ColumnConfig::new("sha256")
-            .header("SHA256 checksum computation speed")
-            .right_align(false).renderer(render_speed))
-        .column(ColumnConfig::new("compress")
-            .header("ZStd level 1 compression speed")
-            .right_align(false).renderer(render_speed))
-        .column(ColumnConfig::new("decompress")
-            .header("ZStd level 1 decompression speed")
-            .right_align(false).renderer(render_speed))
-        .column(ColumnConfig::new("verify")
-            .header("Chunk verification speed")
-            .right_align(false).renderer(render_speed))
-        .column(ColumnConfig::new("aes256_gcm")
-            .header("AES256 GCM encryption speed")
-            .right_align(false).renderer(render_speed));
+        .column(
+            ColumnConfig::new("tls")
+                .header("TLS (maximal backup upload speed)")
+                .right_align(false)
+                .renderer(render_speed),
+        )
+        .column(
+            ColumnConfig::new("sha256")
+                .header("SHA256 checksum computation speed")
+                .right_align(false)
+                .renderer(render_speed),
+        )
+        .column(
+            ColumnConfig::new("compress")
+                .header("ZStd level 1 compression speed")
+                .right_align(false)
+                .renderer(render_speed),
+        )
+        .column(
+            ColumnConfig::new("decompress")
+                .header("ZStd level 1 decompression speed")
+                .right_align(false)
+                .renderer(render_speed),
+        )
+        .column(
+            ColumnConfig::new("verify")
+                .header("Chunk verification speed")
+                .right_align(false)
+                .renderer(render_speed),
+        )
+        .column(
+            ColumnConfig::new("aes256_gcm")
+                .header("AES256 GCM encryption speed")
+                .right_align(false)
+                .renderer(render_speed),
+        );
 
     format_and_print_result_full(&mut data, &return_type, output_format, &options);
 
     Ok(())
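The default_table_format_options() hunk shows rustfmt's chain rule: once a method chain (or a call argument inside it) overflows the line width, every .method() moves onto its own line and multi-line arguments are block-indented with a trailing comma. A self-contained sketch with a std builder (the path below is illustrative only):

    use std::fs::OpenOptions;

    fn main() -> std::io::Result<()> {
        // A short chain that fits in 100 columns would stay on one line; this
        // one is written out one call per line, as rustfmt does above.
        let file = OpenOptions::new()
            .read(true)
            .write(true)
            .create(true)
            .truncate(true)
            .open("/tmp/rustfmt-chain-demo")?;
        drop(file);
        Ok(())
    }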
@@ -220,13 +229,14 @@ async fn test_upload_speed(
     crypt_config: Option<Arc<CryptConfig>>,
     verbose: bool,
 ) -> Result<(), Error> {
-
     let backup_time = proxmox_time::epoch_i64();
     let client = connect(&repo)?;
     record_repository(&repo);
 
-    if verbose { eprintln!("Connecting to backup server"); }
+    if verbose {
+        eprintln!("Connecting to backup server");
+    }
 
     let client = BackupWriter::start(
         client,
         crypt_config.clone(),
@@ -235,10 +245,13 @@ async fn test_upload_speed(
         "benchmark",
         backup_time,
         false,
-        true
-    ).await?;
+        true,
+    )
+    .await?;
 
-    if verbose { eprintln!("Start TLS speed test"); }
+    if verbose {
+        eprintln!("Start TLS speed test");
+    }
 
     let speed = client.upload_speedtest(verbose).await?;
     eprintln!("TLS speed: {:.2} MB/s", speed / 1_000_000.0);
@@ -249,11 +262,7 @@ async fn test_upload_speed(
 }
 
 // test hash/crypt/compress speed
-fn test_crypt_speed(
-    benchmark_result: &mut BenchmarkResult,
-    _verbose: bool,
-) -> Result<(), Error> {
-
+fn test_crypt_speed(benchmark_result: &mut BenchmarkResult, _verbose: bool) -> Result<(), Error> {
     let pw = b"test";
 
     let kdf = KeyDerivationConfig::Scrypt {
@@ -285,14 +294,15 @@ fn test_crypt_speed(
     loop {
         openssl::sha::sha256(&random_data);
         bytes += random_data.len();
-        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+        if start_time.elapsed().as_micros() > 1_000_000 {
+            break;
+        }
     }
 
     let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
     benchmark_result.sha256.speed = Some(speed);
     eprintln!("SHA256 speed: {:.2} MB/s", speed / 1_000_000.0);
 
     let start_time = std::time::Instant::now();
     let mut bytes = 0;
@@ -300,14 +310,15 @@ fn test_crypt_speed(
         let mut reader = &random_data[..];
         zstd::stream::encode_all(&mut reader, 1)?;
         bytes += random_data.len();
-        if start_time.elapsed().as_micros() > 3_000_000 { break; }
+        if start_time.elapsed().as_micros() > 3_000_000 {
+            break;
+        }
     }
 
     let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
     benchmark_result.compress.speed = Some(speed);
     eprintln!("Compression speed: {:.2} MB/s", speed / 1_000_000.0);
 
     let start_time = std::time::Instant::now();
     let compressed_data = {
@@ -320,14 +331,15 @@ fn test_crypt_speed(
         let mut reader = &compressed_data[..];
         let data = zstd::stream::decode_all(&mut reader)?;
         bytes += data.len();
-        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+        if start_time.elapsed().as_micros() > 1_000_000 {
+            break;
+        }
     }
 
     let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
     benchmark_result.decompress.speed = Some(speed);
     eprintln!("Decompress speed: {:.2} MB/s", speed / 1_000_000.0);
 
     let start_time = std::time::Instant::now();
     let mut bytes = 0;
@@ -335,25 +347,26 @@ fn test_crypt_speed(
         let mut out = Vec::new();
         DataBlob::encrypt_benchmark(&crypt_config, &random_data, &mut out)?;
         bytes += random_data.len();
-        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+        if start_time.elapsed().as_micros() > 1_000_000 {
+            break;
+        }
     }
 
     let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
     benchmark_result.aes256_gcm.speed = Some(speed);
     eprintln!("AES256/GCM speed: {:.2} MB/s", speed / 1_000_000.0);
 
     let start_time = std::time::Instant::now();
-    let (chunk, digest) = DataChunkBuilder::new(&random_data)
-        .compress(true)
-        .build()?;
+    let (chunk, digest) = DataChunkBuilder::new(&random_data).compress(true).build()?;
 
     let mut bytes = 0;
     loop {
         chunk.verify_unencrypted(random_data.len(), &digest)?;
         bytes += random_data.len();
-        if start_time.elapsed().as_micros() > 1_000_000 { break; }
+        if start_time.elapsed().as_micros() > 1_000_000 {
+            break;
+        }
     }
 
     let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
     benchmark_result.verify.speed = Some(speed);
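Beyond formatting, the hunks in test_crypt_speed all share one measurement pattern: run the operation in a loop, accumulate the processed byte count, stop after a fixed wall-clock budget, and divide. A minimal sketch of that pattern using only std (the checksum below is a stand-in for the real sha256/zstd/AES calls):

    use std::time::Instant;

    fn main() {
        let random_data = vec![0u8; 1024 * 1024];
        let start_time = Instant::now();
        let mut bytes = 0usize;
        loop {
            // Stand-in for the measured operation (hashing, compression, ...).
            let _checksum: u64 = random_data.iter().map(|&b| u64::from(b)).sum();
            bytes += random_data.len();
            if start_time.elapsed().as_micros() > 1_000_000 {
                break;
            }
        }
        let speed = (bytes as f64) / start_time.elapsed().as_secs_f64();
        eprintln!("speed: {:.2} MB/s", speed / 1_000_000.0);
    }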


@@ -1,4 +1,4 @@
-use anyhow::{Error};
+use anyhow::Error;
 
 use proxmox_router::cli::*;
 use proxmox_schema::format::*;
@@ -6,12 +6,10 @@ use proxmox_schema::format::*;
 use pbs_client::catalog_shell::catalog_shell_cli;
 
 fn main() -> Result<(), Error> {
-
     match catalog_shell_cli() {
         CommandLineInterface::Nested(map) => {
             let usage = generate_nested_usage("", &map, DocumentationFormat::ReST);
             println!("{}", usage);
         }
         _ => unreachable!(),
     }
-
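The one content change rustfmt makes in this file is normalizing a braced single-item import, dropping the redundant braces. Sketch (illustrative):

    // Hand-written:  use std::fmt::{Display};
    // After rustfmt: use std::fmt::Display;
    use std::fmt::Display;

    fn show<T: Display>(value: T) -> String {
        format!("{value}")
    }

    fn main() {
        println!("{}", show(42));
    }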


@@ -1,41 +1,24 @@
-use std::os::unix::fs::OpenOptionsExt;
 use std::io::{Seek, SeekFrom};
+use std::os::unix::fs::OpenOptionsExt;
 use std::sync::Arc;
 
 use anyhow::{bail, format_err, Error};
 use serde_json::Value;
 
-use proxmox_schema::api;
 use proxmox_router::cli::*;
+use proxmox_schema::api;
 
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
-use pbs_tools::json::required_string_param;
 use pbs_tools::crypt_config::CryptConfig;
+use pbs_tools::json::required_string_param;
 
 use crate::{
-    REPO_URL_SCHEMA,
-    KEYFD_SCHEMA,
-    extract_repository_from_value,
-    format_key_source,
-    record_repository,
-    decrypt_key,
-    api_datastore_latest_snapshot,
-    complete_repository,
-    complete_backup_snapshot,
-    complete_group_or_snapshot,
-    complete_pxar_archive_name,
-    connect,
-    crypto_parameters,
-    BackupDir,
-    BackupGroup,
-    BufferedDynamicReader,
-    BufferedDynamicReadAt,
-    CatalogReader,
-    CATALOG_NAME,
-    DynamicIndexReader,
-    IndexFile,
-    Shell,
+    api_datastore_latest_snapshot, complete_backup_snapshot, complete_group_or_snapshot,
+    complete_pxar_archive_name, complete_repository, connect, crypto_parameters, decrypt_key,
+    extract_repository_from_value, format_key_source, record_repository, BackupDir, BackupGroup,
+    BufferedDynamicReadAt, BufferedDynamicReader, CatalogReader, DynamicIndexReader, IndexFile,
+    Shell, CATALOG_NAME, KEYFD_SCHEMA, REPO_URL_SCHEMA,
 };
 
 #[api(
@@ -63,7 +46,6 @@ use crate::{
 )]
 /// Dump catalog.
 async fn dump_catalog(param: Value) -> Result<Value, Error> {
-
     let repo = extract_repository_from_value(&param)?;
 
     let path = required_string_param(&param, "snapshot")?;
@@ -94,18 +76,26 @@ async fn dump_catalog(param: Value) -> Result<Value, Error> {
         snapshot.group().backup_id(),
         snapshot.backup_time(),
         true,
-    ).await?;
+    )
+    .await?;
 
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
 
-    let index = client.download_dynamic_index(&manifest, CATALOG_NAME).await?;
+    let index = client
+        .download_dynamic_index(&manifest, CATALOG_NAME)
+        .await?;
 
     let most_used = index.find_most_used_chunks(8);
     let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
 
-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
+    let chunk_reader = RemoteChunkReader::new(
+        client.clone(),
+        crypt_config,
+        file_info.chunk_crypt_mode(),
+        most_used,
+    );
 
     let mut reader = BufferedDynamicReader::new(index, chunk_reader);
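The RemoteChunkReader::new(...) rewrite above is rustfmt's standard call-breaking rule: a call that no longer fits on one line gets one argument per line, indented one level, with a trailing comma after the last argument. A sketch with a made-up function (the names are hypothetical; only the formatting mirrors the hunk):

    fn chunk_reader_demo(client: &str, crypt_mode: u8, most_used: usize) -> String {
        format!("{client} mode={crypt_mode} cached={most_used}")
    }

    fn main() {
        // One argument per line once the call overflows the line width:
        let description = chunk_reader_demo(
            "backup@pbs@backup.example.org:some-rather-long-datastore-name",
            0,
            8,
        );
        println!("{description}");
    }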
@@ -168,7 +158,11 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
         let snapshot: BackupDir = path.parse()?;
-        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
+        (
+            snapshot.group().backup_type().to_owned(),
+            snapshot.group().backup_id().to_owned(),
+            snapshot.backup_time(),
+        )
     };
 
     let crypto = crypto_parameters(&param)?;
@@ -200,7 +194,8 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
         &backup_id,
         backup_time,
         true,
-    ).await?;
+    )
+    .await?;
 
     let mut tmpfile = std::fs::OpenOptions::new()
         .write(true)
@@ -211,15 +206,21 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
 
-    let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+    let index = client
+        .download_dynamic_index(&manifest, &server_archive_name)
+        .await?;
     let most_used = index.find_most_used_chunks(8);
 
     let file_info = manifest.lookup_file_info(&server_archive_name)?;
-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config.clone(), file_info.chunk_crypt_mode(), most_used);
+    let chunk_reader = RemoteChunkReader::new(
+        client.clone(),
+        crypt_config.clone(),
+        file_info.chunk_crypt_mode(),
+        most_used,
+    );
     let reader = BufferedDynamicReader::new(index, chunk_reader);
     let archive_size = reader.archive_size();
-    let reader: pbs_client::pxar::fuse::Reader =
-        Arc::new(BufferedDynamicReadAt::new(reader));
+    let reader: pbs_client::pxar::fuse::Reader = Arc::new(BufferedDynamicReadAt::new(reader));
     let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
 
     client.download(CATALOG_NAME, &mut tmpfile).await?;
@@ -233,7 +234,12 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     let most_used = index.find_most_used_chunks(8);
 
     let file_info = manifest.lookup_file_info(CATALOG_NAME)?;
-    let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
+    let chunk_reader = RemoteChunkReader::new(
+        client.clone(),
+        crypt_config,
+        file_info.chunk_crypt_mode(),
+        most_used,
+    );
     let mut reader = BufferedDynamicReader::new(index, chunk_reader);
     let mut catalogfile = std::fs::OpenOptions::new()
         .write(true)
@@ -246,11 +252,7 @@ async fn catalog_shell(param: Value) -> Result<(), Error> {
     catalogfile.seek(SeekFrom::Start(0))?;
     let catalog_reader = CatalogReader::new(catalogfile);
 
-    let state = Shell::new(
-        catalog_reader,
-        &server_archive_name,
-        decoder,
-    ).await?;
+    let state = Shell::new(catalog_reader, &server_archive_name, decoder).await?;
 
     println!("Starting interactive shell");
     state.shell().await?;


@@ -5,22 +5,21 @@ use anyhow::{bail, format_err, Error};
 use serde::{Deserialize, Serialize};
 use serde_json::Value;
 
-use proxmox_sys::linux::tty;
-use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
 use proxmox_router::cli::{
-    complete_file_name, format_and_print_result_full, get_output_format,
-    CliCommand, CliCommandMap, ColumnConfig,
-    OUTPUT_FORMAT,
+    complete_file_name, format_and_print_result_full, get_output_format, CliCommand, CliCommandMap,
+    ColumnConfig, OUTPUT_FORMAT,
 };
 use proxmox_schema::{api, ApiType, ReturnType};
+use proxmox_sys::fs::{file_get_contents, replace_file, CreateOptions};
+use proxmox_sys::linux::tty;
 
-use pbs_api_types::{PASSWORD_HINT_SCHEMA, Kdf, KeyInfo};
-use pbs_config::key_config::{KeyConfig, rsa_decrypt_key_config};
-use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
+use pbs_api_types::{Kdf, KeyInfo, PASSWORD_HINT_SCHEMA};
 use pbs_client::tools::key_source::{
     find_default_encryption_key, find_default_master_pubkey, get_encryption_key_password,
     place_default_encryption_key, place_default_master_pubkey,
 };
+use pbs_config::key_config::{rsa_decrypt_key_config, KeyConfig};
+use pbs_datastore::paperkey::{generate_paper_key, PaperkeyFormat};
 
 #[api]
 #[derive(Deserialize, Serialize)]
@@ -55,7 +54,6 @@ impl std::convert::TryFrom<openssl::rsa::Rsa<openssl::pkey::Public>> for RsaPubK
     }
 }
 
-
 #[api(
     input: {
         properties: {
@@ -391,7 +389,12 @@ fn create_master_key() -> Result<(), Error> {
 
     let filename_priv = "master-private.pem";
     println!("Writing private master key to {}", filename_priv);
-    replace_file(filename_priv, priv_key.as_slice(), CreateOptions::new(), true)?;
+    replace_file(
+        filename_priv,
+        priv_key.as_slice(),
+        CreateOptions::new(),
+        true,
+    )?;
 
     Ok(())
 }


@@ -13,32 +13,25 @@ use nix::unistd::{fork, ForkResult};
 use serde_json::Value;
 use tokio::signal::unix::{signal, SignalKind};
 
-use proxmox_sys::sortable;
-use proxmox_sys::fd::Fd;
-use proxmox_router::{ApiHandler, ApiMethod, RpcEnvironment, cli::*};
+use proxmox_router::{cli::*, ApiHandler, ApiMethod, RpcEnvironment};
 use proxmox_schema::*;
+use proxmox_sys::fd::Fd;
+use proxmox_sys::sortable;
 
-use pbs_tools::crypt_config::CryptConfig;
-use pbs_config::key_config::load_and_decrypt_key;
-use pbs_datastore::{BackupDir, BackupGroup, };
-use pbs_datastore::index::IndexFile;
-use pbs_datastore::dynamic_index::BufferedDynamicReader;
-use pbs_datastore::cached_chunk_reader::CachedChunkReader;
 use pbs_client::tools::key_source::get_encryption_key_password;
 use pbs_client::{BackupReader, RemoteChunkReader};
+use pbs_config::key_config::load_and_decrypt_key;
+use pbs_datastore::cached_chunk_reader::CachedChunkReader;
+use pbs_datastore::dynamic_index::BufferedDynamicReader;
+use pbs_datastore::index::IndexFile;
+use pbs_datastore::{BackupDir, BackupGroup};
+use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;
 
 use crate::{
-    REPO_URL_SCHEMA,
-    extract_repository_from_value,
-    complete_pxar_archive_name,
-    complete_img_archive_name,
-    complete_group_or_snapshot,
-    complete_repository,
-    record_repository,
-    connect,
-    api_datastore_latest_snapshot,
-    BufferedDynamicReadAt,
+    api_datastore_latest_snapshot, complete_group_or_snapshot, complete_img_archive_name,
+    complete_pxar_archive_name, complete_repository, connect, extract_repository_from_value,
+    record_repository, BufferedDynamicReadAt, REPO_URL_SCHEMA,
 };
 
 #[sortable]
@@ -47,14 +40,36 @@ const API_METHOD_MOUNT: ApiMethod = ApiMethod::new(
     &ObjectSchema::new(
         "Mount pxar archive.",
         &sorted!([
-            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
-            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
-            ("target", false, &StringSchema::new("Target directory path.").schema()),
+            (
+                "snapshot",
+                false,
+                &StringSchema::new("Group/Snapshot path.").schema()
+            ),
+            (
+                "archive-name",
+                false,
+                &StringSchema::new("Backup archive name.").schema()
+            ),
+            (
+                "target",
+                false,
+                &StringSchema::new("Target directory path.").schema()
+            ),
             ("repository", true, &REPO_URL_SCHEMA),
-            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
-            ("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
+            (
+                "keyfile",
+                true,
+                &StringSchema::new("Path to encryption key.").schema()
+            ),
+            (
+                "verbose",
+                true,
+                &BooleanSchema::new("Verbose output and stay in foreground.")
+                    .default(false)
+                    .schema()
+            ),
         ]),
-    )
+    ),
 );
 
 #[sortable]
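The schema table above shows the same width rule applied inside macro array literals: tuples that still fit stay on one line (the "repository" entry), while overlong ones are split one element per line. A compilable sketch (the constant and strings are illustrative):

    const PARAMETERS: &[(&str, bool, &str)] = &[
        ("repository", true, "short tuple, kept on one line"),
        (
            "verbose",
            true,
            "this tuple was too wide for a single line, so each element gets its own",
        ),
    ];

    fn main() {
        for &(name, optional, description) in PARAMETERS {
            println!("{name} (optional: {optional}): {description}");
        }
    }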
@@ -64,13 +79,31 @@ const API_METHOD_MAP: ApiMethod = ApiMethod::new(
         "Map a drive image from a VM backup to a local loopback device. Use 'unmap' to undo.
 WARNING: Only do this with *trusted* backups!",
         &sorted!([
-            ("snapshot", false, &StringSchema::new("Group/Snapshot path.").schema()),
-            ("archive-name", false, &StringSchema::new("Backup archive name.").schema()),
+            (
+                "snapshot",
+                false,
+                &StringSchema::new("Group/Snapshot path.").schema()
+            ),
+            (
+                "archive-name",
+                false,
+                &StringSchema::new("Backup archive name.").schema()
+            ),
             ("repository", true, &REPO_URL_SCHEMA),
-            ("keyfile", true, &StringSchema::new("Path to encryption key.").schema()),
-            ("verbose", true, &BooleanSchema::new("Verbose output and stay in foreground.").default(false).schema()),
+            (
+                "keyfile",
+                true,
+                &StringSchema::new("Path to encryption key.").schema()
+            ),
+            (
+                "verbose",
+                true,
+                &BooleanSchema::new("Verbose output and stay in foreground.")
+                    .default(false)
+                    .schema()
+            ),
         ]),
-    )
+    ),
 );
 
 #[sortable]
@@ -78,17 +111,19 @@ const API_METHOD_UNMAP: ApiMethod = ApiMethod::new(
     &ApiHandler::Sync(&unmap),
     &ObjectSchema::new(
         "Unmap a loop device mapped with 'map' and release all resources.",
-        &sorted!([
-            ("name", true, &StringSchema::new(
-                concat!("Archive name, path to loopdev (/dev/loopX) or loop device number. ",
-                        "Omit to list all current mappings and force cleaning up leftover instances.")
-            ).schema()),
-        ]),
-    )
+        &sorted!([(
+            "name",
+            true,
+            &StringSchema::new(concat!(
+                "Archive name, path to loopdev (/dev/loopX) or loop device number. ",
+                "Omit to list all current mappings and force cleaning up leftover instances."
+            ))
+            .schema()
+        ),]),
+    ),
 );
 
 pub fn mount_cmd_def() -> CliCommand {
     CliCommand::new(&API_METHOD_MOUNT)
         .arg_param(&["snapshot", "archive-name", "target"])
         .completion_cb("repository", complete_repository)
@@ -98,7 +133,6 @@ pub fn mount_cmd_def() -> CliCommand {
 }
 
 pub fn map_cmd_def() -> CliCommand {
-
     CliCommand::new(&API_METHOD_MAP)
         .arg_param(&["snapshot", "archive-name"])
         .completion_cb("repository", complete_repository)
@@ -107,21 +141,20 @@ pub fn map_cmd_def() -> CliCommand {
 }
 
 pub fn unmap_cmd_def() -> CliCommand {
-
     CliCommand::new(&API_METHOD_UNMAP)
         .arg_param(&["name"])
         .completion_cb("name", complete_mapping_names)
 }
 
-fn complete_mapping_names<S: BuildHasher>(_arg: &str, _param: &HashMap<String, String, S>)
-    -> Vec<String>
-{
+fn complete_mapping_names<S: BuildHasher>(
+    _arg: &str,
+    _param: &HashMap<String, String, S>,
+) -> Vec<String> {
     match pbs_fuse_loop::find_all_mappings() {
         Ok(mappings) => mappings
-            .filter_map(|(name, _)| {
-                proxmox_sys::systemd::unescape_unit(&name).ok()
-            }).collect(),
-        Err(_) => Vec::new()
+            .filter_map(|(name, _)| proxmox_sys::systemd::unescape_unit(&name).ok())
+            .collect(),
+        Err(_) => Vec::new(),
     }
 }
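In complete_mapping_names, rustfmt also drops the now-redundant braces around a single-expression closure and gives each chained call its own line. Sketch (the unit names are made up):

    fn main() {
        let mappings = ["drive-scsi0.img.service", "drive-scsi1.img.service"];
        // Hand-written: .filter_map(|name| { name.strip_suffix(".service") }).collect()
        // After rustfmt the closure braces go away and the chain is split per call:
        let names: Vec<&str> = mappings
            .iter()
            .filter_map(|name| name.strip_suffix(".service"))
            .collect();
        println!("{names:?}");
    }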
@@ -130,7 +163,6 @@ fn mount(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let verbose = param["verbose"].as_bool().unwrap_or(false);
     if verbose {
         // This will stay in foreground with debug output enabled as None is
@@ -172,7 +204,11 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         api_datastore_latest_snapshot(&client, repo.store(), group).await?
     } else {
         let snapshot: BackupDir = path.parse()?;
-        (snapshot.group().backup_type().to_owned(), snapshot.group().backup_id().to_owned(), snapshot.backup_time())
+        (
+            snapshot.group().backup_type().to_owned(),
+            snapshot.group().backup_id().to_owned(),
+            snapshot.backup_time(),
+        )
     };
 
     let keyfile = param["keyfile"].as_str().map(PathBuf::from);
@@ -208,7 +244,8 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
         &backup_id,
         backup_time,
         true,
-    ).await?;
+    )
+    .await?;
 
     let (manifest, _) = client.download_manifest().await?;
     manifest.check_fingerprint(crypt_config.as_ref().map(Arc::as_ref))?;
@@ -223,7 +260,8 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
             "/dev/null",
             nix::fcntl::OFlag::O_RDWR,
             nix::sys::stat::Mode::empty(),
-        ).unwrap();
+        )
+        .unwrap();
         nix::unistd::dup2(nullfd, 0).unwrap();
         nix::unistd::dup2(nullfd, 1).unwrap();
         nix::unistd::dup2(nullfd, 2).unwrap();
@@ -245,16 +283,23 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
     let mut interrupt_int = signal(SignalKind::interrupt())?;
     let mut interrupt_term = signal(SignalKind::terminate())?;
-    let mut interrupt = futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed());
+    let mut interrupt =
+        futures::future::select(interrupt_int.recv().boxed(), interrupt_term.recv().boxed());
 
     if server_archive_name.ends_with(".didx") {
-        let index = client.download_dynamic_index(&manifest, &server_archive_name).await?;
+        let index = client
+            .download_dynamic_index(&manifest, &server_archive_name)
+            .await?;
         let most_used = index.find_most_used_chunks(8);
-        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), most_used);
+        let chunk_reader = RemoteChunkReader::new(
+            client.clone(),
+            crypt_config,
+            file_info.chunk_crypt_mode(),
+            most_used,
+        );
         let reader = BufferedDynamicReader::new(index, chunk_reader);
         let archive_size = reader.archive_size();
-        let reader: pbs_client::pxar::fuse::Reader =
-            Arc::new(BufferedDynamicReadAt::new(reader));
+        let reader: pbs_client::pxar::fuse::Reader = Arc::new(BufferedDynamicReadAt::new(reader));
         let decoder = pbs_client::pxar::fuse::Accessor::new(reader, archive_size).await?;
 
         let session = pbs_client::pxar::fuse::Session::mount(
@@ -274,15 +319,23 @@ async fn mount_do(param: Value, pipe: Option<Fd>) -> Result<Value, Error> {
             }
         }
     } else if server_archive_name.ends_with(".fidx") {
-        let index = client.download_fixed_index(&manifest, &server_archive_name).await?;
+        let index = client
+            .download_fixed_index(&manifest, &server_archive_name)
+            .await?;
         let size = index.index_bytes();
-        let chunk_reader = RemoteChunkReader::new(client.clone(), crypt_config, file_info.chunk_crypt_mode(), HashMap::new());
+        let chunk_reader = RemoteChunkReader::new(
+            client.clone(),
+            crypt_config,
+            file_info.chunk_crypt_mode(),
+            HashMap::new(),
+        );
         let reader = CachedChunkReader::new(chunk_reader, index, 8).seekable();
 
         let name = &format!("{}:{}/{}", repo, path, archive_name);
         let name_escaped = proxmox_sys::systemd::escape_unit(name, false);
 
-        let mut session = pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
+        let mut session =
+            pbs_fuse_loop::FuseLoopSession::map_loop(size, reader, &name_escaped, options).await?;
         let loopdev = session.loopdev_path.clone();
         let (st_send, st_recv) = futures::channel::mpsc::channel(1);
@@ -335,7 +388,6 @@ fn unmap(
     _info: &ApiMethod,
     _rpcenv: &mut dyn RpcEnvironment,
 ) -> Result<Value, Error> {
-
     let mut name = match param["name"].as_str() {
         Some(name) => name.to_owned(),
         None => {
@@ -343,14 +395,18 @@ fn unmap(
             let mut any = false;
             for (backing, loopdev) in pbs_fuse_loop::find_all_mappings()? {
                 let name = proxmox_sys::systemd::unescape_unit(&backing)?;
-                println!("{}:\t{}", loopdev.unwrap_or_else(|| "(unmapped)".to_string()), name);
+                println!(
+                    "{}:\t{}",
+                    loopdev.unwrap_or_else(|| "(unmapped)".to_string()),
+                    name
+                );
                 any = true;
             }
             if !any {
                 println!("Nothing mapped.");
             }
             return Ok(Value::Null);
-        },
+        }
     };
 
     // allow loop device number alone
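The last change above (`},` to `}`) is rustfmt's default match_block_trailing_comma = false: an arm whose body is a block gets no trailing comma. Sketch (illustrative):

    fn main() {
        let param: Option<&str> = None;
        let name = match param {
            Some(name) => name.to_owned(),
            None => {
                println!("Nothing mapped.");
                return;
            } // no comma after a block-bodied arm
        };
        println!("{name}");
    }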


@@ -3,30 +3,21 @@
 use anyhow::Error;
 use serde_json::{json, Value};
 
-use proxmox_sys::fs::file_get_contents;
 use proxmox_router::cli::*;
 use proxmox_schema::api;
+use proxmox_sys::fs::file_get_contents;
 
-use pbs_tools::crypt_config::CryptConfig;
-use pbs_config::key_config::decrypt_key;
-use pbs_api_types::{SnapshotListItem, CryptMode};
+use pbs_api_types::{CryptMode, SnapshotListItem};
 use pbs_client::tools::key_source::get_encryption_key_password;
-use pbs_datastore::{DataBlob, BackupGroup};
+use pbs_config::key_config::decrypt_key;
+use pbs_datastore::{BackupGroup, DataBlob};
+use pbs_tools::crypt_config::CryptConfig;
 use pbs_tools::json::required_string_param;
 
 use crate::{
-    REPO_URL_SCHEMA,
-    KEYFILE_SCHEMA,
-    KEYFD_SCHEMA,
-    BackupDir,
-    api_datastore_list_snapshots,
-    complete_backup_snapshot,
-    complete_backup_group,
-    complete_repository,
-    connect,
-    crypto_parameters,
-    extract_repository_from_value,
-    record_repository,
+    api_datastore_list_snapshots, complete_backup_group, complete_backup_snapshot,
+    complete_repository, connect, crypto_parameters, extract_repository_from_value,
+    record_repository, BackupDir, KEYFD_SCHEMA, KEYFILE_SCHEMA, REPO_URL_SCHEMA,
 };
 
 #[api(
@@ -50,7 +41,6 @@ use crate::{
 )]
 /// List backup snapshots.
 async fn list_snapshots(param: Value) -> Result<Value, Error> {
-
     let repo = extract_repository_from_value(&param)?;
 
     let output_format = get_output_format(&param);
@@ -86,10 +76,13 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
         .sortby("backup-type", false)
         .sortby("backup-id", false)
         .sortby("backup-time", false)
-        .column(ColumnConfig::new("backup-id").renderer(render_snapshot_path).header("snapshot"))
+        .column(
+            ColumnConfig::new("backup-id")
+                .renderer(render_snapshot_path)
+                .header("snapshot"),
+        )
         .column(ColumnConfig::new("size").renderer(pbs_tools::format::render_bytes_human_readable))
-        .column(ColumnConfig::new("files").renderer(render_files))
-        ;
+        .column(ColumnConfig::new("files").renderer(render_files));
 
     let return_type = &pbs_api_types::ADMIN_DATASTORE_LIST_SNAPSHOTS_RETURN_TYPE;
@@ -118,7 +111,6 @@ async fn list_snapshots(param: Value) -> Result<Value, Error> {
 )]
 /// List snapshot files.
 async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
-
     let repo = extract_repository_from_value(&param)?;
 
     let path = required_string_param(&param, "snapshot")?;
@@ -130,11 +122,16 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
     let path = format!("api2/json/admin/datastore/{}/files", repo.store());
 
-    let mut result = client.get(&path, Some(json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
-    }))).await?;
+    let mut result = client
+        .get(
+            &path,
+            Some(json!({
+                "backup-type": snapshot.group().backup_type(),
+                "backup-id": snapshot.group().backup_id(),
+                "backup-time": snapshot.backup_time(),
+            })),
+        )
+        .await?;
 
     record_repository(&repo);
@@ -165,7 +162,6 @@ async fn list_snapshot_files(param: Value) -> Result<Value, Error> {
 )]
 /// Forget (remove) backup snapshots.
 async fn forget_snapshots(param: Value) -> Result<Value, Error> {
-
     let repo = extract_repository_from_value(&param)?;
 
     let path = required_string_param(&param, "snapshot")?;
@@ -175,11 +171,16 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
     let path = format!("api2/json/admin/datastore/{}/snapshots", repo.store());
 
-    let result = client.delete(&path, Some(json!({
-        "backup-type": snapshot.group().backup_type(),
-        "backup-id": snapshot.group().backup_id(),
-        "backup-time": snapshot.backup_time(),
-    }))).await?;
+    let result = client
+        .delete(
+            &path,
+            Some(json!({
+                "backup-type": snapshot.group().backup_type(),
+                "backup-id": snapshot.group().backup_id(),
+                "backup-time": snapshot.backup_time(),
+            })),
+        )
+        .await?;
 
     record_repository(&repo);
@@ -218,7 +219,6 @@ async fn forget_snapshots(param: Value) -> Result<Value, Error> {
 )]
 /// Upload backup log file.
 async fn upload_log(param: Value) -> Result<Value, Error> {
-
     let logfile = required_string_param(&param, "logfile")?;
     let repo = extract_repository_from_value(&param)?;
@@ -243,12 +243,17 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
     // fixme: howto sign log?
     let blob = match crypto.mode {
         CryptMode::None | CryptMode::SignOnly => DataBlob::encode(&data, None, true)?,
-        CryptMode::Encrypt => DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?,
+        CryptMode::Encrypt => {
+            DataBlob::encode(&data, crypt_config.as_ref().map(Arc::as_ref), true)?
+        }
     };
 
     let raw_data = blob.into_inner();
-    let path = format!("api2/json/admin/datastore/{}/upload-backup-log", repo.store());
+    let path = format!(
+        "api2/json/admin/datastore/{}/upload-backup-log",
+        repo.store()
+    );
 
     let args = json!({
         "backup-type": snapshot.group().backup_type(),
@@ -258,7 +263,9 @@ async fn upload_log(param: Value) -> Result<Value, Error> {
 
     let body = hyper::Body::from(raw_data);
 
-    client.upload("application/octet-stream", body, &path, Some(args)).await
+    client
+        .upload("application/octet-stream", body, &path, Some(args))
+        .await
 }
 
 #[api(
@@ -495,21 +502,21 @@ pub fn snapshot_mgtm_cli() -> CliCommandMap {
             CliCommand::new(&API_METHOD_LIST_SNAPSHOTS)
                 .arg_param(&["group"])
                 .completion_cb("group", complete_backup_group)
-                .completion_cb("repository", complete_repository)
+                .completion_cb("repository", complete_repository),
         )
         .insert(
             "files",
             CliCommand::new(&API_METHOD_LIST_SNAPSHOT_FILES)
                 .arg_param(&["snapshot"])
                 .completion_cb("repository", complete_repository)
-                .completion_cb("snapshot", complete_backup_snapshot)
+                .completion_cb("snapshot", complete_backup_snapshot),
         )
        .insert(
            "forget",
            CliCommand::new(&API_METHOD_FORGET_SNAPSHOTS)
                .arg_param(&["snapshot"])
                .completion_cb("repository", complete_repository)
-                .completion_cb("snapshot", complete_backup_snapshot)
+                .completion_cb("snapshot", complete_backup_snapshot),
         )
         .insert(
             "upload-log",
@@ -518,6 +525,6 @@ pub fn snapshot_mgtm_cli() -> CliCommandMap {
                 .completion_cb("snapshot", complete_backup_snapshot)
                 .completion_cb("logfile", complete_file_name)
                 .completion_cb("keyfile", complete_file_name)
-                .completion_cb("repository", complete_repository)
+                .completion_cb("repository", complete_repository),
         )
 }


@@ -1,21 +1,16 @@
-use anyhow::{Error};
+use anyhow::Error;
 use serde_json::{json, Value};
 
-use proxmox_schema::api;
 use proxmox_router::cli::*;
+use proxmox_schema::api;
 
-use pbs_client::display_task_log;
 use pbs_api_types::percent_encoding::percent_encode_component;
+use pbs_client::display_task_log;
 use pbs_tools::json::required_string_param;
 use pbs_api_types::UPID;
 
-use crate::{
-    REPO_URL_SCHEMA,
-    extract_repository_from_value,
-    complete_repository,
-    connect,
-};
+use crate::{complete_repository, connect, extract_repository_from_value, REPO_URL_SCHEMA};
 
 #[api(
     input: {
@@ -46,7 +41,6 @@ use crate::{
 )]
 /// List running server tasks for this repo user
 async fn task_list(param: Value) -> Result<Value, Error> {
-
     let output_format = get_output_format(&param);
     let repo = extract_repository_from_value(&param)?;
@@ -63,15 +57,25 @@
         "store": repo.store(),
     });
 
-    let mut result = client.get("api2/json/nodes/localhost/tasks", Some(args)).await?;
+    let mut result = client
+        .get("api2/json/nodes/localhost/tasks", Some(args))
+        .await?;
 
     let mut data = result["data"].take();
     let return_type = &pbs_api_types::NODE_TASKS_LIST_TASKS_RETURN_TYPE;
 
     use pbs_tools::format::{render_epoch, render_task_status};
 
     let options = default_table_format_options()
-        .column(ColumnConfig::new("starttime").right_align(false).renderer(render_epoch))
-        .column(ColumnConfig::new("endtime").right_align(false).renderer(render_epoch))
+        .column(
+            ColumnConfig::new("starttime")
+                .right_align(false)
+                .renderer(render_epoch),
+        )
+        .column(
+            ColumnConfig::new("endtime")
+                .right_align(false)
+                .renderer(render_epoch),
+        )
         .column(ColumnConfig::new("upid"))
         .column(ColumnConfig::new("status").renderer(render_task_status));
@@ -95,7 +99,6 @@ async fn task_list(param: Value) -> Result<Value, Error> {
 )]
 /// Display the task log.
 async fn task_log(param: Value) -> Result<Value, Error> {
-
     let repo = extract_repository_from_value(&param)?;
     let upid = required_string_param(&param, "upid")?;
@@ -121,28 +124,27 @@ async fn task_log(param: Value) -> Result<Value, Error> {
 )]
 /// Try to stop a specific task.
 async fn task_stop(param: Value) -> Result<Value, Error> {
-
     let repo = extract_repository_from_value(&param)?;
     let upid_str = required_string_param(&param, "upid")?;
 
     let client = connect(&repo)?;
 
-    let path = format!("api2/json/nodes/localhost/tasks/{}", percent_encode_component(upid_str));
+    let path = format!(
+        "api2/json/nodes/localhost/tasks/{}",
+        percent_encode_component(upid_str)
+    );
     let _ = client.delete(&path, None).await?;
 
     Ok(Value::Null)
 }
 
 pub fn task_mgmt_cli() -> CliCommandMap {
+    let task_list_cmd_def =
+        CliCommand::new(&API_METHOD_TASK_LIST).completion_cb("repository", complete_repository);
 
-    let task_list_cmd_def = CliCommand::new(&API_METHOD_TASK_LIST)
-        .completion_cb("repository", complete_repository);
+    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG).arg_param(&["upid"]);
 
-    let task_log_cmd_def = CliCommand::new(&API_METHOD_TASK_LOG)
-        .arg_param(&["upid"]);
+    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP).arg_param(&["upid"]);
 
-    let task_stop_cmd_def = CliCommand::new(&API_METHOD_TASK_STOP)
-        .arg_param(&["upid"]);
-
     CliCommandMap::new()
         .insert("log", task_log_cmd_def)